author    Ryan Dahl <ry@tinyclouds.org>  2010-05-31 11:52:20 -0700
committer Ryan Dahl <ry@tinyclouds.org>  2010-05-31 11:52:20 -0700
commit    f86a214357ce4e54d0d5c56c7db295ec745814c3
tree      2c6954837f85bc586ba348678322267a26a14a73
parent    0c1aa36835fa6a3557843dcbc6ed6714d353a783
download  node-new-f86a214357ce4e54d0d5c56c7db295ec745814c3.tar.gz
Upgrade to V8 2.2.13
Diffstat (limited to 'deps/v8/src')
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 90
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 15
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 45
-rw-r--r--  deps/v8/src/arm/codegen-arm-inl.h | 24
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 958
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 67
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 7
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 44
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 205
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 109
-rw-r--r--  deps/v8/src/arm/jump-target-arm.cc | 11
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 82
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 21
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 45
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 366
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 97
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 24
-rw-r--r--  deps/v8/src/builtins.cc | 6
-rw-r--r--  deps/v8/src/codegen.h | 286
-rw-r--r--  deps/v8/src/flag-definitions.h | 1
-rw-r--r--  deps/v8/src/full-codegen.cc | 72
-rw-r--r--  deps/v8/src/full-codegen.h | 5
-rw-r--r--  deps/v8/src/globals.h | 9
-rw-r--r--  deps/v8/src/heap-inl.h | 56
-rw-r--r--  deps/v8/src/heap.cc | 632
-rw-r--r--  deps/v8/src/heap.h | 113
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 23
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 569
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 76
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 202
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 103
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 106
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 504
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 2
-rw-r--r--  deps/v8/src/jump-target-heavy.cc | 29
-rw-r--r--  deps/v8/src/jump-target-light.cc | 5
-rw-r--r--  deps/v8/src/macros.py | 10
-rw-r--r--  deps/v8/src/mark-compact.cc | 193
-rw-r--r--  deps/v8/src/mark-compact.h | 62
-rw-r--r--  deps/v8/src/objects-debug.cc | 3
-rw-r--r--  deps/v8/src/objects-inl.h | 110
-rw-r--r--  deps/v8/src/objects.cc | 21
-rw-r--r--  deps/v8/src/objects.h | 214
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 6
-rw-r--r--  deps/v8/src/platform-linux.cc | 3
-rw-r--r--  deps/v8/src/profile-generator.cc | 6
-rw-r--r--  deps/v8/src/runtime.cc | 138
-rw-r--r--  deps/v8/src/runtime.h | 1
-rw-r--r--  deps/v8/src/spaces-inl.h | 224
-rw-r--r--  deps/v8/src/spaces.cc | 444
-rw-r--r--  deps/v8/src/spaces.h | 278
-rw-r--r--  deps/v8/src/string.js | 39
-rw-r--r--  deps/v8/src/stub-cache.h | 8
-rw-r--r--  deps/v8/src/v8.cc | 4
-rw-r--r--  deps/v8/src/v8natives.js | 20
-rw-r--r--  deps/v8/src/version.cc | 2
-rw-r--r--  deps/v8/src/virtual-frame-light-inl.h | 20
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 2
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 44
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 705
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 39
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 196
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 131
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 169
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 16
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 392
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 2
68 files changed, 4753 insertions, 3764 deletions
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 050e15bcc2..6dd381febb 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -903,20 +903,6 @@ void Assembler::bx(Register target, Condition cond) { // v5 and above, plus v4t
// Data-processing instructions.
-// UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
-// Instruction details available in ARM DDI 0406A, A8-464.
-// cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
-// Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
-void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond) {
- ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
- ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);
- ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);
- emit(cond | 0x3F*B21 | src3.imm32_*B16 |
- dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
-}
-
-
void Assembler::and_(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | 0*B21 | s, src1, dst, src2);
@@ -1106,6 +1092,82 @@ void Assembler::clz(Register dst, Register src, Condition cond) {
}
+// Bitfield manipulation instructions.
+
+// Unsigned bit field extract.
+// Extracts #width adjacent bits from position #lsb in a register, and
+// writes them to the low bits of a destination register.
+// ubfx dst, src, #lsb, #width
+void Assembler::ubfx(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
+ lsb*B7 | B6 | B4 | src.code());
+}
+
+
+// Signed bit field extract.
+// Extracts #width adjacent bits from position #lsb in a register, and
+// writes them to the low bits of a destination register. The extracted
+// value is sign extended to fill the destination register.
+// sbfx dst, src, #lsb, #width
+void Assembler::sbfx(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
+ lsb*B7 | B6 | B4 | src.code());
+}
+
+
+// Bit field clear.
+// Sets #width adjacent bits at position #lsb in the destination register
+// to zero, preserving the value of the other bits.
+// bfc dst, #lsb, #width
+void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ int msb = lsb + width - 1;
+ emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
+}
+
+
+// Bit field insert.
+// Inserts #width adjacent bits from the low bits of the source register
+// into position #lsb of the destination register.
+// bfi dst, src, #lsb, #width
+void Assembler::bfi(Register dst,
+ Register src,
+ int lsb,
+ int width,
+ Condition cond) {
+ // v7 and above.
+ ASSERT(CpuFeatures::IsSupported(ARMv7));
+ ASSERT(!dst.is(pc) && !src.is(pc));
+ ASSERT((lsb >= 0) && (lsb <= 31));
+ ASSERT((width >= 1) && (width <= (32 - lsb)));
+ int msb = lsb + width - 1;
+ emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
+ src.code());
+}
+
+
// Status register access instructions.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
ASSERT(!dst.is(pc));
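The four new encodings reduce to simple shift-and-mask operations on the source register. As a rough reference, a C++ sketch of their semantics (illustrative only, not V8 code; it assumes the operand ranges the ASSERTs above enforce, 0 <= lsb <= 31 and 1 <= width <= 32 - lsb):

    #include <cstdint>

    // ubfx: take `width` bits starting at `lsb`, zero-extended.
    uint32_t ubfx(uint32_t src, int lsb, int width) {
      uint32_t mask = (width == 32) ? 0xffffffffu : ((1u << width) - 1);
      return (src >> lsb) & mask;
    }

    // sbfx: same field, but sign-extended into the full register.
    int32_t sbfx(uint32_t src, int lsb, int width) {
      // Shift the field to the top, then arithmetic-shift back down.
      return static_cast<int32_t>(src << (32 - lsb - width)) >> (32 - width);
    }

    // bfc: clear `width` bits at `lsb`, preserving the rest.
    uint32_t bfc(uint32_t dst, int lsb, int width) {
      uint32_t mask = ((width == 32) ? 0xffffffffu : ((1u << width) - 1)) << lsb;
      return dst & ~mask;
    }

    // bfi: insert the low `width` bits of `src` at `lsb` in `dst`.
    uint32_t bfi(uint32_t dst, uint32_t src, int lsb, int width) {
      uint32_t mask = ((width == 32) ? 0xffffffffu : ((1u << width) - 1)) << lsb;
      return (dst & ~mask) | ((src << lsb) & mask);
    }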
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index a1b98f673d..947c3631df 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -671,8 +671,6 @@ class Assembler : public Malloced {
void blx(Label* L) { blx(branch_offset(L, false)); } // v5 and above
// Data-processing instructions
- void ubfx(Register dst, Register src1, const Operand& src2,
- const Operand& src3, Condition cond = al);
void and_(Register dst, Register src1, const Operand& src2,
SBit s = LeaveCC, Condition cond = al);
@@ -759,6 +757,19 @@ class Assembler : public Malloced {
void clz(Register dst, Register src, Condition cond = al); // v5 and above
+ // Bitfield manipulation instructions. v7 and above.
+
+ void ubfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
+ void sbfx(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
+ void bfc(Register dst, int lsb, int width, Condition cond = al);
+
+ void bfi(Register dst, Register src, int lsb, int width,
+ Condition cond = al);
+
// Status register access instructions
void mrs(Register dst, SRegister s, Condition cond = al);
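The header change also simplifies call sites: bit positions are now plain ints rather than immediate Operands, and the v7-only instructions sit behind the usual CpuFeatures check. A hypothetical call site (registers, field layout, and the scope-guard idiom are illustrative assumptions, not taken from this diff):

    // Hypothetical: extract the 8-bit field at bits [1..8] of r1 into r0.
    if (CpuFeatures::IsSupported(ARMv7)) {
      CpuFeatures::Scope scope(ARMv7);  // assumed guard, as used for VFP3 elsewhere
      __ ubfx(r0, r1, 1, 8);            // r0 = (r1 >> 1) & 0xff
    } else {
      __ mov(r0, Operand(r1, LSR, 1));  // fallback without bitfield support
      __ and_(r0, r0, Operand(0xff));
    }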
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 1f776562f2..ddbb9777d7 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -138,7 +138,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
// Clear the heap tag on the elements array.
__ and_(scratch1, scratch1, Operand(~kHeapObjectTagMask));
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array (untagged)
@@ -146,7 +146,7 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ LoadRoot(scratch3, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
- __ mov(scratch3, Operand(initial_capacity));
+ __ mov(scratch3, Operand(Smi::FromInt(initial_capacity)));
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(scratch3, MemOperand(scratch1, kPointerSize, PostIndex));
@@ -243,23 +243,23 @@ static void AllocateJSArray(MacroAssembler* masm,
__ and_(elements_array_storage,
elements_array_storage,
Operand(~kHeapObjectTagMask));
- // Initialize the fixed array and fill it with holes. FixedArray length is not
+ // Initialize the fixed array and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// elements_array_storage: elements array (untagged)
// array_size: size of array (smi)
- ASSERT(kSmiTag == 0);
__ LoadRoot(scratch1, Heap::kFixedArrayMapRootIndex);
ASSERT_EQ(0 * kPointerSize, FixedArray::kMapOffset);
__ str(scratch1, MemOperand(elements_array_storage, kPointerSize, PostIndex));
- // Convert array_size from smi to value.
- __ mov(array_size,
- Operand(array_size, ASR, kSmiTagSize));
+ ASSERT(kSmiTag == 0);
__ tst(array_size, array_size);
// Length of the FixedArray is the number of pre-allocated elements if
// the actual JSArray has length 0 and the size of the JSArray for non-empty
- // JSArrays. The length of a FixedArray is not stored as a smi.
- __ mov(array_size, Operand(JSArray::kPreallocatedArrayElements), LeaveCC, eq);
+ // JSArrays. The length of a FixedArray is stored as a smi.
+ __ mov(array_size,
+ Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)),
+ LeaveCC,
+ eq);
ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
__ str(array_size,
MemOperand(elements_array_storage, kPointerSize, PostIndex));
@@ -267,10 +267,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Calculate elements array and elements array end.
// result: JSObject
// elements_array_storage: elements array element storage
- // array_size: size of elements array
+ // array_size: smi-tagged size of elements array
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
__ add(elements_array_end,
elements_array_storage,
- Operand(array_size, LSL, kPointerSizeLog2));
+ Operand(array_size, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -543,7 +544,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
- // r7: undefined
+ // r7: undefined value
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
@@ -555,14 +556,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// instance type would be JS_FUNCTION_TYPE.
// r1: constructor function
// r2: initial map
- // r7: undefined
+ // r7: undefined value
__ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE);
__ b(eq, &rt_call);
// Now allocate the JSObject on the heap.
// r1: constructor function
// r2: initial map
- // r7: undefined
+ // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
__ AllocateInNewSpace(r3, r4, r5, r6, &rt_call, SIZE_IN_WORDS);
@@ -572,7 +573,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r2: initial map
// r3: object size
// r4: JSObject (not tagged)
- // r7: undefined
+ // r7: undefined value
__ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
__ mov(r5, r4);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
@@ -588,7 +589,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: object size (in words)
// r4: JSObject (not tagged)
// r5: First in-object property of JSObject (not tagged)
- // r7: undefined
+ // r7: undefined value
__ add(r6, r4, Operand(r3, LSL, kPointerSizeLog2)); // End of object.
ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
{ Label loop, entry;
@@ -611,7 +612,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
// r4: JSObject
// r5: start of next object (not tagged)
- // r7: undefined
+ // r7: undefined value
__ ldrb(r3, FieldMemOperand(r2, Map::kUnusedPropertyFieldsOffset));
// The instance sizes field contains both pre-allocated property fields and
// in-object properties.
@@ -633,7 +634,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: start of next object
- // r7: undefined
+ // r7: undefined value
__ add(r0, r3, Operand(FixedArray::kHeaderSize / kPointerSize));
__ AllocateInNewSpace(
r0,
@@ -648,13 +649,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r3: number of elements in properties array
// r4: JSObject
// r5: FixedArray (not tagged)
- // r7: undefined
+ // r7: undefined value
__ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
__ mov(r2, r5);
ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
__ str(r6, MemOperand(r2, kPointerSize, PostIndex));
- ASSERT_EQ(1 * kPointerSize, Array::kLengthOffset);
- __ str(r3, MemOperand(r2, kPointerSize, PostIndex));
+ ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+ __ mov(r0, Operand(r3, LSL, kSmiTagSize));
+ __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
// Initialize the fields to undefined.
// r1: constructor function
@@ -1047,6 +1049,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r3, SharedFunctionInfo::kCodeOffset));
__ add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
__ cmp(r2, r0); // Check formal and actual parameter counts.
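Nearly every change in this file follows from one representation change in this commit: the length of a FixedArray is now stored as a smi rather than a raw integer (and so is SharedFunctionInfo's formal parameter count, hence the new ASR above). On a 32-bit target a smi is the integer shifted left by kSmiTagSize with tag bit 0, so a tagged length can be scaled to a byte offset in a single shift instead of being untagged first; that is what `LSL, kPointerSizeLog2 - kSmiTagSize` exploits. A sketch of the arithmetic with the 32-bit constants (illustrative, not V8 code):

    const int kSmiTagSize = 1;       // smi = value << 1, tag bit == 0
    const int kPointerSizeLog2 = 2;  // 4-byte pointers

    int32_t SmiFromInt(int32_t value) { return value << kSmiTagSize; }
    int32_t SmiToInt(int32_t smi)     { return smi >> kSmiTagSize; }

    // Byte offset of pointer-sized element i, given its smi-tagged index:
    // untag-then-scale ((smi >> 1) << 2) folds into one shift (smi << 1).
    int32_t ElementOffsetFromSmi(int32_t smi_index) {
      return smi_index << (kPointerSizeLog2 - kSmiTagSize);
    }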
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
index 6edec4d760..264498dbfb 100644
--- a/deps/v8/src/arm/codegen-arm-inl.h
+++ b/deps/v8/src/arm/codegen-arm-inl.h
@@ -36,30 +36,6 @@ namespace internal {
#define __ ACCESS_MASM(masm_)
-void CodeGenerator::LoadConditionAndSpill(Expression* expression,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control) {
- LoadCondition(expression, true_target, false_target, force_control);
-}
-
-
-void CodeGenerator::LoadAndSpill(Expression* expression) {
- ASSERT(VirtualFrame::SpilledScope::is_spilled());
- Load(expression);
-}
-
-
-void CodeGenerator::VisitAndSpill(Statement* statement) {
- Visit(statement);
-}
-
-
-void CodeGenerator::VisitStatementsAndSpill(ZoneList<Statement*>* statements) {
- VisitStatements(statements);
-}
-
-
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 64ed425a77..d550cbd44e 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -49,8 +49,6 @@ namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
-
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
Condition cc,
@@ -68,33 +66,41 @@ static void MultiplyByKnownInt(MacroAssembler* masm,
static bool IsEasyToMultiplyBy(int x);
+#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// Platform-specific DeferredCode functions.
void DeferredCode::SaveRegisters() {
- for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
- int action = registers_[i];
- if (action == kPush) {
- __ push(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore && (action & kSyncedFlag) == 0) {
- __ str(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
- }
- }
+ // On ARM you either have a completely spilled frame or you
+ // handle it yourself, but at the moment there's no automation
+ // of registers and deferred code.
}
void DeferredCode::RestoreRegisters() {
- // Restore registers in reverse order due to the stack.
- for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
- int action = registers_[i];
- if (action == kPush) {
- __ pop(RegisterAllocator::ToRegister(i));
- } else if (action != kIgnore) {
- action &= ~kSyncedFlag;
- __ ldr(RegisterAllocator::ToRegister(i), MemOperand(fp, action));
- }
- }
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->frame()->AssertIsSpilled();
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
}
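The new RuntimeCallHelper pair lets shared stub generators (used below for the string primitives) make runtime calls without knowing their caller's frame discipline: inside the classic code generator the virtual frame is already spilled, so BeforeCall only asserts that; inside an IC stub an internal frame must be entered and left around the call. A sketch of how a generator's slow path is expected to use the hooks (the shape is inferred from this diff; the abstract base class itself lives in codegen.h):

    // Sketch: a shared slow case bracketing its runtime call.
    void SomeGenerator::GenerateSlow(MacroAssembler* masm,
                                     const RuntimeCallHelper& call_helper) {
      call_helper.BeforeCall(masm);   // assert-spilled, or EnterInternalFrame
      // ... push arguments and call the runtime ...
      call_helper.AfterCall(masm);    // no-op, or LeaveInternalFrame
    }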
@@ -315,7 +321,7 @@ void CodeGenerator::Generate(CompilationInfo* info) {
// Ignore the return value.
}
#endif
- VisitStatementsAndSpill(info->function()->body());
+ VisitStatements(info->function()->body());
}
}
@@ -652,7 +658,6 @@ void CodeGenerator::StoreArgumentsObject(bool initial) {
void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// Special handling of identifiers as subexpressions of typeof.
- VirtualFrame::SpilledScope spilled_scope(frame_);
Variable* variable = expr->AsVariableProxy()->AsVariable();
if (variable != NULL && !variable->is_this() && variable->is_global()) {
// For a global variable we build the property reference
@@ -667,10 +672,9 @@ void CodeGenerator::LoadTypeofExpression(Expression* expr) {
// For a variable that rewrites to a slot, we signal it is the immediate
// subexpression of a typeof.
LoadFromSlotCheckForArguments(variable->slot(), INSIDE_TYPEOF);
- frame_->SpillAll();
} else {
// Anything else can be handled normally.
- LoadAndSpill(expr);
+ Load(expr);
}
}
@@ -719,8 +723,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
}
} else {
// Anything else is a runtime error.
- VirtualFrame::SpilledScope spilled_scope(frame_);
- LoadAndSpill(e);
+ Load(e);
frame_->CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
@@ -850,11 +853,9 @@ void CodeGenerator::VirtualFrameBinaryOperation(Token::Value op,
case Token::SAR: {
Register rhs = frame_->PopToRegister();
Register lhs = frame_->PopToRegister(rhs); // Don't pop to rhs register.
- {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
- frame_->CallStub(&stub, 0);
- }
+ GenericBinaryOpStub stub(op, overwrite_mode, lhs, rhs, constant_rhs);
+ frame_->SpillAll();
+ frame_->CallStub(&stub, 0);
frame_->EmitPush(r0);
break;
}
@@ -1328,11 +1329,12 @@ void CodeGenerator::Comparison(Condition cc,
void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
CallFunctionFlags flags,
int position) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
+ frame_->AssertIsSpilled();
+
// Push the arguments ("left-to-right") on the stack.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
// Record the position for debugging purposes.
@@ -1368,7 +1370,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load applicand.apply onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
- LoadAndSpill(applicand);
+ Load(applicand);
Handle<String> name = Factory::LookupAsciiSymbol("apply");
frame_->Dup();
frame_->CallLoadIC(name, RelocInfo::CODE_TARGET);
@@ -1376,7 +1378,7 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
- LoadAndSpill(receiver);
+ Load(receiver);
LoadFromSlot(scope()->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
@@ -1564,7 +1566,7 @@ void CodeGenerator::VisitStatements(ZoneList<Statement*>* statements) {
#endif
VirtualFrame::SpilledScope spilled_scope(frame_);
for (int i = 0; frame_ != NULL && i < statements->length(); i++) {
- VisitAndSpill(statements->at(i));
+ Visit(statements->at(i));
}
ASSERT(!has_valid_frame() || frame_->height() == original_height);
}
@@ -1578,7 +1580,7 @@ void CodeGenerator::VisitBlock(Block* node) {
Comment cmnt(masm_, "[ Block");
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
- VisitStatementsAndSpill(node->statements());
+ VisitStatements(node->statements());
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
@@ -1668,12 +1670,11 @@ void CodeGenerator::VisitExpressionStatement(ExpressionStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ ExpressionStatement");
CodeForStatementPosition(node);
Expression* expression = node->expression();
expression->MarkAsStatement();
- LoadAndSpill(expression);
+ Load(expression);
frame_->Drop();
ASSERT(frame_->height() == original_height);
}
@@ -1683,7 +1684,6 @@ void CodeGenerator::VisitEmptyStatement(EmptyStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "// EmptyStatement");
CodeForStatementPosition(node);
// nothing to do
@@ -1695,7 +1695,6 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ IfStatement");
// Generate different code depending on which parts of the if statement
// are present or not.
@@ -1710,14 +1709,14 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
JumpTarget then;
JumpTarget else_;
// if (cond)
- LoadConditionAndSpill(node->condition(), &then, &else_, true);
+ LoadCondition(node->condition(), &then, &else_, true);
if (frame_ != NULL) {
Branch(false, &else_);
}
// then
if (frame_ != NULL || then.is_linked()) {
then.Bind();
- VisitAndSpill(node->then_statement());
+ Visit(node->then_statement());
}
if (frame_ != NULL) {
exit.Jump();
@@ -1725,7 +1724,7 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
// else
if (else_.is_linked()) {
else_.Bind();
- VisitAndSpill(node->else_statement());
+ Visit(node->else_statement());
}
} else if (has_then_stm) {
@@ -1733,14 +1732,14 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_else_stm);
JumpTarget then;
// if (cond)
- LoadConditionAndSpill(node->condition(), &then, &exit, true);
+ LoadCondition(node->condition(), &then, &exit, true);
if (frame_ != NULL) {
Branch(false, &exit);
}
// then
if (frame_ != NULL || then.is_linked()) {
then.Bind();
- VisitAndSpill(node->then_statement());
+ Visit(node->then_statement());
}
} else if (has_else_stm) {
@@ -1748,21 +1747,21 @@ void CodeGenerator::VisitIfStatement(IfStatement* node) {
ASSERT(!has_then_stm);
JumpTarget else_;
// if (!cond)
- LoadConditionAndSpill(node->condition(), &exit, &else_, true);
+ LoadCondition(node->condition(), &exit, &else_, true);
if (frame_ != NULL) {
Branch(true, &exit);
}
// else
if (frame_ != NULL || else_.is_linked()) {
else_.Bind();
- VisitAndSpill(node->else_statement());
+ Visit(node->else_statement());
}
} else {
Comment cmnt(masm_, "[ If");
ASSERT(!has_then_stm && !has_else_stm);
// if (cond)
- LoadConditionAndSpill(node->condition(), &exit, &exit, false);
+ LoadCondition(node->condition(), &exit, &exit, false);
if (frame_ != NULL) {
if (has_cc()) {
cc_reg_ = al;
@@ -1801,7 +1800,7 @@ void CodeGenerator::VisitReturnStatement(ReturnStatement* node) {
Comment cmnt(masm_, "[ ReturnStatement");
CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
+ Load(node->expression());
if (function_return_is_shadowed_) {
frame_->EmitPop(r0);
function_return_.Jump();
@@ -1823,7 +1822,7 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ WithEnterStatement");
CodeForStatementPosition(node);
- LoadAndSpill(node->expression());
+ Load(node->expression());
if (node->is_catch_block()) {
frame_->CallRuntime(Runtime::kPushCatchContext, 1);
} else {
@@ -1866,7 +1865,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
CodeForStatementPosition(node);
node->break_target()->SetExpectedHeight();
- LoadAndSpill(node->tag());
+ Load(node->tag());
JumpTarget next_test;
JumpTarget fall_through;
@@ -1905,7 +1904,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
fall_through.Bind();
fall_through.Unuse();
}
- VisitStatementsAndSpill(clause->statements());
+ VisitStatements(clause->statements());
// If control flow can fall through from the body, jump to the next body
// or the end of the statement.
@@ -1926,7 +1925,7 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
if (default_clause != NULL) {
Comment cmnt(masm_, "[ Default clause");
default_entry.Bind();
- VisitStatementsAndSpill(default_clause->statements());
+ VisitStatements(default_clause->statements());
// If control flow can fall out of the default and there is a case after
// it, jump to that case's body.
if (frame_ != NULL && default_exit.is_bound()) {
@@ -1976,7 +1975,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
}
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// Compile the test.
switch (info) {
@@ -2003,7 +2002,7 @@ void CodeGenerator::VisitDoWhileStatement(DoWhileStatement* node) {
if (has_valid_frame()) {
Comment cmnt(masm_, "[ DoWhileCondition");
CodeForDoWhileConditionPosition(node);
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// An invalid frame here indicates that control did not
// fall out of the test expression.
@@ -2044,7 +2043,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
// A NULL frame indicates that control did not fall out of the
// test expression.
@@ -2057,7 +2056,7 @@ void CodeGenerator::VisitWhileStatement(WhileStatement* node) {
if (has_valid_frame()) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// If control flow can fall out of the body, jump back to the top.
if (has_valid_frame()) {
@@ -2080,7 +2079,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
Comment cmnt(masm_, "[ ForStatement");
CodeForStatementPosition(node);
if (node->init() != NULL) {
- VisitAndSpill(node->init());
+ Visit(node->init());
}
// If the test is never true there is no need to compile the test or
@@ -2105,7 +2104,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// If the test is always true, there is no need to compile it.
if (info == DONT_KNOW) {
JumpTarget body;
- LoadConditionAndSpill(node->cond(), &body, node->break_target(), true);
+ LoadCondition(node->cond(), &body, node->break_target(), true);
if (has_valid_frame()) {
Branch(false, node->break_target());
}
@@ -2116,7 +2115,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
if (has_valid_frame()) {
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
if (node->next() == NULL) {
// If there is no update statement and control flow can fall out
@@ -2136,7 +2135,7 @@ void CodeGenerator::VisitForStatement(ForStatement* node) {
// after the code for the body actually belongs to the loop
// statement and not the body.
CodeForStatementPosition(node);
- VisitAndSpill(node->next());
+ Visit(node->next());
loop.Jump();
}
}
@@ -2165,7 +2164,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
JumpTarget exit;
// Get the object to enumerate over (converted to JSObject).
- LoadAndSpill(node->enumerable());
+ Load(node->enumerable());
// Both SpiderMonkey and kjs ignore null and undefined in contrast
// to the specification. 12.6.4 mandates a call to ToObject.
@@ -2276,7 +2275,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(r0); // map
frame_->EmitPush(r2); // enum cache bridge cache
__ ldr(r0, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
@@ -2289,7 +2287,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ ldr(r0, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(r0, LSL, kSmiTagSize));
frame_->EmitPush(r0);
__ mov(r0, Operand(Smi::FromInt(0))); // init index
frame_->EmitPush(r0);
@@ -2359,7 +2356,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
}
// Body.
CheckStack(); // TODO(1222600): ignore if body contains calls.
- VisitAndSpill(node->body());
+ Visit(node->body());
// Next. Reestablish a spilled frame in case we are coming here via
// a continue in the body.
@@ -2406,7 +2403,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
// Remove the exception from the stack.
frame_->Drop();
- VisitStatementsAndSpill(node->catch_block()->statements());
+ VisitStatements(node->catch_block()->statements());
if (frame_ != NULL) {
exit.Jump();
}
@@ -2441,7 +2438,7 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
+ VisitStatements(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2555,7 +2552,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
}
// Generate code for the statements in the try block.
- VisitStatementsAndSpill(node->try_block()->statements());
+ VisitStatements(node->try_block()->statements());
// Stop the introduced shadowing and count the number of required unlinks.
// After shadowing stops, the original labels are unshadowed and the
@@ -2645,7 +2642,7 @@ void CodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* node) {
// and the state - while evaluating the finally block.
//
// Generate code for the statements in the finally block.
- VisitStatementsAndSpill(node->finally_block()->statements());
+ VisitStatements(node->finally_block()->statements());
if (has_valid_frame()) {
// Restore state and return value or faked TOS.
@@ -2692,7 +2689,6 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ DebuggerStatament");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -2705,19 +2701,18 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateFunction(
Handle<SharedFunctionInfo> function_info) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ mov(r0, Operand(function_info));
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && function_info->num_literals() == 0) {
FastNewClosureStub stub;
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(function_info));
+ frame_->SpillAll();
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
} else {
// Create a new closure.
frame_->EmitPush(cp);
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(function_info));
frame_->CallRuntime(Runtime::kNewClosure, 2);
frame_->EmitPush(r0);
}
@@ -2762,19 +2757,19 @@ void CodeGenerator::VisitConditional(Conditional* node) {
Comment cmnt(masm_, "[ Conditional");
JumpTarget then;
JumpTarget else_;
- LoadConditionAndSpill(node->condition(), &then, &else_, true);
+ LoadCondition(node->condition(), &then, &else_, true);
if (has_valid_frame()) {
Branch(false, &else_);
}
if (has_valid_frame() || then.is_linked()) {
then.Bind();
- LoadAndSpill(node->then_expression());
+ Load(node->then_expression());
}
if (else_.is_linked()) {
JumpTarget exit;
if (has_valid_frame()) exit.Jump();
else_.Bind();
- LoadAndSpill(node->else_expression());
+ Load(node->else_expression());
if (exit.is_linked()) exit.Bind();
}
ASSERT_EQ(original_height + 1, frame_->height());
@@ -3194,7 +3189,7 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- LoadAndSpill(value);
+ Load(value);
frame_->EmitPop(r0);
__ mov(r2, Operand(key->handle()));
__ ldr(r1, frame_->Top()); // Load the receiver.
@@ -3205,28 +3200,28 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
case ObjectLiteral::Property::PROTOTYPE: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0); // dup the result
- LoadAndSpill(key);
- LoadAndSpill(value);
+ Load(key);
+ Load(value);
frame_->CallRuntime(Runtime::kSetProperty, 3);
break;
}
case ObjectLiteral::Property::SETTER: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(key);
+ Load(key);
__ mov(r0, Operand(Smi::FromInt(1)));
frame_->EmitPush(r0);
- LoadAndSpill(value);
+ Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
case ObjectLiteral::Property::GETTER: {
__ ldr(r0, frame_->Top());
frame_->EmitPush(r0);
- LoadAndSpill(key);
+ Load(key);
__ mov(r0, Operand(Smi::FromInt(0)));
frame_->EmitPush(r0);
- LoadAndSpill(value);
+ Load(value);
frame_->CallRuntime(Runtime::kDefineAccessor, 4);
break;
}
@@ -3275,7 +3270,7 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
if (CompileTimeValue::IsCompileTimeValue(value)) continue;
// The property must be set by generated code.
- LoadAndSpill(value);
+ Load(value);
frame_->EmitPop(r0);
// Fetch the object literal.
@@ -3299,12 +3294,11 @@ void CodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
// Call runtime routine to allocate the catch extension object and
// assign the exception value to the catch variable.
Comment cmnt(masm_, "[ CatchExtensionObject");
- LoadAndSpill(node->key());
- LoadAndSpill(node->value());
+ Load(node->key());
+ Load(node->value());
frame_->CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
frame_->EmitPush(r0);
ASSERT_EQ(original_height + 1, frame_->height());
@@ -3419,7 +3413,6 @@ void CodeGenerator::EmitNamedPropertyAssignment(Assignment* node) {
frame_->Dup();
}
EmitNamedLoad(name, var != NULL);
- frame_->EmitPush(r0);
// Perform the binary operation.
Literal* literal = node->value()->AsLiteral();
@@ -3624,10 +3617,9 @@ void CodeGenerator::VisitThrow(Throw* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Throw");
- LoadAndSpill(node->exception());
+ Load(node->exception());
CodeForSourcePosition(node->position());
frame_->CallRuntime(Runtime::kThrow, 1);
frame_->EmitPush(r0);
@@ -3652,7 +3644,6 @@ void CodeGenerator::VisitCall(Call* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ Call");
Expression* function = node->expression();
@@ -3673,6 +3664,7 @@ void CodeGenerator::VisitCall(Call* node) {
// ------------------------------------------------------------------------
if (var != NULL && var->is_possibly_eval()) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript example: 'eval(arg)' // eval is not known to be shadowed
// ----------------------------------
@@ -3682,12 +3674,12 @@ void CodeGenerator::VisitCall(Call* node) {
// call. Then we call the resolved function using the given
// arguments.
// Prepare stack for call to resolved function.
- LoadAndSpill(function);
+ Load(function);
__ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
frame_->EmitPush(r2); // Slot for receiver
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
// Prepare stack for call to ResolvePossiblyDirectEval.
@@ -3735,9 +3727,10 @@ void CodeGenerator::VisitCall(Call* node) {
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Setup the name register and call the IC initialization code.
__ mov(r2, Operand(var->name()));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -3750,6 +3743,7 @@ void CodeGenerator::VisitCall(Call* node) {
} else if (var != NULL && var->slot() != NULL &&
var->slot()->type() == Slot::LOOKUP) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// ----------------------------------
// JavaScript examples:
//
@@ -3827,13 +3821,14 @@ void CodeGenerator::VisitCall(Call* node) {
node->position());
} else {
- LoadAndSpill(property->obj()); // Receiver.
+ Load(property->obj()); // Receiver.
// Load the arguments.
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Set the name register and call the IC initialization code.
__ mov(r2, Operand(name));
InLoopFlag in_loop = loop_nesting() > 0 ? IN_LOOP : NOT_IN_LOOP;
@@ -3848,14 +3843,15 @@ void CodeGenerator::VisitCall(Call* node) {
// -------------------------------------------
// JavaScript example: 'array[index](1, 2, 3)'
// -------------------------------------------
+ VirtualFrame::SpilledScope spilled_scope(frame_);
- LoadAndSpill(property->obj());
+ Load(property->obj());
if (!property->is_synthetic()) {
// Duplicate receiver for later use.
__ ldr(r0, MemOperand(sp, 0));
frame_->EmitPush(r0);
}
- LoadAndSpill(property->key());
+ Load(property->key());
EmitKeyedLoad();
// Put the function below the receiver.
if (property->is_synthetic()) {
@@ -3880,7 +3876,9 @@ void CodeGenerator::VisitCall(Call* node) {
// ----------------------------------
// Load the function.
- LoadAndSpill(function);
+ Load(function);
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
// Pass the global proxy as the receiver.
LoadGlobalReceiver(r0);
@@ -3897,7 +3895,6 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -3909,16 +3906,18 @@ void CodeGenerator::VisitCallNew(CallNew* node) {
// Compute function to call and use the global object as the
// receiver. There is no need to use the global proxy here because
// it will always be replaced with a newly allocated object.
- LoadAndSpill(node->expression());
+ Load(node->expression());
LoadGlobal();
// Push the arguments ("left-to-right") on the stack.
ZoneList<Expression*>* args = node->arguments();
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
// r0: the number of arguments.
__ mov(r0, Operand(arg_count));
// Load the function into r1 as per calling convention.
@@ -3942,7 +3941,7 @@ void CodeGenerator::GenerateClassOf(ZoneList<Expression*>* args) {
JumpTarget leave, null, function, non_function_constructor;
// Load the object into r0.
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r0);
// If the object is a smi, we return null.
@@ -4000,7 +3999,7 @@ void CodeGenerator::GenerateValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
JumpTarget leave;
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r0); // r0 contains object.
// if (object->IsSmi()) return the object.
__ tst(r0, Operand(kSmiTagMask));
@@ -4019,8 +4018,8 @@ void CodeGenerator::GenerateSetValueOf(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 2);
JumpTarget leave;
- LoadAndSpill(args->at(0)); // Load the object.
- LoadAndSpill(args->at(1)); // Load the value.
+ Load(args->at(0)); // Load the object.
+ Load(args->at(1)); // Load the value.
frame_->EmitPop(r0); // r0 contains value
frame_->EmitPop(r1); // r1 contains object
// if (object->IsSmi()) return object.
@@ -4056,9 +4055,7 @@ void CodeGenerator::GenerateLog(ZoneList<Expression*>* args) {
if (ShouldGenerateLog(args->at(0))) {
Load(args->at(1));
Load(args->at(2));
- frame_->SpillAll();
- VirtualFrame::SpilledScope spilled_scope(frame_);
- __ CallRuntime(Runtime::kLog, 2);
+ frame_->CallRuntime(Runtime::kLog, 2);
}
#endif
frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
@@ -4093,99 +4090,240 @@ void CodeGenerator::GenerateMathSqrt(ZoneList<Expression*>* args) {
}
-// This generates code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat, 8 and 16 bit characters and cons strings where the
-// answer is found in the left hand branch of the cons. The slow case will
-// flatten the string, which will ensure that the answer is in the left hand
-// side the next time around.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
- Comment(masm_, "[ GenerateFastCharCodeAt");
Load(args->at(0));
Load(args->at(1));
- Register index = frame_->PopToRegister(); // Index.
- Register string = frame_->PopToRegister(index); // String.
- Register result = VirtualFrame::scratch0();
- Register scratch = VirtualFrame::scratch1();
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- string,
- index,
- scratch,
- result,
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
+ Register index = r1;
+ Register object = r2;
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r2);
- __ bind(&exit);
+ // We need two extra registers.
+ Register scratch = r3;
+ Register result = r0;
+
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object,
+ index,
+ scratch,
+ result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->EmitPush(result);
}
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
+ Load(args->at(0));
+
Register code = r1;
- Register scratch = ip;
Register result = r0;
- LoadAndSpill(args->at(0));
frame_->EmitPop(code);
- StringHelper::GenerateCharFromCode(masm_,
- code,
- scratch,
- result,
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code, result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->EmitPush(result);
}
-void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result_, Operand(Smi::FromInt(0)));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
VirtualFrame::SpilledScope spilled_scope(frame_);
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ Register index = r1;
+ Register object = r2;
+
+ frame_->EmitPop(r1);
+ frame_->EmitPop(r2);
+
+ // We need three extra registers.
+ Register scratch1 = r3;
+ Register scratch2 = r4;
+ Register result = r0;
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object,
+ index,
+ scratch1,
+ scratch2,
+ result);
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->EmitPush(result);
+}
+
+
+void CodeGenerator::GenerateIsArray(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
- frame_->EmitPop(r0);
- __ and_(r1, r0, Operand(kSmiTagMask));
- __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+ Register possible_array = frame_->PopToRegister();
+ Register scratch = VirtualFrame::scratch0();
+ __ and_(scratch, possible_array, Operand(kSmiTagMask));
+ __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
answer.Branch(ne);
// It is a heap object - get the map. Check if the object is a JS array.
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+ __ CompareObjectType(possible_array, scratch, scratch, JS_ARRAY_TYPE);
answer.Bind();
cc_reg_ = eq;
}
void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
JumpTarget answer;
// We need the CC bits to come out as not_equal in the case where the
// object is a smi. This can't be done with the usual test opcode so
// we use XOR to get the right CC bits.
- frame_->EmitPop(r0);
- __ and_(r1, r0, Operand(kSmiTagMask));
- __ eor(r1, r1, Operand(kSmiTagMask), SetCC);
+ Register possible_regexp = frame_->PopToRegister();
+ Register scratch = VirtualFrame::scratch0();
+ __ and_(scratch, possible_regexp, Operand(kSmiTagMask));
+ __ eor(scratch, scratch, Operand(kSmiTagMask), SetCC);
answer.Branch(ne);
// It is a heap object - get the map. Check if the object is a regexp.
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+ __ CompareObjectType(possible_regexp, scratch, scratch, JS_REGEXP_TYPE);
answer.Bind();
cc_reg_ = eq;
}
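The rewritten string primitives all follow the same shape: a StringChar*Generator emits the inline fast case (flat string, in-range smi index), and a DeferredCode subclass owns the slow case plus the two bailout labels. The sentinel loads encode the JS-visible semantics: charCodeAt must produce NaN for an out-of-range index while charAt produces the empty string, and a non-smi index first goes through number conversion. A simplified model of the fast-case contract (illustrative only):

    #include <cstdint>

    enum Bailout { kNone, kNeedConversion, kIndexOutOfRange };

    // Sketch: what GenerateFast promises; each failure maps to a label above.
    Bailout FastCharCodeAt(const uint8_t* flat_chars, int length,
                           int index, bool index_is_smi, int* char_code) {
      if (!index_is_smi) return kNeedConversion;          // need_conversion_
      if (index < 0 || index >= length) return kIndexOutOfRange;
      *char_code = flat_chars[index];                     // 8-bit flat string case
      return kNone;
    }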
@@ -4194,28 +4332,27 @@ void CodeGenerator::GenerateIsRegExp(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (typeof(arg) === 'object' || %_ClassOf(arg) == 'RegExp')
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r1);
- __ tst(r1, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_object = frame_->PopToRegister();
+ __ tst(possible_object, Operand(kSmiTagMask));
false_target()->Branch(eq);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r1, ip);
+ __ cmp(possible_object, ip);
true_target()->Branch(eq);
- Register map_reg = r2;
- __ ldr(map_reg, FieldMemOperand(r1, HeapObject::kMapOffset));
+ Register map_reg = VirtualFrame::scratch0();
+ __ ldr(map_reg, FieldMemOperand(possible_object, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kBitFieldOffset));
+ __ tst(possible_object, Operand(1 << Map::kIsUndetectable));
false_target()->Branch(ne);
- __ ldrb(r1, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_JS_OBJECT_TYPE));
+ __ ldrb(possible_object, FieldMemOperand(map_reg, Map::kInstanceTypeOffset));
+ __ cmp(possible_object, Operand(FIRST_JS_OBJECT_TYPE));
false_target()->Branch(lt);
- __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+ __ cmp(possible_object, Operand(LAST_JS_OBJECT_TYPE));
cc_reg_ = le;
}
@@ -4223,28 +4360,29 @@ void CodeGenerator::GenerateIsObject(ZoneList<Expression*>* args) {
void CodeGenerator::GenerateIsFunction(ZoneList<Expression*>* args) {
// This generates a fast version of:
// (%_ClassOf(arg) === 'Function')
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_function = frame_->PopToRegister();
+ __ tst(possible_function, Operand(kSmiTagMask));
false_target()->Branch(eq);
- Register map_reg = r2;
- __ CompareObjectType(r0, map_reg, r1, JS_FUNCTION_TYPE);
+ Register map_reg = VirtualFrame::scratch0();
+ Register scratch = VirtualFrame::scratch1();
+ __ CompareObjectType(possible_function, map_reg, scratch, JS_FUNCTION_TYPE);
cc_reg_ = eq;
}
void CodeGenerator::GenerateIsUndetectableObject(ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope(frame_);
ASSERT(args->length() == 1);
- LoadAndSpill(args->at(0));
- frame_->EmitPop(r0);
- __ tst(r0, Operand(kSmiTagMask));
+ Load(args->at(0));
+ Register possible_undetectable = frame_->PopToRegister();
+ __ tst(possible_undetectable, Operand(kSmiTagMask));
false_target()->Branch(eq);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ Register scratch = VirtualFrame::scratch0();
+ __ ldr(scratch,
+ FieldMemOperand(possible_undetectable, HeapObject::kMapOffset));
+ __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ tst(scratch, Operand(1 << Map::kIsUndetectable));
cc_reg_ = ne;
}
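The rewritten predicates stop pinning r0/r1 and instead pull their operand through PopToRegister() and the virtual-frame scratch registers, which keeps frame state in registers across the check. The IsObject fast path is the spec's typeof logic; as a decision procedure (sketch; the instance-type constants come from objects.h):

    // Sketch of the %_IsObject check on a value and its map.
    bool IsObjectResult(bool is_smi, bool is_null,
                        bool is_undetectable, int instance_type) {
      if (is_smi) return false;           // smis are numbers
      if (is_null) return true;           // typeof null == 'object'
      if (is_undetectable) return false;  // undetectable acts like undefined
      return FIRST_JS_OBJECT_TYPE <= instance_type &&
             instance_type <= LAST_JS_OBJECT_TYPE;
    }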
@@ -4305,7 +4443,7 @@ void CodeGenerator::GenerateArguments(ZoneList<Expression*>* args) {
// Satisfy contract with ArgumentsAccessStub:
// Load the key into r1 and the formal parameters count into r0.
- LoadAndSpill(args->at(0));
+ Load(args->at(0));
frame_->EmitPop(r1);
__ mov(r0, Operand(Smi::FromInt(scope()->num_parameters())));
@@ -4377,6 +4515,7 @@ void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
Load(args->at(1));
StringAddStub stub(NO_STRING_ADD_FLAGS);
+ frame_->SpillAll();
frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -4390,6 +4529,7 @@ void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
Load(args->at(2));
SubStringStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 3);
frame_->EmitPush(r0);
}
@@ -4402,6 +4542,7 @@ void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
Load(args->at(1));
StringCompareStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 2);
frame_->EmitPush(r0);
}
@@ -4415,6 +4556,7 @@ void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
Load(args->at(2));
Load(args->at(3));
RegExpExecStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 4);
frame_->EmitPush(r0);
}
@@ -4488,7 +4630,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(r2, Operand(Factory::fixed_array_map()));
__ str(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
// Set FixedArray length.
- __ str(r5, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ mov(r6, Operand(r5, LSL, kSmiTagSize));
+ __ str(r6, FieldMemOperand(r3, FixedArray::kLengthOffset));
// Fill contents of fixed-array with the-hole.
__ mov(r2, Operand(Factory::the_hole_value()));
__ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -4548,12 +4691,14 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
Top::global_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort("Attempt to use undefined cache.");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- frame_->EmitPush(r0);
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
return;
}
Load(args->at(1));
+
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
frame_->EmitPop(r2);
__ ldr(r1, ContextOperand(cp, Context::GLOBAL_INDEX));
@@ -4589,6 +4734,7 @@ void CodeGenerator::GenerateNumberToString(ZoneList<Expression*>* args) {
Load(args->at(0));
NumberToStringStub stub;
+ frame_->SpillAll();
frame_->CallStub(&stub, 1);
frame_->EmitPush(r0);
}
@@ -4625,6 +4771,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
Load(args->at(1));
Load(args->at(2));
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
Register index2 = r2;
Register index1 = r1;
Register object = r0;
@@ -4749,7 +4897,6 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
#ifdef DEBUG
int original_height = frame_->height();
#endif
- VirtualFrame::SpilledScope spilled_scope(frame_);
if (CheckForInlineRuntimeCall(node)) {
ASSERT((has_cc() && frame_->height() == original_height) ||
(!has_cc() && frame_->height() == original_height + 1));
@@ -4763,17 +4910,21 @@ void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (function == NULL) {
// Prepare stack for calling JS runtime function.
// Push the builtins object found in the current global object.
- __ ldr(r1, GlobalObject());
- __ ldr(r0, FieldMemOperand(r1, GlobalObject::kBuiltinsOffset));
- frame_->EmitPush(r0);
+ Register scratch = VirtualFrame::scratch0();
+ __ ldr(scratch, GlobalObject());
+ Register builtins = frame_->GetTOSRegister();
+ __ ldr(builtins, FieldMemOperand(scratch, GlobalObject::kBuiltinsOffset));
+ frame_->EmitPush(builtins);
}
// Push the arguments ("left-to-right").
int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
- LoadAndSpill(args->at(i));
+ Load(args->at(i));
}
+ VirtualFrame::SpilledScope spilled_scope(frame_);
+
if (function == NULL) {
// Call the JS runtime function.
__ mov(r2, Operand(node->name()));
@@ -4801,10 +4952,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Token::Value op = node->op();
if (op == Token::NOT) {
- LoadConditionAndSpill(node->expression(),
- false_target(),
- true_target(),
- true);
+ LoadCondition(node->expression(), false_target(), true_target(), true);
// LoadCondition may (and usually does) leave a test and branch to
// be emitted by the caller. In that case, negate the condition.
if (has_cc()) cc_reg_ = NegateCondition(cc_reg_);
@@ -4813,43 +4961,42 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
Property* property = node->expression()->AsProperty();
Variable* variable = node->expression()->AsVariableProxy()->AsVariable();
if (property != NULL) {
- LoadAndSpill(property->obj());
- LoadAndSpill(property->key());
+ Load(property->obj());
+ Load(property->key());
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else if (variable != NULL) {
Slot* slot = variable->slot();
if (variable->is_global()) {
LoadGlobal();
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else if (slot != NULL && slot->type() == Slot::LOOKUP) {
// Look up the context holding the named variable.
frame_->EmitPush(cp);
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->CallRuntime(Runtime::kLookupContext, 2);
// r0: context
frame_->EmitPush(r0);
- __ mov(r0, Operand(variable->name()));
- frame_->EmitPush(r0);
+ frame_->EmitPush(Operand(variable->name()));
frame_->InvokeBuiltin(Builtins::DELETE, CALL_JS, 2);
+ frame_->EmitPush(r0);
} else {
// Default: Result of deleting non-global, not dynamically
// introduced variables is false.
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
+ frame_->EmitPushRoot(Heap::kFalseValueRootIndex);
}
} else {
// Default: Result of deleting expressions is true.
- LoadAndSpill(node->expression()); // may have side-effects
+ Load(node->expression()); // may have side-effects
frame_->Drop();
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
+ frame_->EmitPushRoot(Heap::kTrueValueRootIndex);
}
- frame_->EmitPush(r0);
} else if (op == Token::TYPEOF) {
// Special case for loading the typeof expression; see comment on
@@ -4862,8 +5009,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- LoadAndSpill(node->expression());
- frame_->EmitPop(r0);
+ Load(node->expression());
switch (op) {
case Token::NOT:
case Token::DELETE:
@@ -4872,13 +5018,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
break;
case Token::SUB: {
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
+ frame_->EmitPush(r0); // r0 has result
break;
}
case Token::BIT_NOT: {
// smi check
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
JumpTarget smi_label;
JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
@@ -4892,16 +5043,18 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ mvn(r0, Operand(r0));
__ bic(r0, r0, Operand(kSmiTagMask)); // bit-clear inverted smi-tag
continue_label.Bind();
+ frame_->EmitPush(r0); // r0 has result
break;
}
case Token::VOID:
- // since the stack top is cached in r0, popping and then
- // pushing a value can be done by just writing to r0.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ frame_->Drop();
+ frame_->EmitPushRoot(Heap::kUndefinedValueRootIndex);
break;
case Token::ADD: {
+ VirtualFrame::SpilledScope spilled(frame_);
+ frame_->EmitPop(r0);
// Smi check.
JumpTarget continue_label;
__ tst(r0, Operand(kSmiTagMask));
@@ -4909,12 +5062,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
frame_->EmitPush(r0);
frame_->InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS, 1);
continue_label.Bind();
+ frame_->EmitPush(r0); // r0 has result
break;
}
default:
UNREACHABLE();
}
- frame_->EmitPush(r0); // r0 has result
}
ASSERT(!has_valid_frame() ||
(has_cc() && frame_->height() == original_height) ||
@@ -5042,10 +5195,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
VirtualFrame::SpilledScope spilled_scope(frame_);
if (node->op() == Token::AND) {
JumpTarget is_true;
- LoadConditionAndSpill(node->left(),
- &is_true,
- false_target(),
- false);
+ LoadCondition(node->left(), &is_true, false_target(), false);
if (has_valid_frame() && !has_cc()) {
// The left-hand side result is on top of the virtual frame.
JumpTarget pop_and_continue;
@@ -5064,7 +5214,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Evaluate right side expression.
is_true.Bind();
- LoadAndSpill(node->right());
+ Load(node->right());
// Exit (always with a materialized value).
exit.Bind();
@@ -5076,10 +5226,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Branch(false, false_target());
}
is_true.Bind();
- LoadConditionAndSpill(node->right(),
- true_target(),
- false_target(),
- false);
+ LoadCondition(node->right(), true_target(), false_target(), false);
} else {
// Nothing to do.
ASSERT(!has_valid_frame() && !has_cc() && !is_true.is_linked());
@@ -5088,10 +5235,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
} else {
ASSERT(node->op() == Token::OR);
JumpTarget is_false;
- LoadConditionAndSpill(node->left(),
- true_target(),
- &is_false,
- false);
+ LoadCondition(node->left(), true_target(), &is_false, false);
if (has_valid_frame() && !has_cc()) {
// The left-hand side result is on top of the virtual frame.
JumpTarget pop_and_continue;
@@ -5110,7 +5254,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
// Evaluate right side expression.
is_false.Bind();
- LoadAndSpill(node->right());
+ Load(node->right());
// Exit (always with a materialized value).
exit.Bind();
@@ -5122,10 +5266,7 @@ void CodeGenerator::GenerateLogicalBooleanOperation(BinaryOperation* node) {
Branch(true, true_target());
}
is_false.Bind();
- LoadConditionAndSpill(node->right(),
- true_target(),
- false_target(),
- false);
+ LoadCondition(node->right(), true_target(), false_target(), false);
} else {
// Nothing to do.
ASSERT(!has_valid_frame() && !has_cc() && !is_false.is_linked());
@@ -5392,8 +5533,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::IN: {
VirtualFrame::SpilledScope scope(frame_);
- LoadAndSpill(left);
- LoadAndSpill(right);
+ Load(left);
+ Load(right);
frame_->InvokeBuiltin(Builtins::IN, CALL_JS, 2);
frame_->EmitPush(r0);
break;
@@ -5401,8 +5542,8 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
case Token::INSTANCEOF: {
VirtualFrame::SpilledScope scope(frame_);
- LoadAndSpill(left);
- LoadAndSpill(right);
+ Load(left);
+ Load(right);
InstanceofStub stub;
frame_->CallStub(&stub, 2);
// At this point if instanceof succeeded then r0 == 0.
@@ -5435,11 +5576,19 @@ class DeferredReferenceGetNamedValue: public DeferredCode {
};
+// The convention here is that on entry the receiver is in a register that
+// is not used by the stack. On exit the answer is found in that same
+// register and the stack has the same height.
void DeferredReferenceGetNamedValue::Generate() {
- ASSERT(receiver_.is(r0) || receiver_.is(r1));
+#ifdef DEBUG
+ int expected_height = frame_state()->frame()->height();
+#endif
+ VirtualFrame copied_frame(*frame_state()->frame());
+ copied_frame.SpillAll();
Register scratch1 = VirtualFrame::scratch0();
Register scratch2 = VirtualFrame::scratch1();
+ ASSERT(!receiver_.is(scratch1) && !receiver_.is(scratch2));
__ DecrementCounter(&Counters::named_load_inline, 1, scratch1, scratch2);
__ IncrementCounter(&Counters::named_load_inline_miss, 1, scratch1, scratch2);
@@ -5455,11 +5604,23 @@ void DeferredReferenceGetNamedValue::Generate() {
// in-object has been inlined.
__ nop(PROPERTY_ACCESS_INLINED);
+ // At this point the answer is in r0. We move it to the expected register
+ // if necessary.
+ __ Move(receiver_, r0);
+
+ // Now go back to the frame that we entered with. This will not overwrite
+ // the receiver register since that register was not in use when we came
+ // in. The instructions emitted by this merge are skipped over by the
+ // inline load patching mechanism when looking for the branch instruction
+ // that tells it where the code to patch is.
+ copied_frame.MergeTo(frame_state()->frame());
+
// Block the constant pool for one more instruction after leaving this
// constant pool block scope to include the branch instruction ending the
// deferred code.
__ BlockConstPoolFor(1);
}
+ ASSERT_EQ(expected_height, frame_state()->frame()->height());
}
@@ -5560,6 +5721,7 @@ void DeferredReferenceSetKeyedValue::Generate() {
}
+// Consumes the top of stack (the receiver) and pushes the result instead.
void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
Comment cmnt(masm(), "[ Load from named Property");
@@ -5568,6 +5730,7 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
is_contextual
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET);
+ frame_->EmitPush(r0); // Push answer.
} else {
// Inline the in-object property case.
Comment cmnt(masm(), "[ Inlined named property load");
@@ -5584,7 +5747,6 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
// Load the receiver from the stack.
Register receiver = frame_->PopToRegister();
- VirtualFrame::SpilledScope spilled(frame_);
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(receiver, name);
@@ -5600,16 +5762,19 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
__ tst(receiver, Operand(kSmiTagMask));
deferred->Branch(eq);
+ Register scratch = VirtualFrame::scratch0();
+ Register scratch2 = VirtualFrame::scratch1();
+
// Check the map. The null map used below is patched by the inline cache
- // code.
- __ ldr(r2, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ mov(r3, Operand(Factory::null_value()));
- __ cmp(r2, r3);
+ // code. Therefore we can't use a LoadRoot call.
+ __ ldr(scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ mov(scratch2, Operand(Factory::null_value()));
+ __ cmp(scratch, scratch2);
deferred->Branch(ne);
// Initially use an invalid index. The index will be patched by the
// inline cache code.
- __ ldr(r0, MemOperand(receiver, 0));
+ __ ldr(receiver, MemOperand(receiver, 0));
// Make sure that the expected number of instructions are generated.
ASSERT_EQ(kInlinedNamedLoadInstructions,
@@ -5617,6 +5782,9 @@ void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
}
deferred->BindExit();
+ // At this point the receiver register has the result, either from the
+ // deferred code or from the inlined code.
+ frame_->EmitPush(receiver);
}
}
@@ -5690,7 +5858,7 @@ void CodeGenerator::EmitKeyedLoad() {
// Check that key is within bounds. Use unsigned comparison to handle
// negative keys.
__ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ cmp(scratch2, Operand(key, ASR, kSmiTagSize));
+ __ cmp(scratch2, key);
deferred->Branch(ls); // Unsigned less equal.
// Load and check that the result is not the hole (key is a smi).
@@ -5832,6 +6000,27 @@ Handle<String> Reference::GetName() {
}
+void Reference::DupIfPersist() {
+ if (persist_after_get_) {
+ switch (type_) {
+ case KEYED:
+ cgen_->frame()->Dup2();
+ break;
+ case NAMED:
+ cgen_->frame()->Dup();
+ // Fall through.
+ case UNLOADED:
+ case ILLEGAL:
+ case SLOT:
+ // Do nothing.
+ ;
+ }
+ } else {
+ set_unloaded();
+ }
+}
+
+
void Reference::GetValue() {
ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal());
@@ -5847,10 +6036,8 @@ void Reference::GetValue() {
Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL);
+ DupIfPersist();
cgen_->LoadFromSlotCheckForArguments(slot, NOT_INSIDE_TYPEOF);
- if (!persist_after_get_) {
- cgen_->UnloadReference(this);
- }
break;
}
@@ -5858,23 +6045,17 @@ void Reference::GetValue() {
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
- if (persist_after_get_) {
- cgen_->frame()->Dup();
- }
- cgen_->EmitNamedLoad(GetName(), is_global);
- cgen_->frame()->EmitPush(r0);
- if (!persist_after_get_) set_unloaded();
+ Handle<String> name = GetName();
+ DupIfPersist();
+ cgen_->EmitNamedLoad(name, is_global);
break;
}
case KEYED: {
ASSERT(property != NULL);
- if (persist_after_get_) {
- cgen_->frame()->Dup2();
- }
+ DupIfPersist();
cgen_->EmitKeyedLoad();
cgen_->frame()->EmitPush(r0);
- if (!persist_after_get_) set_unloaded();
break;
}
@@ -5991,8 +6172,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(r2, Heap::kContextMapRootIndex);
__ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ mov(r2, Operand(length));
- __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
+ __ mov(r2, Operand(Smi::FromInt(length)));
+ __ str(r2, FieldMemOperand(r0, FixedArray::kLengthOffset));
// Setup the fixed slots.
__ mov(r1, Operand(Smi::FromInt(0)));
@@ -6623,8 +6804,8 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ ldr(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
- // Divide length by two (length is not a smi).
- __ mov(mask, Operand(mask, ASR, 1));
+ // Divide length by two (length is a smi).
+ __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
__ sub(mask, mask, Operand(1)); // Make mask.
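With the length smi-tagged, a single arithmetic shift by kSmiTagSize + 1 both untags it and divides it by two. A worked check of that claim, assuming kSmiTagSize == 1 (illustrative, not part of the patch):

    // A cache of 64 fields stores 32 (number, string) pairs; the tagged length is 128.
    static_assert(((64 << 1) >> 2) == 64 / 2,
                  "ASR by kSmiTagSize + 1 untags and halves in one step");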
// Calculate the entry in the number string cache. The hash value in the
@@ -8515,9 +8696,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ cmp(r1, Operand(0));
__ b(eq, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ ldr(r2, MemOperand(sp, 1 * kPointerSize));
- __ mov(r1, Operand(r1, LSR, kSmiTagSize));
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -8526,6 +8706,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ LoadRoot(r3, Heap::kFixedArrayMapRootIndex);
__ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset));
__ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ mov(r1, Operand(r1, LSR, kSmiTagSize)); // Untag the length for the loop.
// Copy the fixed array slots.
Label loop;
@@ -8676,7 +8857,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(r0,
FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
__ add(r2, r2, Operand(RegExpImpl::kLastMatchOverhead));
- __ cmp(r2, r0);
+ __ cmp(r2, Operand(r0, ASR, kSmiTagSize));
__ b(gt, &runtime);
// subject: Subject string
@@ -9009,142 +9190,195 @@ int CompareStub::MinorKey() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- __ BranchOnSmi(object, receiver_not_string);
+ __ BranchOnSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
- __ tst(result, Operand(kIsNotStringMask));
- __ b(ne, receiver_not_string);
+ __ tst(result_, Operand(kIsNotStringMask));
+ __ b(ne, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- __ BranchOnNotSmi(index, index_not_smi);
+ __ BranchOnNotSmi(index_, &index_not_smi_);
+
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
// Check for index out of range.
- __ ldr(scratch, FieldMemOperand(object, String::kLengthOffset));
- // Now scratch has the length of the string. Compare with the index.
- __ cmp(scratch, Operand(index));
- __ b(ls, index_out_of_range);
-
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
+ __ cmp(ip, Operand(scratch_));
+ __ b(ls, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT_EQ(0, kSeqStringTag);
- __ tst(result, Operand(kStringRepresentationMask));
- __ b(ne, &not_a_flat_string);
-
- // Check for 1-byte or 2-byte string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ tst(result, Operand(kStringEncodingMask));
- __ b(ne, &ascii_string);
-
- // 2-byte string. We can add without shifting since the Smi tag size is the
- // log2 of the number of bytes in a two-byte character.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiShiftSize);
- __ add(scratch, object, Operand(index));
- __ ldrh(result, FieldMemOperand(scratch, SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(eq, &flat_string);
// Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, result, Operand(kStringRepresentationMask));
- __ cmp(result, Operand(kConsStringTag));
- __ b(ne, slow_case);
+ __ tst(result_, Operand(kIsConsStringMask));
+ __ b(eq, &call_runtime_);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
- __ ldr(result, FieldMemOperand(object, ConsString::kSecondOffset));
- __ LoadRoot(scratch, Heap::kEmptyStringRootIndex);
- __ cmp(result, Operand(scratch));
- __ b(ne, slow_case);
-
+ __ ldr(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
+ __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(ne, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ ldr(object, FieldMemOperand(object, ConsString::kFirstOffset));
- __ ldr(result, FieldMemOperand(object, HeapObject::kMapOffset));
- __ ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
+ __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
+ __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ tst(result_, Operand(kStringRepresentationMask));
+ __ b(nz, &call_runtime_);
+
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ tst(result_, Operand(kStringEncodingMask));
+ __ b(nz, &ascii_string);
+
+ // 2-byte string.
+ // Load the 2-byte character code into the result register. We can
+ // add without shifting since the smi tag size is the log2 of the
+ // number of bytes in a two-byte character.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+ __ add(scratch_, object_, Operand(scratch_));
+ __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
// ASCII string.
+ // Load the byte into the result register.
__ bind(&ascii_string);
- __ add(scratch, object, Operand(index, LSR, kSmiTagSize));
- __ ldrb(result, FieldMemOperand(scratch, SeqAsciiString::kHeaderSize));
+ __ add(scratch_, object_, Operand(scratch_, LSR, kSmiTagSize));
+ __ ldrb(result_, FieldMemOperand(scratch_, SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ mov(result, Operand(result, LSL, kSmiTagSize));
+ __ mov(result_, Operand(result_, LSL, kSmiTagSize));
+ __ bind(&exit_);
}
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register scratch,
- Register result,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
- Label slow_case;
- Label exit;
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, scratch_,
+ Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_, result_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(r0)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, r0);
+ }
+ __ pop(result_);
+ __ pop(index_);
+ __ pop(object_);
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ BranchOnNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code for getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ Push(object_, index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(r0)) {
+ __ mov(result_, r0);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(code, Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case);
+ __ tst(code_,
+ Operand(kSmiTagMask |
+ ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
+ __ b(nz, &slow_case_);
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ // At this point the code register contains a smi-tagged ASCII char code.
ASSERT(kSmiTag == 0);
- __ mov(result, Operand(Factory::single_character_string_cache()));
- __ add(result, result, Operand(code, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(result, MemOperand(result, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ cmp(result, scratch);
- __ b(eq, &slow_case);
- __ b(&exit);
+ __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
+ __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+ __ cmp(result_, Operand(ip));
+ __ b(eq, &slow_case_);
+ __ bind(&exit_);
+}
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(r0)) {
- __ mov(result, r0);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(r0));
- __ push(code);
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(r0));
- __ Ret();
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(r0)) {
+ __ mov(result_, r0);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
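The flat-string index arithmetic above leans on the same smi layout: the smi-tagged index (index << 1) is already the byte offset of a two-byte character, so the two-byte case adds it unshifted, while the ASCII case shifts it right once to get a byte index. A small check of that reasoning, assuming kSmiTagSize == 1:

    #include <cassert>
    int main() {
      int index = 7;
      int smi_index = index << 1;  // smi-tagged index
      assert(smi_index == index * 2);         // two-byte chars: add the smi as-is
      assert((smi_index >> 1) == index * 1);  // ASCII chars: LSR #kSmiTagSize first
      return 0;
    }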
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 361ea131de..d9df82cd15 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -101,6 +101,11 @@ class Reference BASE_EMBEDDED {
// is popped from beneath it (unloaded).
void SetValue(InitState init_state);
+ // This is in preparation for something that uses the reference on the stack.
+ // If we need this reference after the get, then dup it now. Otherwise mark
+ // it as used.
+ inline void DupIfPersist();
+
private:
CodeGenerator* cgen_;
Expression* expression_;
@@ -252,16 +257,6 @@ class CodeGenerator: public AstVisitor {
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
- // Visit a statement and then spill the virtual frame if control flow can
- // reach the end of the statement (ie, it does not exit via break,
- // continue, return, or throw). This function is used temporarily while
- // the code generator is being transformed.
- inline void VisitAndSpill(Statement* statement);
-
- // Visit a list of statements and then spill the virtual frame if control
- // flow can reach the end of the list.
- inline void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
-
// Main code generation function
void Generate(CompilationInfo* info);
@@ -299,19 +294,6 @@ class CodeGenerator: public AstVisitor {
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
- // Generate code to push the value of an expression on top of the frame
- // and then spill the frame fully to memory. This function is used
- // temporarily while the code generator is being transformed.
- inline void LoadAndSpill(Expression* expression);
-
- // Call LoadCondition and then spill the virtual frame unless control flow
- // cannot reach the end of the expression (ie, by emitting only
- // unconditional jumps to the control targets).
- inline void LoadConditionAndSpill(Expression* expression,
- JumpTarget* true_target,
- JumpTarget* false_target,
- bool force_control);
-
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
@@ -445,10 +427,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for String.fromCharCode.
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -693,38 +678,6 @@ class GenericBinaryOpStub : public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be r0 register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register scratch,
- Register result,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersLong adds too much
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 57c5c1c00c..e36f595c3d 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -66,10 +66,15 @@
# define CAN_USE_THUMB_INSTRUCTIONS 1
#endif
-// Simulator should support ARM5 instructions.
+// Simulator should support ARM5 instructions and unaligned access by default.
#if !defined(__arm__)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
+
+# ifndef CAN_USE_UNALIGNED_ACCESSES
+# define CAN_USE_UNALIGNED_ACCESSES 1
+# endif
+
#endif
#if CAN_USE_UNALIGNED_ACCESSES
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index 0ac7d19f66..1c05bc3a4a 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -401,6 +401,20 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
PrintCondition(instr);
return 4;
}
+ case 'f': { // 'f: bitfield instructions - v7 and above.
+ uint32_t lsbit = instr->Bits(11, 7);
+ uint32_t width = instr->Bits(20, 16) + 1;
+ if (instr->Bit(21) == 0) {
+ // BFC/BFI:
+ // Bits 20-16 represent the most-significant bit. Convert to width.
+ width -= lsbit;
+ ASSERT(width > 0);
+ }
+ ASSERT((width + lsbit) <= 32);
+ out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "#%d, #%d", lsbit, width);
+ return 1;
+ }
case 'h': { // 'h: halfword operation for extra loads and stores
if (instr->HasH()) {
Print("h");
@@ -446,16 +460,6 @@ int Decoder::FormatOption(Instr* instr, const char* format) {
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", instr->Offset12Field());
return 5;
- } else if ((format[3] == '1') && (format[4] == '6')) {
- ASSERT(STRING_STARTS_WITH(format, "off16to20"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->Bits(20, 16) +1);
- return 9;
- } else if (format[3] == '7') {
- ASSERT(STRING_STARTS_WITH(format, "off7to11"));
- out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
- "%d", instr->ShiftAmountField());
- return 8;
} else if (format[3] == '0') {
// 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
@@ -882,10 +886,26 @@ void Decoder::DecodeType3(Instr* instr) {
case 3: {
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
- Format(instr, "ubfx'cond 'rd, 'rm, #'off7to11, #'off16to20");
+ if (instr->Bit(22)) {
+ Format(instr, "ubfx'cond 'rd, 'rm, 'f");
+ } else {
+ Format(instr, "sbfx'cond 'rd, 'rm, 'f");
+ }
+ } else {
+ UNREACHABLE();
+ }
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ if (instr->RmField() == 15) {
+ Format(instr, "bfc'cond 'rd, 'f");
+ } else {
+ Format(instr, "bfi'cond 'rd, 'rm, 'f");
+ }
} else {
UNREACHABLE();
}
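For reference, the semantics of the four ARMv7 bitfield instructions the disassembler (and, below, the simulator) now handles can be sketched in plain C++. The lsb and width parameters correspond to the 'f fields decoded above; the sketch assumes 0 < width, width < 32, and lsb + width <= 32:

    #include <cstdint>
    uint32_t ubfx(uint32_t rm, int lsb, int width) {  // unsigned bitfield extract
      return (rm << (32 - lsb - width)) >> (32 - width);
    }
    int32_t sbfx(int32_t rm, int lsb, int width) {    // signed bitfield extract
      int32_t shifted = static_cast<int32_t>(
          static_cast<uint32_t>(rm) << (32 - lsb - width));
      return shifted >> (32 - width);  // arithmetic shift sign-extends the field
    }
    uint32_t bfc(uint32_t rd, int lsb, int width) {   // bitfield clear
      uint32_t mask = ((1u << width) - 1) << lsb;
      return rd & ~mask;
    }
    uint32_t bfi(uint32_t rd, uint32_t rm, int lsb, int width) {  // bitfield insert
      uint32_t mask = ((1u << width) - 1) << lsb;
      return (rd & ~mask) | ((rm << lsb) & mask);
    }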
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index fecc2137d3..5fa412ffc9 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -917,7 +917,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Setup the four remaining stack slots.
__ push(r0); // Map.
__ ldr(r1, FieldMemOperand(r2, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
// Push enumeration cache, enumeration cache length (as smi) and zero.
__ Push(r2, r1, r0);
@@ -928,7 +927,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ mov(r1, Operand(Smi::FromInt(0))); // Map (0) - force slow check.
__ Push(r1, r0);
__ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r1, Operand(r1, LSL, kSmiTagSize));
__ mov(r0, Operand(Smi::FromInt(0)));
__ Push(r1, r0); // Fixed array length (as smi) and initial index.
@@ -1829,76 +1827,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2349,49 +2277,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ tst(r0, Operand(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ b(nz, &slow_case);
- __ mov(r1, Operand(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ ldr(r1, MemOperand(r1, FixedArray::kHeaderSize - kHeapObjectTag));
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r1, r2);
- __ b(eq, &slow_case);
- __ mov(r0, r1);
- __ b(&done);
+ Label done;
+ StringCharFromCodeGenerator generator(r0, r1);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
- __ bind(&slow_case);
- __ push(r0);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, r0);
+ Apply(context_, r1);
}
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- Apply(context_, r0);
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = r1;
+ Register index = r0;
+ Register scratch = r2;
+ Register result = r3;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Load the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = r1;
+ Register index = r0;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ mov(result, Operand(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
+
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index ba318fd2ec..80c8765f92 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -163,11 +163,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
+ // Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// t0 - holds the untagged key on entry and holds the hash once computed.
- // Holds the result on exit if the load succeeded.
//
// t1 - used to hold the capacity mask of the dictionary
//
@@ -235,7 +235,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index and return.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ ldr(t0, FieldMemOperand(t2, kValueOffset));
+ __ ldr(key, FieldMemOperand(t2, kValueOffset));
}
@@ -579,7 +579,13 @@ static inline bool IsInlinedICSite(Address address,
}
Address address_after_nop = address_after_call + Assembler::kInstrSize;
Instr instr_after_nop = Assembler::instr_at(address_after_nop);
- ASSERT(Assembler::IsBranch(instr_after_nop));
+ // There may be some reg-reg move and frame merging code to skip over before
+ // the branch back from the DeferredReferenceGetKeyedValue code to the inlined
+ // code.
+ while (!Assembler::IsBranch(instr_after_nop)) {
+ address_after_nop += Assembler::kInstrSize;
+ instr_after_nop = Assembler::instr_at(address_after_nop);
+ }
// Find the end of the inlined code for handling the load.
int b_offset =
@@ -743,9 +749,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ BranchOnNotSmi(key, &slow);
- // Untag key into r2..
- __ mov(r2, Operand(key, ASR, kSmiTagSize));
-
// Get the elements array of the object.
__ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
@@ -754,12 +757,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_pixel_array);
// Check that the key (index) is within bounds.
- __ ldr(r3, FieldMemOperand(r4, Array::kLengthOffset));
- __ cmp(r2, r3);
+ __ ldr(r3, FieldMemOperand(r4, FixedArray::kLengthOffset));
+ __ cmp(key, Operand(r3));
__ b(hs, &slow);
// Fast case: Do the load.
__ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r2, MemOperand(r3, r2, LSL, kPointerSizeLog2));
+ // The key is a smi.
+ ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+ __ ldr(r2, MemOperand(r3, key, LSL, kPointerSizeLog2 - kSmiTagSize));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(r2, ip);
// In case the loaded value is the_hole we have to consult GetProperty
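The scaled load above is where keeping the key smi-tagged pays off: since key == index << kSmiTagSize, shifting it left by kPointerSizeLog2 - kSmiTagSize yields index * kPointerSize directly, with no separate untagging step. Worked out with the ARM values kPointerSizeLog2 == 2 and kSmiTagSize == 1 (illustrative only):

    // index 5 -> smi key 10 -> 10 << (2 - 1) == 20 == 5 * 4 bytes into the elements.
    static_assert(((5 << 1) << (2 - 1)) == 5 * 4,
                  "a smi key scales straight to a byte offset");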
@@ -770,7 +775,6 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
// r0: key
- // r2: untagged index
// r3: elements map
// r4: elements
__ bind(&check_pixel_array);
@@ -778,6 +782,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ cmp(r3, ip);
__ b(ne, &check_number_dictionary);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kLengthOffset));
+ __ mov(r2, Operand(key, ASR, kSmiTagSize));
__ cmp(r2, ip);
__ b(hs, &slow);
__ ldr(ip, FieldMemOperand(r4, PixelArray::kExternalPointerOffset));
@@ -788,14 +793,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
// r0: key
- // r2: untagged index
// r3: elements map
// r4: elements
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
__ cmp(r3, ip);
__ b(ne, &slow);
+ __ mov(r2, Operand(r0, ASR, kSmiTagSize));
GenerateNumberDictionaryLoad(masm, &slow, r4, r0, r2, r3, r5);
- __ mov(r0, r2);
__ Ret();
// Slow case, key and receiver still in r0 and r1.
@@ -808,70 +812,39 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ---------- S t a t e --------------
// -- lr : return address
- // -- r0 : key
+ // -- r0 : key (index)
// -- r1 : receiver
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
- Register object = r1;
+ Register receiver = r1;
Register index = r0;
- Register code = r2;
- Register scratch = r3;
+ Register scratch1 = r2;
+ Register scratch2 = r3;
+ Register result = r0;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ Ret();
- StringHelper::GenerateFastCharCodeAt(masm,
- object,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
-
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, scratch, r0, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CheckMap(index, scratch, Factory::heap_number_map(), &miss, true);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ Push(object, index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(r0));
- __ mov(code, r0);
- __ LeaveInternalFrame();
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- __ BranchOnSmi(code, &got_char_code);
- __ ldr(scratch, FieldMemOperand(code, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, ip);
- __ Assert(eq, "StringCharCodeAt must return smi or heap number");
- }
- __ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ cmp(code, scratch);
- __ b(ne, &got_char_code);
__ bind(&index_out_of_range);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
__ Ret();
__ bind(&miss);
- GenerateGeneric(masm);
+ GenerateMiss(masm);
}
@@ -1283,11 +1256,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
__ cmp(r4, ip);
__ b(ne, &check_pixel_array);
- // Untag the key (for checking against untagged length in the fixed array).
- __ mov(r4, Operand(key, ASR, kSmiTagSize));
- // Compute address to store into and check array bounds.
+ // Check array bounds. Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(r4, Operand(ip));
+ __ cmp(key, Operand(ip));
__ b(lo, &fast);
// Slow case, handle jump to runtime.
@@ -1333,9 +1304,9 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Condition code from comparing key and array length is still available.
__ b(ne, &slow); // Only support writing to array[array.length].
// Check for room in the elements backing store.
- __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag key.
+ // Both the key and the length of FixedArray are smis.
__ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ cmp(r4, Operand(ip));
+ __ cmp(key, Operand(ip));
__ b(hs, &slow);
// Calculate key + 1 as smi.
ASSERT_EQ(0, kSmiTag);
diff --git a/deps/v8/src/arm/jump-target-arm.cc b/deps/v8/src/arm/jump-target-arm.cc
index 3c43d168d8..f369acdbd3 100644
--- a/deps/v8/src/arm/jump-target-arm.cc
+++ b/deps/v8/src/arm/jump-target-arm.cc
@@ -69,18 +69,15 @@ void JumpTarget::DoBranch(Condition cc, Hint ignored) {
if (entry_frame_set_) {
// Backward branch. We have an expected frame to merge to on the
// backward edge.
- if (cc == al) {
- cgen()->frame()->MergeTo(&entry_frame_);
- } else {
- // We can't do conditional merges yet so you have to ensure that all
- // conditional branches to the JumpTarget have the same virtual frame.
- ASSERT(cgen()->frame()->Equals(&entry_frame_));
- }
+ cgen()->frame()->MergeTo(&entry_frame_, cc);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
__ b(cc, &entry_label_);
+ if (cc == al) {
+ cgen()->DeleteFrame();
+ }
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 29c48a400f..2a4f354d9f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -183,15 +183,18 @@ void MacroAssembler::Drop(int count, Condition cond) {
}
-void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+void MacroAssembler::Swap(Register reg1,
+ Register reg2,
+ Register scratch,
+ Condition cond) {
if (scratch.is(no_reg)) {
- eor(reg1, reg1, Operand(reg2));
- eor(reg2, reg2, Operand(reg1));
- eor(reg1, reg1, Operand(reg2));
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
+ eor(reg2, reg2, Operand(reg1), LeaveCC, cond);
+ eor(reg1, reg1, Operand(reg2), LeaveCC, cond);
} else {
- mov(scratch, reg1);
- mov(reg1, reg2);
- mov(reg2, scratch);
+ mov(scratch, reg1, LeaveCC, cond);
+ mov(reg1, reg2, LeaveCC, cond);
+ mov(reg2, scratch, LeaveCC, cond);
}
}
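The new Condition argument lets a preceding compare predicate the whole swap. A hypothetical use (register choices and context are assumptions, not taken from this patch):

    // Order two values so that r0 <= r1, without a branch.
    __ cmp(r0, Operand(r1));
    __ Swap(r0, r1, ip, gt);  // all three moves execute only when r0 > r1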
@@ -252,63 +255,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- // This is how much we shift the remembered set bit offset to get the
- // offset of the word in the remembered set. We divide by kBitsPerInt (32,
- // shift right 5) and then multiply by kIntSize (4, shift left 2).
- const int kRSetWordShift = 3;
-
- Label fast;
+ mov(ip, Operand(Page::kPageAlignmentMask)); // Load mask only once.
- // Compute the bit offset in the remembered set.
- // object: heap object pointer (with tag)
- // offset: offset to store location from the object
- mov(ip, Operand(Page::kPageAlignmentMask)); // load mask only once
- and_(scratch, object, Operand(ip)); // offset into page of the object
- add(offset, scratch, Operand(offset)); // add offset into the object
- mov(offset, Operand(offset, LSR, kObjectAlignmentBits));
+ // Calculate region number.
+ add(offset, object, Operand(offset)); // Add offset into the object.
+ and_(offset, offset, Operand(ip)); // Offset into page of the object.
+ mov(offset, Operand(offset, LSR, Page::kRegionSizeLog2));
- // Compute the page address from the heap object pointer.
- // object: heap object pointer (with tag)
- // offset: bit offset of store position in the remembered set
+ // Calculate page address.
bic(object, object, Operand(ip));
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- // object: page start
- // offset: bit offset of store position in the remembered set
- cmp(offset, Operand(Page::kPageSize / kPointerSize));
- b(lt, &fast);
-
- // Adjust the bit offset to be relative to the start of the extra
- // remembered set and the start address to be the address of the extra
- // remembered set.
- sub(offset, offset, Operand(Page::kPageSize / kPointerSize));
- // Load the array length into 'scratch' and multiply by four to get the
- // size in bytes of the elements.
- ldr(scratch, MemOperand(object, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- mov(scratch, Operand(scratch, LSL, kObjectAlignmentBits));
- // Add the page header (including remembered set), array header, and array
- // body size to the page address.
- add(object, object, Operand(Page::kObjectStartOffset
- + FixedArray::kHeaderSize));
- add(object, object, Operand(scratch));
-
- bind(&fast);
- // Get address of the rset word.
- // object: start of the remembered set (page start for the fast case)
- // offset: bit offset of store position in the remembered set
- bic(scratch, offset, Operand(kBitsPerInt - 1)); // clear the bit offset
- add(object, object, Operand(scratch, LSR, kRSetWordShift));
- // Get bit offset in the rset word.
- // object: address of remembered set word
- // offset: bit offset of store position
- and_(offset, offset, Operand(kBitsPerInt - 1));
-
- ldr(scratch, MemOperand(object));
+ // Mark region dirty.
+ ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
mov(ip, Operand(1));
orr(scratch, scratch, Operand(ip, LSL, offset));
- str(scratch, MemOperand(object));
+ str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
}
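The replacement write barrier divides each page into fixed-size regions and keeps one dirty bit per region in a word at Page::kDirtyFlagOffset. A sketch of the address math, with illustrative constants (the real values live in spaces.h; the 8K page and 256-byte region sizes are assumptions):

    #include <cstdint>
    constexpr uintptr_t kPageAlignmentMask = (1u << 13) - 1;  // assumed 8K pages
    constexpr int kRegionSizeLog2 = 8;                        // assumed 256-byte regions

    uint32_t MarkRegionDirty(uintptr_t object, uintptr_t offset, uint32_t dirty_word) {
      uintptr_t region = ((object + offset) & kPageAlignmentMask) >> kRegionSizeLog2;
      return dirty_word | (1u << region);  // one bit per region of the page
    }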
@@ -336,7 +297,7 @@ void MacroAssembler::RecordWrite(Register object, Register offset,
Label done;
// First, test that the object is not in the new space. We cannot set
- // remembered set bits in the new space.
+ // region marks for new space pages.
InNewSpace(object, scratch, eq, &done);
// Record the actual write.
@@ -664,6 +625,7 @@ void MacroAssembler::InvokeFunction(Register fun,
ldr(expected_reg,
FieldMemOperand(code_reg,
SharedFunctionInfo::kFormalParameterCountOffset));
+ mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
ldr(code_reg,
MemOperand(code_reg, SharedFunctionInfo::kCodeOffset - kHeapObjectTag));
add(code_reg, code_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -1328,7 +1290,7 @@ void MacroAssembler::GetLeastBitsFromSmi(Register dst,
Register src,
int num_least_bits) {
if (CpuFeatures::IsSupported(ARMv7)) {
- ubfx(dst, src, Operand(kSmiTagSize), Operand(num_least_bits - 1));
+ ubfx(dst, src, kSmiTagSize, num_least_bits);
} else {
mov(dst, Operand(src, ASR, kSmiTagSize));
and_(dst, dst, Operand((1 << num_least_bits) - 1));
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 494f2b6926..4bfabb30cf 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -88,7 +88,10 @@ class MacroAssembler: public Assembler {
// Swap two registers. If the scratch register is omitted then a slightly
// less efficient form using xor instead of mov is emitted.
- void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+ void Swap(Register reg1,
+ Register reg2,
+ Register scratch = no_reg,
+ Condition cond = al);
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
@@ -114,16 +117,14 @@ class MacroAssembler: public Assembler {
Label* branch);
- // Set the remebered set bit for an offset into an
- // object. RecordWriteHelper only works if the object is not in new
- // space.
- void RecordWriteHelper(Register object, Register offset, Register scracth);
+ // For the page containing |object| mark the region covering [object+offset]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ void RecordWriteHelper(Register object, Register offset, Register scratch);
- // Sets the remembered set bit for [address+offset], where address is the
- // address of the heap object 'object'. The address must be in the first 8K
- // of an allocated page. The 'scratch' register is used in the
- // implementation and all 3 registers are clobbered by the operation, as
- // well as the ip register.
+ // For the page containing |object| mark the region covering [object+offset]
+ // dirty. The object address must be in the first 8K of an allocated page.
+ // The 'scratch' register is used in the implementation and all 3 registers
+ // are clobbered by the operation, as well as the ip register.
void RecordWrite(Register object, Register offset, Register scratch);
// Push two registers. Pushes leftmost register first (to highest address).
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index e72a8796dc..3bdca38eba 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -2031,7 +2031,6 @@ void Simulator::DecodeType2(Instr* instr) {
void Simulator::DecodeType3(Instr* instr) {
- ASSERT(instr->Bits(6, 4) == 0x5 || instr->Bit(4) == 0);
int rd = instr->RdField();
int rn = instr->RnField();
int32_t rn_val = get_register(rn);
@@ -2058,17 +2057,47 @@ void Simulator::DecodeType3(Instr* instr) {
break;
}
case 3: {
- // UBFX.
if (instr->HasW() && (instr->Bits(6, 4) == 0x5)) {
uint32_t widthminus1 = static_cast<uint32_t>(instr->Bits(20, 16));
- uint32_t lsbit = static_cast<uint32_t>(instr->ShiftAmountField());
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
uint32_t msbit = widthminus1 + lsbit;
if (msbit <= 31) {
- uint32_t rm_val =
- static_cast<uint32_t>(get_register(instr->RmField()));
- uint32_t extr_val = rm_val << (31 - msbit);
- extr_val = extr_val >> (31 - widthminus1);
- set_register(instr->RdField(), extr_val);
+ if (instr->Bit(22)) {
+ // ubfx - unsigned bitfield extract.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->RmField()));
+ uint32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->RdField(), extr_val);
+ } else {
+ // sbfx - signed bitfield extract.
+ int32_t rm_val = get_register(instr->RmField());
+ int32_t extr_val = rm_val << (31 - msbit);
+ extr_val = extr_val >> (31 - widthminus1);
+ set_register(instr->RdField(), extr_val);
+ }
+ } else {
+ UNREACHABLE();
+ }
+ return;
+ } else if (!instr->HasW() && (instr->Bits(6, 4) == 0x1)) {
+ uint32_t lsbit = static_cast<uint32_t>(instr->Bits(11, 7));
+ uint32_t msbit = static_cast<uint32_t>(instr->Bits(20, 16));
+ if (msbit >= lsbit) {
+ // bfc or bfi - bitfield clear/insert.
+ uint32_t rd_val =
+ static_cast<uint32_t>(get_register(instr->RdField()));
+ uint32_t bitcount = msbit - lsbit + 1;
+ uint32_t mask = (1 << bitcount) - 1;
+ rd_val &= ~(mask << lsbit);
+ if (instr->RmField() != 15) {
+ // bfi - bitfield insert.
+ uint32_t rm_val =
+ static_cast<uint32_t>(get_register(instr->RmField()));
+ rm_val &= mask;
+ rd_val |= rm_val << lsbit;
+ }
+ set_register(instr->RdField(), rd_val);
} else {
UNREACHABLE();
}
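
The simulator now decodes sbfx, bfc, and bfi alongside ubfx. The same bit manipulation as plain C++ helpers; this sketch mirrors the simulator's shift sequences and, like the simulator, relies on arithmetic right shift for the signed case:

#include <cstdint>

// ubfx: extract 'width' bits at 'lsb', zero-extended.
// Assumes 1 <= width and lsb + width <= 32, as the decoder checks.
uint32_t Ubfx(uint32_t rm, int lsb, int width) {
  int msb = lsb + width - 1;
  return (rm << (31 - msb)) >> (31 - (width - 1));
}

// sbfx: same field, sign-extended (arithmetic right shift assumed).
int32_t Sbfx(int32_t rm, int lsb, int width) {
  int msb = lsb + width - 1;
  int32_t shifted =
      static_cast<int32_t>(static_cast<uint32_t>(rm) << (31 - msb));
  return shifted >> (31 - (width - 1));
}

// bfi: insert the low 'width' bits of rm into rd at 'lsb'. bfc is bfi
// with a zero source (Rm encoded as 15). Assumes width < 32.
uint32_t Bfi(uint32_t rd, uint32_t rm, int lsb, int width) {
  uint32_t mask = (1u << width) - 1;
  return (rd & ~(mask << lsb)) | ((rm & mask) << lsb);
}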
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index d82ef21ce0..f1a52e6eaa 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -426,191 +426,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- __ push(receiver);
- __ Push(holder, name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
- __ cmp(r0, scratch1);
- __ b(eq, &interceptor_failed);
- __ LeaveInternalFrame();
- __ Ret();
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- __ pop(receiver);
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder,
- holder,
- lookup->holder(),
- scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm,
- r0,
- holder,
- lookup->holder(),
- lookup->GetFieldIndex());
- __ Ret();
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call: push receiver to stack.
- Label cleanup;
- __ push(receiver);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters.
- __ push(holder);
- __ Move(holder, Handle<AccessorInfo>(callback));
- __ push(holder);
- __ ldr(scratch1, FieldMemOperand(holder, AccessorInfo::kDataOffset));
- __ Push(scratch1, name_);
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver and need to remove it.
- __ bind(&cleanup);
- __ pop(scratch2);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
-static void CompileLoadInterceptor(LoadInterceptorCompiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ BranchOnSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -770,9 +585,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -785,9 +600,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(), scratch1,
- scratch2, name, depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+      // CheckPrototypes has a side effect of fetching a 'holder' for
+      // the API call (the object which is instanceof for the signature).
+      // It is safe to omit the check here: if such a holder exists, it
+      // has already been fetched by the previous CheckPrototypes.

+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -1015,7 +838,7 @@ bool StubCompiler::GenerateLoadCallback(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -1023,18 +846,133 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ BranchOnSmi(receiver, miss);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD
+  // and CALLBACKS, so inline only them; other cases may be added
+  // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ Push(receiver, holder_reg, name_reg);
+ } else {
+ __ Push(holder_reg, name_reg);
+ }
+
+    // Invoke an interceptor. Note: map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller of
+    // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+    // Check if the interceptor provided a value for the property. If so,
+    // return immediately.
+ Label interceptor_failed;
+ __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ cmp(r0, scratch1);
+ __ b(eq, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ Ret();
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+    // Check that the maps from the interceptor's holder to the lookup's
+    // holder haven't changed, and load the lookup's holder into the
+    // |holder_reg| register.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder. Retrieve the field from the field's holder.
+ GenerateFastPropertyLoad(masm(), r0, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ Ret();
+ } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ Move(scratch2, Handle<AccessorInfo>(callback));
+ // holder_reg is either receiver or scratch1.
+ if (!receiver.is(holder_reg)) {
+ ASSERT(scratch1.is(holder_reg));
+ __ Push(receiver, holder_reg, scratch2);
+ __ ldr(scratch1,
+ FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ Push(scratch1, name_reg);
+ } else {
+ __ push(receiver);
+ __ ldr(scratch1,
+ FieldMemOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ Push(holder_reg, scratch2, scratch1, name_reg);
+ }
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
@@ -1204,6 +1142,26 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 3acd2df478..068ebef204 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -40,10 +40,8 @@ namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToR1R0() {
- VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is in r0 and r1.
- where_to_go.top_of_stack_state_ = R0_R1_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
element_count_ -= 2;
top_of_stack_state_ = NO_TOS_REGISTERS;
@@ -51,10 +49,8 @@ void VirtualFrame::PopToR1R0() {
void VirtualFrame::PopToR1() {
- VirtualFrame where_to_go = *this;
// Shuffle things around so the top of stack is only in r1.
- where_to_go.top_of_stack_state_ = R1_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
@@ -62,100 +58,98 @@ void VirtualFrame::PopToR1() {
void VirtualFrame::PopToR0() {
- VirtualFrame where_to_go = *this;
  // Shuffle things around so the top of stack is only in r0.
- where_to_go.top_of_stack_state_ = R0_TOS;
- MergeTo(&where_to_go);
+ MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
element_count_ -= 1;
top_of_stack_state_ = NO_TOS_REGISTERS;
}
-void VirtualFrame::MergeTo(VirtualFrame* expected) {
+void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
- MergeTOSTo(expected->top_of_stack_state_);
+ MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
- VirtualFrame::TopOfStack expected_top_of_stack_state) {
+ VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
- __ pop(r0);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
- __ pop(r1);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
- __ pop(r0);
- __ pop(r1);
+ __ pop(r0, cond);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
- __ pop(r1);
- __ pop(r0);
+ __ pop(r1, cond);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
- __ push(r0);
+ __ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
- __ mov(r1, r0);
+ __ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
- __ pop(r1);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
- __ mov(r1, r0);
- __ pop(r0);
+ __ mov(r1, r0, LeaveCC, cond);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
- __ push(r1);
+ __ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
- __ mov(r0, r1);
+ __ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
- __ mov(r0, r1);
- __ pop(r1);
+ __ mov(r0, r1, LeaveCC, cond);
+ __ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
- __ pop(r0);
+ __ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
- __ Push(r1, r0);
+ __ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
- __ push(r1);
+ __ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
- __ push(r1);
- __ mov(r1, r0);
+ __ push(r1, cond);
+ __ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
- __ Swap(r0, r1, ip);
+ __ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
- __ Push(r0, r1);
+ __ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
- __ push(r0);
- __ mov(r0, r1);
+ __ push(r0, cond);
+ __ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
- __ push(r0);
+ __ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
- __ Swap(r0, r1, ip);
+ __ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
@@ -163,7 +157,16 @@ void VirtualFrame::MergeTOSTo(
UNREACHABLE();
#undef CASE_NUMBER
}
- top_of_stack_state_ = expected_top_of_stack_state;
+ // A conditional merge will be followed by a conditional branch and the
+ // fall-through code will have an unchanged virtual frame state. If the
+ // merge is unconditional ('al'ways) then it might be followed by a fall
+ // through. We need to update the virtual frame state to match the code we
+ // are falling into. The final case is an unconditional merge followed by an
+ // unconditional branch, in which case it doesn't matter what we do to the
+ // virtual frame state, because the virtual frame will be invalidated.
+ if (cond == al) {
+ top_of_stack_state_ = expected_top_of_stack_state;
+ }
}
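
The caller pattern behind that comment, reduced to a sketch with the types trimmed to the essentials: a conditional merge is paired with a conditional branch, so only an unconditional merge may record the new state for the fall-through path.

enum Condition { al, ne /* ... */ };
enum TopOfStack { NO_TOS_REGISTERS, R0_TOS /* ... */ };

struct FrameState {
  TopOfStack tos;
  void MergeTOSTo(TopOfStack expected, Condition cond) {
    // ...emit pops/pushes/moves, each predicated on 'cond'...
    if (cond == al) tos = expected;  // fall-through now matches 'expected'
  }
};

// Typical jump-target use: merge and branch under the same condition.
// When cond != al the branch may not be taken, so the recorded state
// must stay as it was for the code that falls through.
void BranchTo(FrameState* frame, TopOfStack target_state, Condition cond) {
  frame->MergeTOSTo(target_state, cond);
  // ...emit 'b(cond, label)' here...
}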
@@ -264,7 +267,8 @@ void VirtualFrame::PushTryHandler(HandlerType type) {
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
- EmitPop(r1);
+ PopToR1();
+ SpillAll();
// +1 for receiver.
Forget(arg_count + 1);
@@ -277,7 +281,7 @@ void VirtualFrame::CallJSFunction(int arg_count) {
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
- ASSERT(SpilledScope::is_spilled());
+ SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
@@ -285,6 +289,7 @@ void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
+ SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
@@ -622,7 +627,17 @@ void VirtualFrame::EnsureOneFreeTOSRegister() {
void VirtualFrame::EmitPush(Register reg) {
element_count_++;
+ if (reg.is(cp)) {
+    // If we are pushing cp then we are about to make a call and things have
+    // to be pushed to the physical stack. There's nothing to be gained by
+    // moving to a TOS register and then pushing that; we might as well push
+    // to the physical stack immediately.
+ MergeTOSTo(NO_TOS_REGISTERS);
+ __ push(reg);
+ return;
+ }
if (SpilledScope::is_spilled()) {
+ ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
__ push(reg);
return;
}
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index 9471d61e1b..2214c960e5 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -107,14 +107,14 @@ class VirtualFrame : public ZoneObject {
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
- inline CodeGenerator* cgen();
+ inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
- int element_count() { return element_count_; }
+ int element_count() const { return element_count_; }
// The height of the virtual expression stack.
- inline int height();
+ inline int height() const;
bool is_used(int num) {
switch (num) {
@@ -162,7 +162,7 @@ class VirtualFrame : public ZoneObject {
// Spill all values from the frame to memory.
void SpillAll();
- void AssertIsSpilled() {
+ void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
@@ -184,7 +184,7 @@ class VirtualFrame : public ZoneObject {
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
- void MergeTo(VirtualFrame* expected);
+ void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
@@ -426,13 +426,13 @@ class VirtualFrame : public ZoneObject {
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
- inline int parameter_count();
- inline int local_count();
+ inline int parameter_count() const;
+ inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
- inline int frame_pointer();
+ inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
@@ -448,10 +448,10 @@ class VirtualFrame : public ZoneObject {
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
- inline int local0_index();
+ inline int local0_index() const;
// The index of the base of the expression stack.
- inline int expression_base_index();
+ inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
@@ -469,9 +469,9 @@ class VirtualFrame : public ZoneObject {
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
- void MergeTOSTo(TopOfStack expected_state);
+ void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
- inline bool Equals(VirtualFrame* other);
+ inline bool Equals(const VirtualFrame* other);
friend class JumpTarget;
};
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 9a0fbd2704..0677809d91 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -305,7 +305,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
// In old space we do not use this trick to avoid dealing with
- // remembered sets.
+ // region dirty marks.
ASSERT(Heap::new_space()->Contains(elms));
STATIC_ASSERT(FixedArray::kMapOffset == 0);
@@ -322,7 +322,7 @@ static FixedArray* LeftTrimFixedArray(FixedArray* elms, int to_trim) {
Heap::CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
former_start[to_trim] = Heap::fixed_array_map();
- former_start[to_trim + 1] = reinterpret_cast<Object*>(len - to_trim);
+ former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
ASSERT_EQ(elms->address() + to_trim * kPointerSize,
(elms + to_trim * kPointerSize)->address());
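
The Smi::FromInt change matters because that slot is the new array's length field, which must hold a tagged smi rather than a raw integer. A sketch of the left-trim trick itself; the header layout (map slot followed by length slot) and the one-bit smi tag are assumptions for illustration:

#include <cstdint>

// Instead of moving len - to_trim elements, drop a filler map over the
// vacated prefix and re-create the array header to_trim slots further in.
void LeftTrim(uintptr_t* former_start, int len, int to_trim,
              uintptr_t filler_map, uintptr_t fixed_array_map) {
  former_start[0] = filler_map;  // filler object covers the trimmed prefix
  former_start[to_trim] = fixed_array_map;
  former_start[to_trim + 1] =
      static_cast<uintptr_t>(len - to_trim) << 1;  // smi-tagged length
}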
@@ -500,7 +500,7 @@ BUILTIN(ArrayShift) {
if (Heap::new_space()->Contains(elms)) {
// As elms still in the same space they used to be (new space),
- // there is no need to update remembered set.
+    // there is no need to update the region dirty marks.
array->set_elements(LeftTrimFixedArray(elms, 1), SKIP_WRITE_BARRIER);
} else {
// Shift the elements.
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 358c6fccd3..0576fbb903 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -110,8 +110,9 @@ namespace internal {
F(ClassOf, 1, 1) \
F(ValueOf, 1, 1) \
F(SetValueOf, 2, 1) \
- F(FastCharCodeAt, 2, 1) \
- F(CharFromCode, 1, 1) \
+ F(StringCharCodeAt, 2, 1) \
+ F(StringCharFromCode, 1, 1) \
+ F(StringCharAt, 2, 1) \
F(ObjectEquals, 2, 1) \
F(Log, 3, 1) \
F(RandomHeapNumber, 0, 1) \
@@ -179,6 +180,111 @@ class CodeGeneratorScope BASE_EMBEDDED {
};
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
+
+// State of used registers in a virtual frame.
+class FrameRegisterState {
+ public:
+ // Captures the current state of the given frame.
+ explicit FrameRegisterState(VirtualFrame* frame);
+
+ // Saves the state in the stack.
+ void Save(MacroAssembler* masm) const;
+
+ // Restores the state from the stack.
+ void Restore(MacroAssembler* masm) const;
+
+ private:
+ // Constants indicating special actions. They should not be multiples
+ // of kPointerSize so they will not collide with valid offsets from
+ // the frame pointer.
+ static const int kIgnore = -1;
+ static const int kPush = 1;
+
+ // This flag is ored with a valid offset from the frame pointer, so
+  // This flag is OR'ed with a valid offset from the frame pointer, so
+  // it should fit in the low zero bits of a valid offset.
+
+ int registers_[RegisterAllocator::kNumRegisters];
+};
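
A small sketch of how entries encoded with these constants decode, as a reading of the comments above rather than V8's exact logic: each registers_ entry is either a special action or a frame-pointer-relative byte offset whose low bits may carry the synced flag.

#include <cassert>

const int kIgnore = -1;     // not a multiple of kPointerSize
const int kPush = 1;        // likewise
const int kSyncedFlag = 2;  // fits in the low zero bits of an offset

bool IsOffsetEntry(int entry) { return entry != kIgnore && entry != kPush; }

int FrameOffset(int entry) {
  assert(IsOffsetEntry(entry));
  return entry & ~kSyncedFlag;  // strip the flag to get the real offset
}

bool IsSynced(int entry) {
  return IsOffsetEntry(entry) && (entry & kSyncedFlag) != 0;
}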
+
+#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+
+
+class FrameRegisterState {
+ public:
+ inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
+
+ inline const VirtualFrame* frame() const { return &frame_; }
+
+ private:
+ VirtualFrame frame_;
+};
+
+#else
+
+#error Unsupported target architecture.
+
+#endif
+
+
+// Helper interface to prepare to/restore after making runtime calls.
+class RuntimeCallHelper {
+ public:
+ virtual ~RuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const = 0;
+
+ virtual void AfterCall(MacroAssembler* masm) const = 0;
+
+ protected:
+ RuntimeCallHelper() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(RuntimeCallHelper);
+};
+
+
+// RuntimeCallHelper implementation that saves/restores state of a
+// virtual frame.
+class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ // Does not take ownership of |frame_state|.
+ explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
+ : frame_state_(frame_state) {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+
+ private:
+ const FrameRegisterState* frame_state_;
+};
+
+
+// RuntimeCallHelper implementation used in IC stubs: enters/leaves a
+// newly created internal frame before/after the runtime call.
+class ICRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ ICRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const;
+
+ virtual void AfterCall(MacroAssembler* masm) const;
+};
+
+
+// Trivial RuntimeCallHelper implementation.
+class NopRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+ NopRuntimeCallHelper() {}
+
+ virtual void BeforeCall(MacroAssembler* masm) const {}
+
+ virtual void AfterCall(MacroAssembler* masm) const {}
+};
+
+
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
@@ -209,6 +315,8 @@ class DeferredCode: public ZoneObject {
inline void Branch(Condition cc);
void BindExit() { masm_->bind(&exit_label_); }
+ const FrameRegisterState* frame_state() const { return &frame_state_; }
+
void SaveRegisters();
void RestoreRegisters();
@@ -216,28 +324,13 @@ class DeferredCode: public ZoneObject {
MacroAssembler* masm_;
private:
- // Constants indicating special actions. They should not be multiples
- // of kPointerSize so they will not collide with valid offsets from
- // the frame pointer.
- static const int kIgnore = -1;
- static const int kPush = 1;
-
- // This flag is ored with a valid offset from the frame pointer, so
- // it should fit in the low zero bits of a valid offset.
- static const int kSyncedFlag = 2;
-
int statement_position_;
int position_;
Label entry_label_;
Label exit_label_;
- // C++ doesn't allow zero length arrays, so we make the array length 1 even
- // if we don't need it.
- static const int kRegistersArrayLength =
- (RegisterAllocator::kNumRegisters == 0) ?
- 1 : RegisterAllocator::kNumRegisters;
- int registers_[kRegistersArrayLength];
+ FrameRegisterState frame_state_;
#ifdef DEBUG
const char* comment_;
@@ -611,6 +704,163 @@ class ToBooleanStub: public CodeStub {
};
+enum StringIndexFlags {
+ // Accepts smis or heap numbers.
+ STRING_INDEX_IS_NUMBER,
+
+ // Accepts smis or heap numbers that are valid array indices
+ // (ECMA-262 15.4). Invalid indices are reported as being out of
+ // range.
+ STRING_INDEX_IS_ARRAY_INDEX
+};
+
+
+// Generates code implementing String.prototype.charCodeAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch| and |result| are clobbered.
+class StringCharCodeAtGenerator {
+ public:
+ StringCharCodeAtGenerator(Register object,
+ Register index,
+ Register scratch,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : object_(object),
+ index_(index),
+ scratch_(scratch),
+ result_(result),
+ receiver_not_string_(receiver_not_string),
+ index_not_number_(index_not_number),
+ index_out_of_range_(index_out_of_range),
+ index_flags_(index_flags) {
+ ASSERT(!scratch_.is(object_));
+ ASSERT(!scratch_.is(index_));
+ ASSERT(!scratch_.is(result_));
+ ASSERT(!result_.is(object_));
+ ASSERT(!result_.is(index_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register object_;
+ Register index_;
+ Register scratch_;
+ Register result_;
+
+ Label* receiver_not_string_;
+ Label* index_not_number_;
+ Label* index_out_of_range_;
+
+ StringIndexFlags index_flags_;
+
+ Label call_runtime_;
+ Label index_not_smi_;
+ Label got_smi_index_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharCodeAtGenerator);
+};
+
+
+// Generates code for creating a one-char string from a char code.
+class StringCharFromCodeGenerator {
+ public:
+ StringCharFromCodeGenerator(Register code,
+ Register result)
+ : code_(code),
+ result_(result) {
+ ASSERT(!code_.is(result_));
+ }
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ Register code_;
+ Register result_;
+
+ Label slow_case_;
+ Label exit_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharFromCodeGenerator);
+};
+
+
+// Generates code implementing String.prototype.charAt.
+//
+// Only supports the case when the receiver is a string and the index
+// is a number (smi or heap number) that is a valid index into the
+// string. Additional index constraints are specified by the
+// flags. Otherwise, bails out to the provided labels.
+//
+// Register usage: |object| may be changed to another string in a way
+// that doesn't affect charCodeAt/charAt semantics, |index| is
+// preserved, |scratch1|, |scratch2|, and |result| are clobbered.
+class StringCharAtGenerator {
+ public:
+ StringCharAtGenerator(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result,
+ Label* receiver_not_string,
+ Label* index_not_number,
+ Label* index_out_of_range,
+ StringIndexFlags index_flags)
+ : char_code_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ receiver_not_string,
+ index_not_number,
+ index_out_of_range,
+ index_flags),
+ char_from_code_generator_(scratch2, result) {}
+
+ // Generates the fast case code. On the fallthrough path |result|
+ // register contains the result.
+ void GenerateFast(MacroAssembler* masm);
+
+ // Generates the slow case code. Must not be naturally
+ // reachable. Expected to be put after a ret instruction (e.g., in
+ // deferred code). Always jumps back to the fast case.
+ void GenerateSlow(MacroAssembler* masm,
+ const RuntimeCallHelper& call_helper);
+
+ private:
+ StringCharCodeAtGenerator char_code_at_generator_;
+ StringCharFromCodeGenerator char_from_code_generator_;
+
+ DISALLOW_COPY_AND_ASSIGN(StringCharAtGenerator);
+};
+
+
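
StringCharAtGenerator is just the composition of the two generators above, with scratch2 doubling as the hand-off register: it receives the char code from the first generator and feeds the from-code generator. The same composition in ordinary C++, for orientation (one-byte strings stand in for V8 strings):

#include <cstddef>
#include <string>

int CharCodeAt(const std::string& s, std::size_t i) {
  return static_cast<unsigned char>(s[i]);
}

std::string CharFromCode(int code) {
  return std::string(1, static_cast<char>(code));
}

// charAt(i) == fromCharCode(charCodeAt(i))
std::string CharAt(const std::string& s, std::size_t i) {
  return CharFromCode(CharCodeAt(s, i));
}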
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index c086df4c77..0e6dd880c2 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -333,7 +333,6 @@ DEFINE_bool(code_stats, false, "report code statistics after GC")
DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
DEFINE_bool(print_handles, false, "report handles after GC")
DEFINE_bool(print_global_handles, false, "report global handles after GC")
-DEFINE_bool(print_rset, false, "print remembered sets before GC")
// ic.cc
DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 2ccbca87ef..f7a063aed8 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -571,6 +571,78 @@ void FullCodeGenerator::SetSourcePosition(int pos) {
}
+void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
+ Handle<String> name = expr->name();
+ if (strcmp("_IsSmi", *name->ToCString()) == 0) {
+ EmitIsSmi(expr->arguments());
+ } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
+ EmitIsNonNegativeSmi(expr->arguments());
+ } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
+ EmitIsObject(expr->arguments());
+ } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
+ EmitIsUndetectableObject(expr->arguments());
+ } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
+ EmitIsFunction(expr->arguments());
+ } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
+ EmitIsArray(expr->arguments());
+ } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
+ EmitIsRegExp(expr->arguments());
+ } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
+ EmitIsConstructCall(expr->arguments());
+ } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
+ EmitObjectEquals(expr->arguments());
+ } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
+ EmitArguments(expr->arguments());
+ } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
+ EmitArgumentsLength(expr->arguments());
+ } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
+ EmitClassOf(expr->arguments());
+ } else if (strcmp("_Log", *name->ToCString()) == 0) {
+ EmitLog(expr->arguments());
+ } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
+ EmitRandomHeapNumber(expr->arguments());
+ } else if (strcmp("_SubString", *name->ToCString()) == 0) {
+ EmitSubString(expr->arguments());
+ } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
+ EmitRegExpExec(expr->arguments());
+ } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
+ EmitValueOf(expr->arguments());
+ } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
+ EmitSetValueOf(expr->arguments());
+ } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
+ EmitNumberToString(expr->arguments());
+ } else if (strcmp("_StringCharFromCode", *name->ToCString()) == 0) {
+ EmitStringCharFromCode(expr->arguments());
+ } else if (strcmp("_StringCharCodeAt", *name->ToCString()) == 0) {
+ EmitStringCharCodeAt(expr->arguments());
+ } else if (strcmp("_StringCharAt", *name->ToCString()) == 0) {
+ EmitStringCharAt(expr->arguments());
+ } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
+ EmitStringAdd(expr->arguments());
+ } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
+ EmitStringCompare(expr->arguments());
+ } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
+ EmitMathPow(expr->arguments());
+ } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
+ EmitMathSin(expr->arguments());
+ } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
+ EmitMathCos(expr->arguments());
+ } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
+ EmitMathSqrt(expr->arguments());
+ } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
+ EmitCallFunction(expr->arguments());
+ } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
+ EmitRegExpConstructResult(expr->arguments());
+ } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
+ EmitSwapElements(expr->arguments());
+ } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
+ EmitGetFromCache(expr->arguments());
+ } else {
+ UNREACHABLE();
+ }
+}
+
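
The dispatcher above is a linear chain of strcmp calls keyed on the inline-runtime name. A table-driven sketch of the same pattern; the handler type and table are illustrative, since the real Emit* methods take a ZoneList<Expression*>* argument list:

#include <cstddef>
#include <cstring>

typedef void (*InlineEmitter)();  // stand-in for the Emit* methods

struct Entry { const char* name; InlineEmitter emit; };

bool Dispatch(const Entry* table, std::size_t count, const char* name) {
  for (std::size_t i = 0; i < count; i++) {
    if (std::strcmp(table[i].name, name) == 0) {
      table[i].emit();
      return true;
    }
  }
  return false;  // the original reaches UNREACHABLE() here
}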
+
void FullCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
Label eval_right, done;
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index c7d0093712..e9a05be622 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -388,8 +388,9 @@ class FullCodeGenerator: public AstVisitor {
void EmitValueOf(ZoneList<Expression*>* arguments);
void EmitSetValueOf(ZoneList<Expression*>* arguments);
void EmitNumberToString(ZoneList<Expression*>* arguments);
- void EmitCharFromCode(ZoneList<Expression*>* arguments);
- void EmitFastCharCodeAt(ZoneList<Expression*>* arguments);
+ void EmitStringCharFromCode(ZoneList<Expression*>* arguments);
+ void EmitStringCharCodeAt(ZoneList<Expression*>* arguments);
+ void EmitStringCharAt(ZoneList<Expression*>* arguments);
void EmitStringCompare(ZoneList<Expression*>* arguments);
void EmitStringAdd(ZoneList<Expression*>* arguments);
void EmitLog(ZoneList<Expression*>* arguments);
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 292d8d8040..24ff8cb457 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -303,7 +303,6 @@ class HeapObject;
class IC;
class InterceptorInfo;
class IterationStatement;
-class Array;
class JSArray;
class JSFunction;
class JSObject;
@@ -544,16 +543,16 @@ enum StateTag {
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
-// OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
-#define OBJECT_SIZE_ALIGN(value) \
+// OBJECT_POINTER_ALIGN returns the value aligned as a HeapObject pointer
+#define OBJECT_POINTER_ALIGN(value) \
(((value) + kObjectAlignmentMask) & ~kObjectAlignmentMask)
// POINTER_SIZE_ALIGN returns the value aligned as a pointer.
#define POINTER_SIZE_ALIGN(value) \
(((value) + kPointerAlignmentMask) & ~kPointerAlignmentMask)
-// MAP_SIZE_ALIGN returns the value aligned as a map pointer.
-#define MAP_SIZE_ALIGN(value) \
+// MAP_POINTER_ALIGN returns the value aligned as a map pointer.
+#define MAP_POINTER_ALIGN(value) \
(((value) + kMapAlignmentMask) & ~kMapAlignmentMask)
// The expression OFFSET_OF(type, field) computes the byte-offset
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 82e1a9125d..80157d019d 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -184,7 +184,7 @@ void Heap::RecordWrite(Address address, int offset) {
if (new_space_.Contains(address)) return;
ASSERT(!new_space_.FromSpaceContains(address));
SLOW_ASSERT(Contains(address + offset));
- Page::SetRSet(address, offset);
+ Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
@@ -195,7 +195,7 @@ void Heap::RecordWrites(Address address, int start, int len) {
offset < start + len * kPointerSize;
offset += kPointerSize) {
SLOW_ASSERT(Contains(address + offset));
- Page::SetRSet(address, offset);
+ Page::FromAddress(address)->MarkRegionDirty(address + offset);
}
}
@@ -234,13 +234,40 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
}
-void Heap::CopyBlock(Object** dst, Object** src, int byte_size) {
+void Heap::CopyBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
- CopyWords(dst, src, byte_size / kPointerSize);
+ CopyWords(reinterpret_cast<Object**>(dst),
+ reinterpret_cast<Object**>(src),
+ byte_size / kPointerSize);
}
-void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
+void Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+
+ Page* page = Page::FromAddress(dst);
+ uint32_t marks = page->GetRegionMarks();
+
+ for (int remaining = byte_size / kPointerSize;
+ remaining > 0;
+ remaining--) {
+ Memory::Object_at(dst) = Memory::Object_at(src);
+
+ if (Heap::InNewSpace(Memory::Object_at(dst))) {
+ marks |= page->GetRegionMaskForAddress(dst);
+ }
+
+ dst += kPointerSize;
+ src += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
+}
+
+
+void Heap::MoveBlock(Address dst, Address src, int byte_size) {
ASSERT(IsAligned(byte_size, kPointerSize));
int size_in_words = byte_size / kPointerSize;
@@ -250,10 +277,12 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
((OffsetFrom(reinterpret_cast<Address>(src)) -
OffsetFrom(reinterpret_cast<Address>(dst))) >= kPointerSize));
- Object** end = src + size_in_words;
+ Object** src_slot = reinterpret_cast<Object**>(src);
+ Object** dst_slot = reinterpret_cast<Object**>(dst);
+ Object** end_slot = src_slot + size_in_words;
- while (src != end) {
- *dst++ = *src++;
+ while (src_slot != end_slot) {
+ *dst_slot++ = *src_slot++;
}
} else {
memmove(dst, src, byte_size);
@@ -261,6 +290,17 @@ void Heap::MoveBlock(Object** dst, Object** src, int byte_size) {
}
+void Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size) {
+ ASSERT(IsAligned(byte_size, kPointerSize));
+ ASSERT((dst >= (src + byte_size)) ||
+ ((OffsetFrom(src) - OffsetFrom(dst)) >= kPointerSize));
+
+ CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, byte_size);
+}
+
+
void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
ASSERT(InFromSpace(object));
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index d554a3ba68..79aced7d4d 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -326,13 +326,6 @@ void Heap::GarbageCollectionPrologue() {
}
if (FLAG_gc_verbose) Print();
-
- if (FLAG_print_rset) {
- // Not all spaces have remembered set bits that we care about.
- old_pointer_space_->PrintRSet();
- map_space_->PrintRSet();
- lo_space_->PrintRSet();
- }
#endif
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -519,9 +512,8 @@ void Heap::ReserveSpace(
Heap::CollectGarbage(cell_space_size, CELL_SPACE);
gc_performed = true;
}
- // We add a slack-factor of 2 in order to have space for the remembered
- // set and a series of large-object allocations that are only just larger
- // than the page size.
+ // We add a slack-factor of 2 in order to have space for a series of
+ // large-object allocations that are only just larger than the page size.
large_object_size *= 2;
// The ReserveSpace method on the large object space checks how much
// we can expand the old generation. This includes expansion caused by
@@ -572,6 +564,25 @@ void Heap::ClearJSFunctionResultCaches() {
}
+#ifdef DEBUG
+
+enum PageWatermarkValidity {
+ ALL_VALID,
+ ALL_INVALID
+};
+
+static void VerifyPageWatermarkValidity(PagedSpace* space,
+ PageWatermarkValidity validity) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ bool expected_value = (validity == ALL_VALID);
+ while (it.has_next()) {
+ Page* page = it.next();
+ ASSERT(page->IsWatermarkValid() == expected_value);
+ }
+}
+#endif
+
+
void Heap::PerformGarbageCollection(AllocationSpace space,
GarbageCollector collector,
GCTracer* tracer) {
@@ -816,6 +827,20 @@ void Heap::Scavenge() {
gc_state_ = SCAVENGE;
+ Page::FlipMeaningOfInvalidatedWatermarkFlag();
+#ifdef DEBUG
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_VALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_VALID);
+#endif
+
+  // We do not update the allocation watermark of the top page during linear
+  // allocation, to avoid overhead. So to maintain the watermark invariant
+  // we have to manually cache the watermark and mark the top page as having
+  // an invalid watermark. This guarantees that dirty-region iteration will
+  // use a correct watermark even if a linear allocation happens.
+ old_pointer_space_->FlushTopPageWatermark();
+ map_space_->FlushTopPageWatermark();
+
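
A sketch of what flushing the top page's watermark amounts to, per the comment above; the field and function names are assumptions, not V8's actual interface:

#include <cstdint>

struct Page {
  uintptr_t cached_watermark;
  bool watermark_invalidated;
};

// Linear allocation bumps a top pointer without touching the watermark.
// Before dirty-region iteration, cache the current top once and flag the
// page so the iteration knows to use the cached value.
void FlushTopPageWatermark(Page* top_page, uintptr_t allocation_top) {
  top_page->cached_watermark = allocation_top;
  top_page->watermark_invalidated = true;
}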
// Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin"));
@@ -858,9 +883,17 @@ void Heap::Scavenge() {
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
- IterateRSet(old_pointer_space_, &ScavengePointer);
- IterateRSet(map_space_, &ScavengePointer);
- lo_space_->IterateRSet(&ScavengePointer);
+ IterateDirtyRegions(old_pointer_space_,
+ &IteratePointersInDirtyRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ IterateDirtyRegions(map_space_,
+ &IteratePointersInDirtyMapsRegion,
+ &ScavengePointer,
+ WATERMARK_CAN_BE_INVALID);
+
+ lo_space_->IterateDirtyRegions(&ScavengePointer);
// Copy objects reachable from cells by scavenging cell values directly.
HeapObjectIterator cell_iterator(cell_space_);
@@ -963,9 +996,8 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
// Copy the from-space object to its new location (given by the
// forwarding address) and fix its map.
HeapObject* target = source->map_word().ToForwardingAddress();
- CopyBlock(reinterpret_cast<Object**>(target->address()),
- reinterpret_cast<Object**>(source->address()),
- source->SizeFromMap(map));
+ int size = source->SizeFromMap(map);
+ CopyBlock(target->address(), source->address(), size);
target->set_map(map);
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
@@ -973,8 +1005,10 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
RecordCopiedObject(target);
#endif
// Visit the newly copied object for pointers to new space.
- target->Iterate(scavenge_visitor);
- UpdateRSet(target);
+ ASSERT(!target->IsMap());
+ IterateAndMarkPointersToNewSpace(target->address(),
+ target->address() + size,
+ &ScavengePointer);
}
// Take another spin if there are now unswept objects in new space
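
The scavenge loop now replaces the old iterate-then-update-rset pair with a single pass that both scavenges new-space pointers and records the slots that remain intergenerational. A self-contained C++ sketch of that combined pass, with the space test and the marking reduced to illustrative stubs:

#include <cstdint>

// Illustrative new-space bounds; a real heap would query its spaces.
const uintptr_t kNewSpaceStart = 0x10000000;
const uintptr_t kNewSpaceEnd   = 0x20000000;

bool InNewSpace(uintptr_t value) {
  return value >= kNewSpaceStart && value < kNewSpaceEnd;
}

void Scavenge(uintptr_t* slot) { (void)slot; /* copy/promote, update *slot */ }
void MarkRegionDirtyFor(uintptr_t* slot) { (void)slot; /* set region bit */ }

void IterateAndMarkPointersToNewSpace(uintptr_t* start, uintptr_t* end) {
  for (uintptr_t* slot = start; slot < end; slot++) {
    if (InNewSpace(*slot)) {
      Scavenge(slot);
      // If the target stayed in new space, the slot is intergenerational.
      if (InNewSpace(*slot)) MarkRegionDirtyFor(slot);
    }
  }
}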
@@ -985,117 +1019,6 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
}
-void Heap::ClearRSetRange(Address start, int size_in_bytes) {
- uint32_t start_bit;
- Address start_word_address =
- Page::ComputeRSetBitPosition(start, 0, &start_bit);
- uint32_t end_bit;
- Address end_word_address =
- Page::ComputeRSetBitPosition(start + size_in_bytes - kIntSize,
- 0,
- &end_bit);
-
- // We want to clear the bits in the starting word starting with the
- // first bit, and in the ending word up to and including the last
- // bit. Build a pair of bitmasks to do that.
- uint32_t start_bitmask = start_bit - 1;
- uint32_t end_bitmask = ~((end_bit << 1) - 1);
-
- // If the start address and end address are the same, we mask that
- // word once, otherwise mask the starting and ending word
- // separately and all the ones in between.
- if (start_word_address == end_word_address) {
- Memory::uint32_at(start_word_address) &= (start_bitmask | end_bitmask);
- } else {
- Memory::uint32_at(start_word_address) &= start_bitmask;
- Memory::uint32_at(end_word_address) &= end_bitmask;
- start_word_address += kIntSize;
- memset(start_word_address, 0, end_word_address - start_word_address);
- }
-}
-
-
-class UpdateRSetVisitor: public ObjectVisitor {
- public:
-
- void VisitPointer(Object** p) {
- UpdateRSet(p);
- }
-
- void VisitPointers(Object** start, Object** end) {
- // Update a store into slots [start, end), used (a) to update remembered
- // set when promoting a young object to old space or (b) to rebuild
- // remembered sets after a mark-compact collection.
- for (Object** p = start; p < end; p++) UpdateRSet(p);
- }
- private:
-
- void UpdateRSet(Object** p) {
- // The remembered set should not be set. It should be clear for objects
- // newly copied to old space, and it is cleared before rebuilding in the
- // mark-compact collector.
- ASSERT(!Page::IsRSetSet(reinterpret_cast<Address>(p), 0));
- if (Heap::InNewSpace(*p)) {
- Page::SetRSet(reinterpret_cast<Address>(p), 0);
- }
- }
-};
-
-
-int Heap::UpdateRSet(HeapObject* obj) {
- ASSERT(!InNewSpace(obj));
- // Special handling of fixed arrays to iterate the body based on the start
- // address and offset. Just iterating the pointers as in UpdateRSetVisitor
- // will not work because Page::SetRSet needs to have the start of the
- // object for large object pages.
- if (obj->IsFixedArray()) {
- FixedArray* array = FixedArray::cast(obj);
- int length = array->length();
- for (int i = 0; i < length; i++) {
- int offset = FixedArray::kHeaderSize + i * kPointerSize;
- ASSERT(!Page::IsRSetSet(obj->address(), offset));
- if (Heap::InNewSpace(array->get(i))) {
- Page::SetRSet(obj->address(), offset);
- }
- }
- } else if (!obj->IsCode()) {
- // Skip code object, we know it does not contain inter-generational
- // pointers.
- UpdateRSetVisitor v;
- obj->Iterate(&v);
- }
- return obj->Size();
-}
-
-
-void Heap::RebuildRSets() {
- // By definition, we do not care about remembered set bits in code,
- // data, or cell spaces.
- map_space_->ClearRSet();
- RebuildRSets(map_space_);
-
- old_pointer_space_->ClearRSet();
- RebuildRSets(old_pointer_space_);
-
- Heap::lo_space_->ClearRSet();
- RebuildRSets(lo_space_);
-}
-
-
-void Heap::RebuildRSets(PagedSpace* space) {
- HeapObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
-}
-
-
-void Heap::RebuildRSets(LargeObjectSpace* space) {
- LargeObjectIterator it(space);
- for (HeapObject* obj = it.next(); obj != NULL; obj = it.next())
- Heap::UpdateRSet(obj);
-}
-
-
#if defined(DEBUG) || defined(ENABLE_LOGGING_AND_PROFILING)
void Heap::RecordCopiedObject(HeapObject* obj) {
bool should_record = false;
@@ -1121,9 +1044,7 @@ HeapObject* Heap::MigrateObject(HeapObject* source,
HeapObject* target,
int size) {
// Copy the content of source to target.
- CopyBlock(reinterpret_cast<Object**>(target->address()),
- reinterpret_cast<Object**>(source->address()),
- size);
+ CopyBlock(target->address(), source->address(), size);
// Set the forwarding address.
source->set_map_word(MapWord::FromForwardingAddress(target));
@@ -1178,21 +1099,30 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
if (object_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
- // Save the from-space object pointer and its map pointer at the
- // top of the to space to be swept and copied later. Write the
- // forwarding address over the map word of the from-space
- // object.
HeapObject* target = HeapObject::cast(result);
- promotion_queue.insert(object, first_word.ToMap());
- object->set_map_word(MapWord::FromForwardingAddress(target));
- // Give the space allocated for the result a proper map by
- // treating it as a free list node (not linked into the free
- // list).
- FreeListNode* node = FreeListNode::FromAddress(target->address());
- node->set_size(object_size);
+ if (object->IsFixedArray()) {
+ // Save the from-space object pointer and its map pointer at the
+ // top of the to space to be swept and copied later. Write the
+ // forwarding address over the map word of the from-space
+ // object.
+ promotion_queue.insert(object, first_word.ToMap());
+ object->set_map_word(MapWord::FromForwardingAddress(target));
+
+ // Give the space allocated for the result a proper map by
+ // treating it as a free list node (not linked into the free
+ // list).
+ FreeListNode* node = FreeListNode::FromAddress(target->address());
+ node->set_size(object_size);
+
+ *p = target;
+ } else {
+        // In large object space only fixed arrays can contain
+        // intergenerational references. All other objects can be copied
+        // immediately and need not be revisited.
+ *p = MigrateObject(object, target, object_size);
+ }
- *p = target;
tracer()->increment_promoted_objects_size(object_size);
return;
}
@@ -1682,7 +1612,7 @@ bool Heap::CreateInitialObjects() {
// loop above because it needs to be allocated manually with the special
// hash code in place. The hash code for the hidden_symbol is zero to ensure
// that it will always be at the first entry in property descriptors.
- obj = AllocateSymbol(CStrVector(""), 0, String::kHashComputedMask);
+ obj = AllocateSymbol(CStrVector(""), 0, String::kZeroHash);
if (obj->IsFailure()) return false;
hidden_symbol_ = String::cast(obj);
@@ -1918,6 +1848,9 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_compiler_hints(0);
share->set_this_property_assignments_count(0);
share->set_this_property_assignments(undefined_value());
+ share->set_num_literals(0);
+ share->set_end_position(0);
+ share->set_function_token_position(0);
return result;
}
@@ -2179,8 +2112,8 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
: lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(byte_array_map());
- reinterpret_cast<Array*>(result)->set_length(length);
+ reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2195,8 +2128,8 @@ Object* Heap::AllocateByteArray(int length) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(byte_array_map());
- reinterpret_cast<Array*>(result)->set_length(length);
+ reinterpret_cast<ByteArray*>(result)->set_map(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -2312,9 +2245,7 @@ Object* Heap::CopyCode(Code* code) {
// Copy code object.
Address old_addr = code->address();
Address new_addr = reinterpret_cast<HeapObject*>(result)->address();
- CopyBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ CopyBlock(new_addr, old_addr, obj_size);
// Relocate the copy.
Code* new_code = Code::cast(result);
ASSERT(!CodeRange::exists() || CodeRange::contains(code->address()));
@@ -2460,8 +2391,8 @@ Object* Heap::AllocateArgumentsObject(Object* callee, int length) {
// Copy the content. The arguments boilerplate doesn't have any
// fields that point to new space so it's safe to skip the write
// barrier here.
- CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(result)->address()),
- reinterpret_cast<Object**>(boilerplate->address()),
+ CopyBlock(HeapObject::cast(result)->address(),
+ boilerplate->address(),
kArgumentsObjectSize);
// Set the two properties.
@@ -2683,8 +2614,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
clone = AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
if (clone->IsFailure()) return clone;
Address clone_address = HeapObject::cast(clone)->address();
- CopyBlock(reinterpret_cast<Object**>(clone_address),
- reinterpret_cast<Object**>(source->address()),
+ CopyBlock(clone_address,
+ source->address(),
object_size);
// Update write barrier for all fields that lie beyond the header.
RecordWrites(clone_address,
@@ -2696,8 +2627,8 @@ Object* Heap::CopyJSObject(JSObject* source) {
ASSERT(Heap::InNewSpace(clone));
// Since we know the clone is allocated in new space, we can copy
// the contents without worrying about updating the write barrier.
- CopyBlock(reinterpret_cast<Object**>(HeapObject::cast(clone)->address()),
- reinterpret_cast<Object**>(source->address()),
+ CopyBlock(HeapObject::cast(clone)->address(),
+ source->address(),
object_size);
}
@@ -2968,8 +2899,8 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* result = AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
// Initialize the object.
- reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
- reinterpret_cast<Array*>(result)->set_length(0);
+ reinterpret_cast<FixedArray*>(result)->set_map(fixed_array_map());
+ reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@@ -2994,9 +2925,7 @@ Object* Heap::CopyFixedArray(FixedArray* src) {
if (obj->IsFailure()) return obj;
if (Heap::InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- CopyBlock(reinterpret_cast<Object**>(dst->address()),
- reinterpret_cast<Object**>(src->address()),
- FixedArray::SizeFor(len));
+ CopyBlock(dst->address(), src->address(), FixedArray::SizeFor(len));
return obj;
}
HeapObject::cast(obj)->set_map(src->map());
@@ -3017,8 +2946,8 @@ Object* Heap::AllocateFixedArray(int length) {
Object* result = AllocateRawFixedArray(length);
if (!result->IsFailure()) {
// Initialize header.
- reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
- FixedArray* array = FixedArray::cast(result);
+ FixedArray* array = reinterpret_cast<FixedArray*>(result);
+ array->set_map(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!Heap::InNewSpace(undefined_value()));
@@ -3045,27 +2974,10 @@ Object* Heap::AllocateRawFixedArray(int length, PretenureFlag pretenure) {
space = LO_SPACE;
}
- // Specialize allocation for the space.
- Object* result = Failure::OutOfMemoryException();
- if (space == NEW_SPACE) {
- // We cannot use Heap::AllocateRaw() because it will not properly
- // allocate extra remembered set bits if always_allocate() is true and
- // new space allocation fails.
- result = new_space_.AllocateRaw(size);
- if (result->IsFailure() && always_allocate()) {
- if (size <= MaxObjectSizeInPagedSpace()) {
- result = old_pointer_space_->AllocateRaw(size);
- } else {
- result = lo_space_->AllocateRawFixedArray(size);
- }
- }
- } else if (space == OLD_POINTER_SPACE) {
- result = old_pointer_space_->AllocateRaw(size);
- } else {
- ASSERT(space == LO_SPACE);
- result = lo_space_->AllocateRawFixedArray(size);
- }
- return result;
+ AllocationSpace retry_space =
+ (size <= MaxObjectSizeInPagedSpace()) ? OLD_POINTER_SPACE : LO_SPACE;
+
+ return AllocateRaw(size, space, retry_space);
}
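
The rewritten AllocateRawFixedArray above folds the hand-written per-space branches into a single AllocateRaw call that carries a retry space. A standalone sketch of that allocate-then-retry pattern (simplified; the real AllocateRaw also handles GC scheduling on failure):

    #include <cstddef>

    typedef void* (*AllocFn)(size_t size);

    // Try the preferred space first; if that fails and retries are allowed,
    // fall back to the designated retry space.
    void* AllocateRawSketch(AllocFn primary, AllocFn retry,
                            size_t size, bool always_allocate) {
      void* result = primary(size);
      if (result == NULL && always_allocate) {
        result = retry(size);
      }
      return result;
    }

Choosing the retry space up front (paged space for small objects, large object space otherwise) is what lets the removed if/else ladder disappear.
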
@@ -3113,7 +3025,7 @@ Object* Heap::AllocateUninitializedFixedArray(int length) {
Object* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
Object* result = Heap::AllocateFixedArray(length, pretenure);
if (result->IsFailure()) return result;
- reinterpret_cast<Array*>(result)->set_map(hash_table_map());
+ reinterpret_cast<HeapObject*>(result)->set_map(hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@@ -3365,6 +3277,49 @@ bool Heap::InSpace(Address addr, AllocationSpace space) {
#ifdef DEBUG
+static void DummyScavengePointer(HeapObject** p) {
+}
+
+
+static void VerifyPointersUnderWatermark(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region) {
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+
+ while (it.has_next()) {
+ Page* page = it.next();
+ Address start = page->ObjectAreaStart();
+ Address end = page->AllocationWatermark();
+
+ Heap::IterateDirtyRegions(Page::kAllRegionsDirtyMarks,
+ start,
+ end,
+ visit_dirty_region,
+ &DummyScavengePointer);
+ }
+}
+
+
+static void VerifyPointersUnderWatermark(LargeObjectSpace* space) {
+ LargeObjectIterator it(space);
+ for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
+ if (object->IsFixedArray()) {
+ Address slot_address = object->address();
+ Address end = object->address() + object->Size();
+
+ while (slot_address < end) {
+ HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+ // When we are not in GC, the Heap::InNewSpace() predicate
+ // checks that pointers which satisfy it point into the
+ // active semispace.
+ Heap::InNewSpace(*slot);
+ slot_address += kPointerSize;
+ }
+ }
+ }
+}
+
+
void Heap::Verify() {
ASSERT(HasBeenSetup());
@@ -3373,14 +3328,23 @@ void Heap::Verify() {
new_space_.Verify();
- VerifyPointersAndRSetVisitor rset_visitor;
- old_pointer_space_->Verify(&rset_visitor);
- map_space_->Verify(&rset_visitor);
+ VerifyPointersAndDirtyRegionsVisitor dirty_regions_visitor;
+ old_pointer_space_->Verify(&dirty_regions_visitor);
+ map_space_->Verify(&dirty_regions_visitor);
- VerifyPointersVisitor no_rset_visitor;
- old_data_space_->Verify(&no_rset_visitor);
- code_space_->Verify(&no_rset_visitor);
- cell_space_->Verify(&no_rset_visitor);
+ VerifyPointersUnderWatermark(old_pointer_space_,
+ &IteratePointersInDirtyRegion);
+ VerifyPointersUnderWatermark(map_space_,
+ &IteratePointersInDirtyMapsRegion);
+ VerifyPointersUnderWatermark(lo_space_);
+
+ VerifyPageWatermarkValidity(old_pointer_space_, ALL_INVALID);
+ VerifyPageWatermarkValidity(map_space_, ALL_INVALID);
+
+ VerifyPointersVisitor no_dirty_regions_visitor;
+ old_data_space_->Verify(&no_dirty_regions_visitor);
+ code_space_->Verify(&no_dirty_regions_visitor);
+ cell_space_->Verify(&no_dirty_regions_visitor);
lo_space_->Verify();
}
@@ -3433,65 +3397,253 @@ void Heap::ZapFromSpace() {
#endif // DEBUG
-int Heap::IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func) {
- Address object_address = object_start;
- Address rset_address = rset_start;
- int set_bits_count = 0;
-
- // Loop over all the pointers in [object_start, object_end).
- while (object_address < object_end) {
- uint32_t rset_word = Memory::uint32_at(rset_address);
- if (rset_word != 0) {
- uint32_t result_rset = rset_word;
- for (uint32_t bitmask = 1; bitmask != 0; bitmask = bitmask << 1) {
- // Do not dereference pointers at or past object_end.
- if ((rset_word & bitmask) != 0 && object_address < object_end) {
- Object** object_p = reinterpret_cast<Object**>(object_address);
- if (Heap::InNewSpace(*object_p)) {
- copy_object_func(reinterpret_cast<HeapObject**>(object_p));
- }
- // If this pointer does not need to be remembered anymore, clear
- // the remembered set bit.
- if (!Heap::InNewSpace(*object_p)) result_rset &= ~bitmask;
- set_bits_count++;
- }
- object_address += kPointerSize;
+bool Heap::IteratePointersInDirtyRegion(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address slot_address = start;
+ bool pointers_to_new_space_found = false;
+
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ copy_object_func(reinterpret_cast<HeapObject**>(slot));
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ pointers_to_new_space_found = true;
}
- // Update the remembered set if it has changed.
- if (result_rset != rset_word) {
- Memory::uint32_at(rset_address) = result_rset;
+ }
+ slot_address += kPointerSize;
+ }
+ return pointers_to_new_space_found;
+}
+
+
+// Compute start address of the first map following given addr.
+static inline Address MapStartAlign(Address addr) {
+ Address page = Page::FromAddress(addr)->ObjectAreaStart();
+ return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
+}
+
+
+// Compute end address of the first map preceding given addr.
+static inline Address MapEndAlign(Address addr) {
+ Address page = Page::FromAllocationTop(addr)->ObjectAreaStart();
+ return page + ((addr - page) / Map::kSize * Map::kSize);
+}
+
+
+static bool IteratePointersInDirtyMaps(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ ASSERT(MapStartAlign(start) == start);
+ ASSERT(MapEndAlign(end) == end);
+
+ Address map_address = start;
+ bool pointers_to_new_space_found = false;
+
+ while (map_address < end) {
+ ASSERT(!Heap::InNewSpace(Memory::Object_at(map_address)));
+ ASSERT(Memory::Object_at(map_address)->IsMap());
+
+ Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
+ Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
+
+ if (Heap::IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)) {
+ pointers_to_new_space_found = true;
+ }
+
+ map_address += Map::kSize;
+ }
+
+ return pointers_to_new_space_found;
+}
+
+
+bool Heap::IteratePointersInDirtyMapsRegion(
+ Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func) {
+ Address map_aligned_start = MapStartAlign(start);
+ Address map_aligned_end = MapEndAlign(end);
+
+ bool contains_pointers_to_new_space = false;
+
+ if (map_aligned_start != start) {
+ Address prev_map = map_aligned_start - Map::kSize;
+ ASSERT(Memory::Object_at(prev_map)->IsMap());
+
+ Address pointer_fields_start =
+ Max(start, prev_map + Map::kPointerFieldsBeginOffset);
+
+ Address pointer_fields_end =
+ Min(prev_map + Map::kCodeCacheOffset + kPointerSize, end);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyMaps(map_aligned_start,
+ map_aligned_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+
+ if (map_aligned_end != end) {
+ ASSERT(Memory::Object_at(map_aligned_end)->IsMap());
+
+ Address pointer_fields_start = map_aligned_end + Map::kPrototypeOffset;
+
+ Address pointer_fields_end =
+ Min(end, map_aligned_end + Map::kCodeCacheOffset + kPointerSize);
+
+ contains_pointers_to_new_space =
+ IteratePointersInDirtyRegion(pointer_fields_start,
+ pointer_fields_end,
+ copy_object_func)
+ || contains_pointers_to_new_space;
+ }
+
+ return contains_pointers_to_new_space;
+}
+
+
+void Heap::IterateAndMarkPointersToNewSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback) {
+ Address slot_address = start;
+ Page* page = Page::FromAddress(start);
+
+ uint32_t marks = page->GetRegionMarks();
+
+ while (slot_address < end) {
+ Object** slot = reinterpret_cast<Object**>(slot_address);
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ callback(reinterpret_cast<HeapObject**>(slot));
+ if (Heap::InNewSpace(*slot)) {
+ ASSERT((*slot)->IsHeapObject());
+ marks |= page->GetRegionMaskForAddress(slot_address);
+ }
+ }
+ slot_address += kPointerSize;
+ }
+
+ page->SetRegionMarks(marks);
+}
+
+
+uint32_t Heap::IterateDirtyRegions(
+ uint32_t marks,
+ Address area_start,
+ Address area_end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func) {
+ uint32_t newmarks = 0;
+ uint32_t mask = 1;
+
+ if (area_start >= area_end) {
+ return newmarks;
+ }
+
+ Address region_start = area_start;
+
+ // area_start does not necessarily coincide with the start of the first
+ // region. Thus, to calculate the beginning of the next region, we align
+ // area_start to Page::kRegionSize.
+ Address second_region =
+ reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(area_start + Page::kRegionSize) &
+ ~Page::kRegionAlignmentMask);
+
+ // Next region might be beyond area_end.
+ Address region_end = Min(second_region, area_end);
+
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+ mask <<= 1;
+
+ // Iterate subsequent regions which lie fully inside [area_start, area_end[.
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ while (region_end <= area_end) {
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, region_end, copy_object_func)) {
+ newmarks |= mask;
+ }
+ }
+
+ region_start = region_end;
+ region_end = region_start + Page::kRegionSize;
+
+ mask <<= 1;
+ }
+
+ if (region_start != area_end) {
+ // A small piece of the area was left unvisited because area_end does not
+ // coincide with a region end. Check whether the region covering the last
+ // part of the area is dirty.
+ if (marks & mask) {
+ if (visit_dirty_region(region_start, area_end, copy_object_func)) {
+ newmarks |= mask;
}
- } else {
- // No bits in the word were set. This is the common case.
- object_address += kPointerSize * kBitsPerInt;
}
- rset_address += kIntSize;
}
- return set_bits_count;
+
+ return newmarks;
}
-void Heap::IterateRSet(PagedSpace* space, ObjectSlotCallback copy_object_func) {
- ASSERT(Page::is_rset_in_use());
- ASSERT(space == old_pointer_space_ || space == map_space_);
- static void* paged_rset_histogram = StatsTable::CreateHistogram(
- "V8.RSetPaged",
- 0,
- Page::kObjectAreaSize / kPointerSize,
- 30);
+void Heap::IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback copy_object_func,
+ ExpectedPageWatermarkState expected_page_watermark_state) {
PageIterator it(space, PageIterator::PAGES_IN_USE);
+
while (it.has_next()) {
Page* page = it.next();
- int count = IterateRSetRange(page->ObjectAreaStart(), page->AllocationTop(),
- page->RSetStart(), copy_object_func);
- if (paged_rset_histogram != NULL) {
- StatsTable::AddHistogramSample(paged_rset_histogram, count);
+ uint32_t marks = page->GetRegionMarks();
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ Address start = page->ObjectAreaStart();
+
+ // Do not try to visit pointers beyond the page's allocation watermark;
+ // the page can contain garbage pointers there.
+ Address end;
+
+ if ((expected_page_watermark_state == WATERMARK_SHOULD_BE_VALID) ||
+ page->IsWatermarkValid()) {
+ end = page->AllocationWatermark();
+ } else {
+ end = page->CachedAllocationWatermark();
+ }
+
+ ASSERT(space == old_pointer_space_ ||
+ (space == map_space_ &&
+ ((page->ObjectAreaStart() - end) % Map::kSize == 0)));
+
+ page->SetRegionMarks(IterateDirtyRegions(marks,
+ start,
+ end,
+ visit_dirty_region,
+ copy_object_func));
}
+
+ // Mark page watermark as invalid to maintain watermark validity invariant.
+ // See Page::FlipMeaningOfInvalidatedWatermarkFlag() for details.
+ page->InvalidateWatermark(true);
}
}
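
The dirty-region machinery above replaces per-word remembered-set bits with a single 32-bit mark word per page: each bit covers one fixed-size region, and a set bit means the region may contain a pointer into new space. A standalone sketch of the bit arithmetic, with illustrative constants (an 8 KB page split into 32 regions is assumed here; the real values live in spaces.h):

    #include <cstdint>

    const uintptr_t kPageSizeSketch = 1 << 13;             // assumed 8 KB page
    const int kRegionsPerPage = 32;                        // one bit per region
    const uintptr_t kRegionSizeSketch = kPageSizeSketch / kRegionsPerPage;

    // Bit in the page's mark word that covers the region containing addr.
    static inline uint32_t RegionMaskForAddressSketch(uintptr_t addr) {
      uintptr_t offset = addr & (kPageSizeSketch - 1);     // offset in page
      return static_cast<uint32_t>(1) << (offset / kRegionSizeSketch);
    }

    // Recording a write:  marks |= RegionMaskForAddressSketch(slot_address);
    // Scavenging: visit only regions whose bit is set, then keep a bit only
    // if its region still references new space (as IterateDirtyRegions does).
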
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 74e5a31b19..b67418e059 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -206,6 +206,10 @@ class HeapStats;
typedef String* (*ExternalStringTableUpdaterCallback)(Object** pointer);
+typedef bool (*DirtyRegionCallback)(Address start,
+ Address end,
+ ObjectSlotCallback copy_object_func);
+
// The all static Heap captures the interface to the global object heap.
// All JavaScript contexts by this process share the same object heap.
@@ -740,17 +744,54 @@ class Heap : public AllStatic {
// Iterates over all the other roots in the heap.
static void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
- // Iterates remembered set of an old space.
- static void IterateRSet(PagedSpace* space, ObjectSlotCallback callback);
+ enum ExpectedPageWatermarkState {
+ WATERMARK_SHOULD_BE_VALID,
+ WATERMARK_CAN_BE_INVALID
+ };
+
+ // For each dirty region on a page in use from an old space, call the
+ // visit_dirty_region callback.
+ // If either visit_dirty_region or callback can cause an allocation in
+ // old space and a change in the allocation watermark, then
+ // can_preallocate_during_iteration should be set to true.
+ // All pages will be marked as having an invalid watermark upon
+ // iteration completion.
+ static void IterateDirtyRegions(
+ PagedSpace* space,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback,
+ ExpectedPageWatermarkState expected_page_watermark_state);
+
+ // Interpret marks as a bitvector of dirty marks for regions of size
+ // Page::kRegionSize, aligned via Page::kRegionAlignmentMask and covering
+ // the memory interval from start to end. For each dirty region call the
+ // visit_dirty_region callback. Returns the updated bitvector of dirty marks.
+ static uint32_t IterateDirtyRegions(uint32_t marks,
+ Address start,
+ Address end,
+ DirtyRegionCallback visit_dirty_region,
+ ObjectSlotCallback callback);
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // Update dirty marks for page containing start address.
+ static void IterateAndMarkPointersToNewSpace(Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyRegion(Address start,
+ Address end,
+ ObjectSlotCallback callback);
+
+
+ // Iterate pointers to new space found in memory interval from start to end.
+ // This interval is considered to belong to the map space.
+ // Return true if pointers to new space were found.
+ static bool IteratePointersInDirtyMapsRegion(Address start,
+ Address end,
+ ObjectSlotCallback callback);
- // Iterates a range of remembered set addresses starting with rset_start
- // corresponding to the range of allocated pointers
- // [object_start, object_end).
- // Returns the number of bits that were set.
- static int IterateRSetRange(Address object_start,
- Address object_end,
- Address rset_start,
- ObjectSlotCallback copy_object_func);
// Returns whether the object resides in new space.
static inline bool InNewSpace(Object* object);
@@ -852,17 +893,6 @@ class Heap : public AllStatic {
static void ScavengePointer(HeapObject** p);
static inline void ScavengeObject(HeapObject** p, HeapObject* object);
- // Clear a range of remembered set addresses corresponding to the object
- // area address 'start' with size 'size_in_bytes', eg, when adding blocks
- // to the free list.
- static void ClearRSetRange(Address start, int size_in_bytes);
-
- // Rebuild remembered set in old and map spaces.
- static void RebuildRSets();
-
- // Update an old object's remembered set
- static int UpdateRSet(HeapObject* obj);
-
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
@@ -955,11 +985,19 @@ class Heap : public AllStatic {
// Copy block of memory from src to dst. Size of block should be aligned
// by pointer size.
- static inline void CopyBlock(Object** dst, Object** src, int byte_size);
+ static inline void CopyBlock(Address dst, Address src, int byte_size);
+
+ static inline void CopyBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Optimized version of memmove for blocks with pointer size aligned sizes and
// pointer size aligned addresses.
- static inline void MoveBlock(Object** dst, Object** src, int byte_size);
+ static inline void MoveBlock(Address dst, Address src, int byte_size);
+
+ static inline void MoveBlockToOldSpaceAndUpdateRegionMarks(Address dst,
+ Address src,
+ int byte_size);
// Check new space expansion criteria and expand semispaces if it was hit.
static void CheckNewSpaceExpansionCriteria();
@@ -1207,12 +1245,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
- // Rebuild remembered set in an old space.
- static void RebuildRSets(PagedSpace* space);
-
- // Rebuild remembered set in the large object space.
- static void RebuildRSets(LargeObjectSpace* space);
-
// Slow part of scavenge object.
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
@@ -1301,11 +1333,11 @@ class LinearAllocationScope {
#ifdef DEBUG
-// Visitor class to verify interior pointers that do not have remembered set
-// bits. All heap object pointers have to point into the heap to a location
-// that has a map pointer at its first word. Caveat: Heap::Contains is an
-// approximation because it can return true for objects in a heap space but
-// above the allocation pointer.
+// Visitor class to verify interior pointers in spaces that do not contain
+// or care about intergenerational references. All heap object pointers have to
+// point into the heap to a location that has a map pointer at its first word.
+// Caveat: Heap::Contains is an approximation because it can return true for
+// objects in a heap space but above the allocation pointer.
class VerifyPointersVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
@@ -1320,10 +1352,11 @@ class VerifyPointersVisitor: public ObjectVisitor {
};
-// Visitor class to verify interior pointers that have remembered set bits.
-// As VerifyPointersVisitor but also checks that remembered set bits are
-// always set for pointers into new space.
-class VerifyPointersAndRSetVisitor: public ObjectVisitor {
+// Visitor class to verify interior pointers in spaces that use region marks
+// to keep track of intergenerational references.
+// As VerifyPointersVisitor but also checks that dirty marks are set
+// for regions covering intergenerational references.
+class VerifyPointersAndDirtyRegionsVisitor: public ObjectVisitor {
public:
void VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
@@ -1332,7 +1365,9 @@ class VerifyPointersAndRSetVisitor: public ObjectVisitor {
ASSERT(Heap::Contains(object));
ASSERT(object->map()->IsMap());
if (Heap::InNewSpace(object)) {
- ASSERT(Page::IsRSetSet(reinterpret_cast<Address>(current), 0));
+ ASSERT(Heap::InToSpace(object));
+ Address addr = reinterpret_cast<Address>(current);
+ ASSERT(Page::FromAddress(addr)->IsRegionDirty(addr));
}
}
}
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 608625817a..2275b7c3b9 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -226,8 +226,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// edx: number of elements
// ecx: start of next object
__ mov(eax, Factory::fixed_array_map());
- __ mov(Operand(edi, JSObject::kMapOffset), eax); // setup the map
- __ mov(Operand(edi, Array::kLengthOffset), edx); // and length
+ __ mov(Operand(edi, FixedArray::kMapOffset), eax); // setup the map
+ __ SmiTag(edx);
+ __ mov(Operand(edi, FixedArray::kLengthOffset), edx); // and length
// Initialize the fields to undefined.
// ebx: JSObject
@@ -548,6 +549,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx,
FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ SmiUntag(ebx);
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
__ lea(edx, FieldOperand(edx, Code::kHeaderSize));
__ cmp(eax, Operand(ebx));
@@ -752,15 +754,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), scratch1);
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
- __ mov(FieldOperand(scratch1, JSObject::kMapOffset),
+ __ mov(FieldOperand(scratch1, FixedArray::kMapOffset),
Factory::fixed_array_map());
- __ mov(FieldOperand(scratch1, Array::kLengthOffset),
- Immediate(initial_capacity));
+ __ mov(FieldOperand(scratch1, FixedArray::kLengthOffset),
+ Immediate(Smi::FromInt(initial_capacity)));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@@ -847,23 +849,22 @@ static void AllocateJSArray(MacroAssembler* masm,
__ lea(elements_array, Operand(result, JSArray::kSize));
__ mov(FieldOperand(result, JSArray::kElementsOffset), elements_array);
- // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
// array_size: size of array (smi)
- ASSERT(kSmiTag == 0);
- __ SmiUntag(array_size); // Convert from smi to value.
- __ mov(FieldOperand(elements_array, JSObject::kMapOffset),
+ __ mov(FieldOperand(elements_array, FixedArray::kMapOffset),
Factory::fixed_array_map());
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
- __ mov(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+ __ mov(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
// elements_array: elements array
if (fill_with_hole) {
+ __ SmiUntag(array_size);
__ lea(edi, Operand(elements_array,
FixedArray::kHeaderSize - kHeapObjectTag));
__ mov(eax, Factory::the_hole_value());
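
Most hunks in this file follow from a single representation change: FixedArray::kLengthOffset now holds a smi rather than a raw integer, so stores switch to Smi::FromInt and loads drop or gain SmiTag/SmiUntag. A sketch of the ia32 smi encoding these stubs rely on (kSmiTag == 0 and kSmiTagSize == 1, as the ASSERTs elsewhere in this diff state):

    #include <cstdint>

    // On ia32 a smi is a 31-bit payload shifted left by one; the low bit
    // (the tag) is zero.
    static inline int32_t SmiTagSketch(int32_t value) { return value << 1; }
    static inline int32_t SmiUntagSketch(int32_t smi) { return smi >> 1; }

    // Because both operands carry the same tag, a bounds check such as
    //   cmp key, FieldOperand(elements, FixedArray::kLengthOffset)
    // compares two smis directly, with no untagging on the fast path.
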
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index c55ec7b203..95aeba6817 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -46,12 +46,12 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
+// Platform-specific FrameRegisterState functions.
-void DeferredCode::SaveRegisters() {
+void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
@@ -63,7 +63,7 @@ void DeferredCode::SaveRegisters() {
}
-void DeferredCode::RestoreRegisters() {
+void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
@@ -77,6 +77,45 @@ void DeferredCode::RestoreRegisters() {
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ frame_state_.Save(masm_);
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ frame_state_.Restore(masm_);
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->Save(masm);
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ frame_state_->Restore(masm);
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
+}
+
+
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -4198,7 +4237,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -4210,7 +4248,6 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -6020,29 +6057,67 @@ void CodeGenerator::GenerateIsNonNegativeSmi(ZoneList<Expression*>* args) {
}
-// This generates code that performs a charCodeAt() call or returns
-// undefined in order to trigger the slow case, Runtime_StringCharCodeAt.
-// It can handle flat, 8 and 16 bit characters and cons strings where the
-// answer is found in the left hand branch of the cons. The slow case will
-// flatten the string, which will ensure that the answer is in the left hand
-// side the next time around.
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateFastCharCodeAt");
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ Set(result_, Immediate(Factory::undefined_value()));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Set(result_, Immediate(Factory::nan_value()));
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns undefined in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
-
- // We will mutate the index register and possibly the object register.
- // The case where they are somehow the same register is handled
- // because we only mutate them in the case where the receiver is a
- // heap object and the index is not.
object.ToRegister();
index.ToRegister();
+ // We might mutate the object register.
frame_->Spill(object.reg());
- frame_->Spill(index.reg());
// We need two extra registers.
Result result = allocator()->Allocate();
@@ -6050,33 +6125,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
- // There is no virtual frame effect from here up to the final result
- // push.
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg(),
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
-
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ Set(result.reg(), Immediate(Factory::undefined_value()));
-
- __ bind(&exit);
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -6085,16 +6167,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
code.ToRegister();
ASSERT(code.is_valid());
- // StringHelper::GenerateCharFromCode may do a runtime call.
- frame_->SpillAll();
-
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
- StringHelper::GenerateCharFromCode(masm_,
- code.reg(),
- result.reg(),
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Set(result_, Immediate(Smi::FromInt(0)));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Set(result_, Immediate(Factory::empty_string()));
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
@@ -6600,9 +6763,9 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ mov(FieldOperand(ebx, HeapObject::kMapOffset),
Immediate(Factory::fixed_array_map()));
// Set length.
- __ SmiUntag(ecx);
__ mov(FieldOperand(ebx, FixedArray::kLengthOffset), ecx);
// Fill contents of fixed-array with the-hole.
+ __ SmiUntag(ecx);
__ mov(edx, Immediate(Factory::the_hole_value()));
__ lea(ebx, FieldOperand(ebx, FixedArray::kHeaderSize));
// Fill fixed array elements with hole.
@@ -6706,7 +6869,6 @@ void DeferredSearchCache::Generate() {
// Check if we could add new entry to cache.
__ mov(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
- __ SmiTag(ebx);
__ cmp(ebx, FieldOperand(ecx, JSFunctionResultCache::kCacheSizeOffset));
__ j(greater, &add_new_entry);
@@ -6904,12 +7066,8 @@ void CodeGenerator::GenerateSwapElements(ZoneList<Expression*>* args) {
// (or them and test against Smi mask.)
__ mov(tmp2.reg(), tmp1.reg());
- RecordWriteStub recordWrite1(tmp2.reg(), index1.reg(), object.reg());
- __ CallStub(&recordWrite1);
-
- RecordWriteStub recordWrite2(tmp1.reg(), index2.reg(), object.reg());
- __ CallStub(&recordWrite2);
-
+ __ RecordWriteHelper(tmp2.reg(), index1.reg(), object.reg());
+ __ RecordWriteHelper(tmp1.reg(), index2.reg(), object.reg());
__ bind(&done);
deferred->BindExit();
@@ -8608,13 +8766,8 @@ Result CodeGenerator::EmitKeyedLoad() {
key.ToRegister();
receiver.ToRegister();
- // Use a fresh temporary for the index and later the loaded
- // value.
- result = allocator()->Allocate();
- ASSERT(result.is_valid());
-
DeferredReferenceGetKeyedValue* deferred =
- new DeferredReferenceGetKeyedValue(result.reg(),
+ new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
key.reg());
@@ -8647,19 +8800,20 @@ Result CodeGenerator::EmitKeyedLoad() {
Immediate(Factory::fixed_array_map()));
deferred->Branch(not_equal);
- // Shift the key to get the actual index value and check that
- // it is within bounds. Use unsigned comparison to handle negative keys.
- __ mov(result.reg(), key.reg());
- __ SmiUntag(result.reg());
- __ cmp(result.reg(),
+ // Check that the key is within bounds.
+ __ cmp(key.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
- __ mov(result.reg(), Operand(elements.reg(),
- result.reg(),
- times_4,
- FixedArray::kHeaderSize - kHeapObjectTag));
- elements.Unuse();
+ // Load and check that the result is not the hole.
+ // Key holds a smi.
+ ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+ __ mov(elements.reg(),
+ FieldOperand(elements.reg(),
+ key.reg(),
+ times_2,
+ FixedArray::kHeaderSize));
+ result = elements;
__ cmp(Operand(result.reg()), Immediate(Factory::the_hole_value()));
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
@@ -8744,7 +8898,7 @@ Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
// Check whether it is possible to omit the write barrier. If the elements
// array is in new space or the value written is a smi we can safely update
- // the elements array without updating the remembered set.
+ // the elements array without write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@@ -9014,7 +9168,8 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ mov(FieldOperand(eax, HeapObject::kMapOffset), Factory::context_map());
- __ mov(FieldOperand(eax, Array::kLengthOffset), Immediate(length));
+ __ mov(FieldOperand(eax, Context::kLengthOffset),
+ Immediate(Smi::FromInt(length)));
// Setup the fixed slots.
__ xor_(ebx, Operand(ebx)); // Set to NULL.
@@ -10977,9 +11132,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ test(ecx, Operand(ecx));
__ j(zero, &done);
- // Get the parameters pointer from the stack and untag the length.
+ // Get the parameters pointer from the stack.
__ mov(edx, Operand(esp, 2 * kPointerSize));
- __ SmiUntag(ecx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -10988,6 +11142,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ mov(FieldOperand(edi, FixedArray::kMapOffset),
Immediate(Factory::fixed_array_map()));
__ mov(FieldOperand(edi, FixedArray::kLengthOffset), ecx);
+ // Untag the length for the loop below.
+ __ SmiUntag(ecx);
// Copy the fixed array slots.
Label loop;
@@ -11116,6 +11272,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information.
__ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ SmiUntag(eax);
__ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
__ cmp(edx, Operand(eax));
__ j(greater, &runtime);
@@ -11359,7 +11516,7 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
__ mov(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shr(mask, 1); // Divide length by two (length is not a smi).
+ __ shr(mask, kSmiTagSize + 1); // Untag length and divide it by two.
__ sub(Operand(mask), Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
@@ -11450,12 +11607,6 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-void RecordWriteStub::Generate(MacroAssembler* masm) {
- masm->RecordWriteHelper(object_, addr_, scratch_);
- masm->ret(0);
-}
-
-
static int NegativeComparisonResult(Condition cc) {
ASSERT(cc != equal);
ASSERT((cc == less) || (cc == less_equal)
@@ -12390,152 +12541,204 @@ const char* CompareStub::GetName() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
ASSERT(kSmiTag == 0);
- __ test(object, Immediate(kSmiTagMask));
- __ j(zero, receiver_not_string);
+ __ test(object_, Immediate(kSmiTagMask));
+ __ j(zero, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
- __ test(result, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string);
+ __ test(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
ASSERT(kSmiTag == 0);
- __ test(index, Immediate(kSmiTagMask));
- __ j(not_zero, index_not_smi);
+ __ test(index_, Immediate(kSmiTagMask));
+ __ j(not_zero, &index_not_smi_);
- // Check for index out of range.
- __ cmp(index, FieldOperand(object, String::kLengthOffset));
- __ j(above_equal, index_out_of_range);
+ // Put smi-tagged index into scratch register.
+ __ mov(scratch_, index_);
+ __ bind(&got_smi_index_);
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ // Check for index out of range.
+ __ cmp(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
ASSERT(kSeqStringTag == 0);
- __ test(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
-
- // Check for 1-byte or 2-byte string.
- ASSERT(kAsciiStringTag != 0);
- __ test(result, Immediate(kStringEncodingMask));
- __ j(not_zero, &ascii_string);
-
- // 2-byte string.
- // Load the 2-byte character code into the result register.
- ASSERT(kSmiTag == 0 && kSmiTagSize == 1); // index is smi (powered by 2).
- __ movzx_w(result, FieldOperand(object,
- index, times_1,
- SeqTwoByteString::kHeaderSize));
- __ jmp(&got_char_code);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
// Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, kStringRepresentationMask);
- __ cmp(result, kConsStringTag);
- __ j(not_equal, slow_case);
+ __ test(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
// ConsString.
// Check whether the right hand side is the empty string (i.e. if
// this is really a flat string in a cons string). If that is not
// the case we would rather go to the runtime system now to flatten
// the string.
- __ mov(result, FieldOperand(object, ConsString::kSecondOffset));
- __ cmp(Operand(result), Factory::empty_string());
- __ j(not_equal, slow_case);
+ __ cmp(FieldOperand(object_, ConsString::kSecondOffset),
+ Immediate(Factory::empty_string()));
+ __ j(not_equal, &call_runtime_);
// Get the first of the two strings and load its instance type.
- __ mov(object, FieldOperand(object, ConsString::kFirstOffset));
- __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
+ __ mov(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ test(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
- // ASCII string.
- __ bind(&ascii_string);
- // Put untagged index into scratch register.
- __ mov(scratch, index);
- __ SmiUntag(scratch);
+ // Check for 1-byte or 2-byte string.
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ test(result_, Immediate(kStringEncodingMask));
+ __ j(not_zero, &ascii_string);
+ // 2-byte string.
+ // Load the 2-byte character code into the result register.
+ ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+ __ movzx_w(result_, FieldOperand(object_,
+ scratch_, times_1, // Scratch is smi-tagged.
+ SeqTwoByteString::kHeaderSize));
+ __ jmp(&got_char_code);
+
+ // ASCII string.
// Load the byte into the result register.
- __ movzx_b(result, FieldOperand(object,
- scratch, times_1,
- SeqAsciiString::kHeaderSize));
+ __ bind(&ascii_string);
+ __ SmiUntag(scratch_);
+ __ movzx_b(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ SmiTag(result);
+ __ SmiTag(result_);
+ __ bind(&exit_);
}
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
+
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(result_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(eax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ mov(scratch_, eax);
+ }
+ __ pop(result_);
+ __ pop(index_);
+ __ pop(object_);
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ ASSERT(kSmiTag == 0);
+ __ test(scratch_, Immediate(kSmiTagMask));
+ __ j(not_zero, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but getting the actual character is too complex
+ // (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
- Label slow_case;
- Label exit;
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
ASSERT(kSmiTag == 0);
ASSERT(kSmiShiftSize == 0);
ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(code,
+ __ test(code_,
Immediate(kSmiTagMask |
((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case, not_taken);
+ __ j(not_zero, &slow_case_, not_taken);
- __ Set(result, Immediate(Factory::single_character_string_cache()));
+ __ Set(result_, Immediate(Factory::single_character_string_cache()));
ASSERT(kSmiTag == 0);
ASSERT(kSmiTagSize == 1);
ASSERT(kSmiShiftSize == 0);
// At this point code register contains smi tagged ascii char code.
- __ mov(result, FieldOperand(result,
- code, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(result, Factory::undefined_value());
- __ j(equal, &slow_case, not_taken);
- __ jmp(&exit);
+ __ mov(result_, FieldOperand(result_,
+ code_, times_half_pointer_size,
+ FixedArray::kHeaderSize));
+ __ cmp(result_, Factory::undefined_value());
+ __ j(equal, &slow_case_, not_taken);
+ __ bind(&exit_);
+}
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(eax)) {
- __ mov(result, eax);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(eax));
- __ pop(eax); // Save return address.
- __ push(code);
- __ push(eax); // Restore return address.
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(eax));
- __ ret(0);
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(eax)) {
+ __ mov(result_, eax);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index a098dc3859..ea182ab9cc 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -38,8 +38,10 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
+class FrameRegisterState;
class RegisterAllocator;
class RegisterFile;
+class RuntimeCallHelper;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@@ -621,10 +623,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -910,37 +915,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be eax register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
@@ -1083,42 +1057,6 @@ class NumberToStringStub: public CodeStub {
};
-class RecordWriteStub : public CodeStub {
- public:
- RecordWriteStub(Register object, Register addr, Register scratch)
- : object_(object), addr_(addr), scratch_(scratch) { }
-
- void Generate(MacroAssembler* masm);
-
- private:
- Register object_;
- Register addr_;
- Register scratch_;
-
-#ifdef DEBUG
- void Print() {
- PrintF("RecordWriteStub (object reg %d), (addr reg %d), (scratch reg %d)\n",
- object_.code(), addr_.code(), scratch_.code());
- }
-#endif
-
- // Minor key encoding in 12 bits. 4 bits for each of the three
- // registers (object, address and scratch) OOOOAAAASSSS.
- class ScratchBits: public BitField<uint32_t, 0, 4> {};
- class AddressBits: public BitField<uint32_t, 4, 4> {};
- class ObjectBits: public BitField<uint32_t, 8, 4> {};
-
- Major MajorKey() { return RecordWrite; }
-
- int MinorKey() {
- // Encode the registers.
- return ObjectBits::encode(object_.code()) |
- AddressBits::encode(addr_.code()) |
- ScratchBits::encode(scratch_.code());
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
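
The StringHelper entry points deleted above are replaced by the StringChar*Generator classes, which split each operation into an inline fast path and a deferred slow path parameterized by a RuntimeCallHelper. A schematic, compilable sketch of that shape (plain C++ stand-ins; the real classes emit assembly through a MacroAssembler):

    #include <cstdio>

    // Call-site-specific hook for preserving state around a runtime call.
    class RuntimeCallHelperSketch {
     public:
      virtual ~RuntimeCallHelperSketch() {}
      virtual void BeforeCall() const = 0;   // e.g. save the virtual frame
      virtual void AfterCall() const = 0;    // e.g. restore it
    };

    class SaveFrameHelperSketch : public RuntimeCallHelperSketch {
     public:
      virtual void BeforeCall() const { std::puts("save frame state"); }
      virtual void AfterCall() const { std::puts("restore frame state"); }
    };

    class CharCodeAtGeneratorSketch {
     public:
      // Emitted inline: flat strings with in-range smi indices.
      void GenerateFast() { std::puts("fast path"); }
      // Emitted out of line: conversion, flattening, runtime calls.
      void GenerateSlow(const RuntimeCallHelperSketch& helper) {
        helper.BeforeCall();
        std::puts("call runtime");
        helper.AfterCall();
      }
    };

One fast-path implementation can then serve both the classic code generator (whose DeferredCode saves the virtual frame) and the full code generator (which wraps the call in an internal frame), each supplying its own helper.
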
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 1b78772f74..ae64d023c0 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -1009,7 +1009,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(eax); // Map.
__ push(edx); // Enumeration cache.
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ SmiTag(eax);
__ push(eax); // Enumeration cache length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
__ jmp(&loop);
@@ -1019,7 +1018,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(Immediate(Smi::FromInt(0))); // Map (0) - force slow check.
__ push(eax);
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ SmiTag(eax);
__ push(eax); // Fixed array length (as smi).
__ push(Immediate(Smi::FromInt(0))); // Initial index.
@@ -1904,76 +1902,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2432,50 +2360,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiShiftSize == 0);
- ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
- __ test(eax,
- Immediate(kSmiTagMask |
- ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
- __ j(not_zero, &slow_case);
- __ Set(ebx, Immediate(Factory::single_character_string_cache()));
- ASSERT(kSmiTag == 0);
- ASSERT(kSmiTagSize == 1);
- ASSERT(kSmiShiftSize == 0);
- // At this point code register contains smi tagged ascii char code.
- __ mov(ebx, FieldOperand(ebx,
- eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- __ cmp(ebx, Factory::undefined_value());
- __ j(equal, &slow_case);
- __ mov(eax, ebx);
+ Label done;
+ StringCharFromCodeGenerator generator(eax, ebx);
+ generator.GenerateFast(masm_);
__ jmp(&done);
- __ bind(&slow_case);
- __ push(eax);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, eax);
+ Apply(context_, ebx);
}
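
Note on the pattern above: the hand-inlined single-character cache lookup is replaced by a StringCharFromCodeGenerator whose GenerateFast emits the inline path and whose GenerateSlow emits the out-of-line runtime call, with a RuntimeCallHelper deciding how that call is framed (NopRuntimeCallHelper does nothing because full-codegen already has a frame). A minimal C++ restatement of this fast/slow shape, with illustrative stand-in bodies rather than the emitted ia32 code:

    #include <cstdio>

    // Helper object that decides how the slow path frames its call;
    // the no-op variant matches contexts that already have a frame.
    struct RuntimeCallHelper {
      virtual void BeforeCall() const {}
      virtual void AfterCall() const {}
      virtual ~RuntimeCallHelper() {}
    };
    struct NopRuntimeCallHelper : RuntimeCallHelper {};

    // Fast path: succeeds only for the cached ascii range.
    int CharFromCodeFast(int code, bool* ok) {
      *ok = (code >= 0 && code <= 127);
      return *ok ? code : -1;
    }

    // Slow path: stands in for the Runtime::kCharFromCode call.
    int CharFromCodeSlow(int code, const RuntimeCallHelper& helper) {
      helper.BeforeCall();
      int result = code & 0xFFFF;
      helper.AfterCall();
      return result;
    }

    int main() {
      bool ok;
      int r = CharFromCodeFast(0x20AC, &ok);
      if (!ok) r = CharFromCodeSlow(0x20AC, NopRuntimeCallHelper());
      std::printf("%d\n", r);
    }
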
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch = ecx;
+ Register result = edx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ Set(result, Immediate(Factory::nan_value()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
// Move the undefined value into the result register, which will
- // trigger the slow case.
- __ Set(eax, Immediate(Factory::undefined_value()));
- Apply(context_, eax);
+ // trigger conversion.
+ __ Set(result, Immediate(Factory::undefined_value()));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
+
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = ebx;
+ Register index = eax;
+ Register scratch1 = ecx;
+ Register scratch2 = edx;
+ Register result = eax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ Set(result, Immediate(Factory::empty_string()));
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Set(result, Immediate(Smi::FromInt(0)));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
+}
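
The two emitters above share the generator machinery and differ mainly in their out-of-range results, which track the spec as the comments note: charCodeAt produces NaN, charAt the empty string. A plain C++ restatement of just that semantic difference (illustrative; the real stubs operate on V8 heap strings):

    #include <cassert>
    #include <cmath>
    #include <string>

    // Out of range: NaN, per the spec comment in the emitter above.
    double CharCodeAt(const std::string& s, size_t i) {
      if (i >= s.size()) return std::nan("");
      return static_cast<unsigned char>(s[i]);
    }

    // Out of range: the empty string.
    std::string CharAt(const std::string& s, size_t i) {
      if (i >= s.size()) return std::string();
      return std::string(1, s[i]);
    }

    int main() {
      assert(std::isnan(CharCodeAt("abc", 5)));
      assert(CharAt("abc", 5).empty());
      assert(CharCodeAt("abc", 1) == 'b');
    }
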
+
+
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 644d20072e..2ba64dcae0 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -304,7 +304,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- edx : receiver
// -- esp[0] : return address
// -----------------------------------
- Label slow, check_string, index_int, index_string;
+ Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@@ -329,18 +329,17 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ test(eax, Immediate(kSmiTagMask));
__ j(not_zero, &check_string, not_taken);
- __ mov(ebx, eax);
- __ SmiUntag(ebx);
// Get the elements array of the object.
- __ bind(&index_int);
+ __ bind(&index_smi);
__ mov(ecx, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(ecx, Factory::fixed_array_map(), &check_pixel_array, true);
// Check that the key (index) is within bounds.
- __ cmp(ebx, FieldOperand(ecx, FixedArray::kLengthOffset));
+ __ cmp(eax, FieldOperand(ecx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
// Fast case: Do the load.
- __ mov(ecx, FieldOperand(ecx, ebx, times_4, FixedArray::kHeaderSize));
+ ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
__ cmp(Operand(ecx), Immediate(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
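
The rewritten load above no longer untags the key: an ia32 smi is the integer shifted left by one, so addressing the tagged index with times_2 is the same as addressing the raw index with the 4-byte pointer size, which is exactly what the new ASSERT records. A small arithmetic check of that identity, with the constants restated locally:

    #include <cassert>
    #include <cstdint>

    // kSmiTag == 0, kSmiTagSize == 1, kPointerSize == 4, as asserted above.
    const int kSmiTagSize = 1;
    const uint32_t kPointerSize = 4;

    uint32_t ElementOffset(uint32_t smi_index, uint32_t header_size) {
      // A smi stores value << 1, so scaling the tagged index by 2
      // equals scaling the untagged value by the pointer size.
      return header_size + smi_index * 2;
    }

    int main() {
      uint32_t value = 7;
      uint32_t smi = value << kSmiTagSize;
      assert(ElementOffset(smi, 8) == 8 + value * kPointerSize);
    }
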
@@ -352,9 +351,10 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_pixel_array);
// Check whether the elements is a pixel array.
// edx: receiver
- // ebx: untagged index
// eax: key
// ecx: elements
+ __ mov(ebx, eax);
+ __ SmiUntag(ebx);
__ CheckMap(ecx, Factory::pixel_array_map(), &check_number_dictionary, true);
__ cmp(ebx, FieldOperand(ecx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
@@ -485,9 +485,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- __ and_(ebx, String::kArrayIndexHashMask);
- __ shr(ebx, String::kHashShift);
- __ jmp(&index_int);
+ // We want the smi-tagged index in eax. kArrayIndexValueMask has zeros in
+ // the low kHashShift bits.
+ ASSERT(String::kHashShift >= kSmiTagSize);
+ __ and_(ebx, String::kArrayIndexValueMask);
+ __ shr(ebx, String::kHashShift - kSmiTagSize);
+ __ mov(eax, ebx);
+ __ jmp(&index_smi);
}
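
The index_string tail above exploits the same tagging trick: masking with kArrayIndexValueMask isolates the cached array index, and shifting right by kHashShift - kSmiTagSize untags the hash and smi-tags the value in a single instruction. A sketch with hypothetical stand-in constants (only the bit relationships matter, not these values):

    #include <cassert>
    #include <cstdint>

    // Stand-ins for String::kHashShift, kSmiTagSize and
    // String::kArrayIndexValueMask.
    const int kHashShift = 2;
    const int kSmiTagSize = 1;
    const uint32_t kArrayIndexValueMask = ~0u << kHashShift;

    uint32_t SmiTaggedIndex(uint32_t hash_field) {
      // Mask keeps the index bits; one shift both untags the hash
      // and retags the value as a smi.
      return (hash_field & kArrayIndexValueMask) >> (kHashShift - kSmiTagSize);
    }

    int main() {
      uint32_t index = 5;
      uint32_t hash_field = index << kHashShift;
      assert(SmiTaggedIndex(hash_field) == index << kSmiTagSize);
    }
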
@@ -498,60 +502,29 @@ void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// -- esp[0] : return address
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
Register receiver = edx;
Register index = eax;
- Register code = ebx;
- Register scratch = ecx;
-
- StringHelper::GenerateFastCharCodeAt(masm,
- receiver,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, eax, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CheckMap(index, Factory::heap_number_map(), &miss, true);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ push(receiver);
- __ push(index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(eax));
- __ mov(code, eax);
- __ LeaveInternalFrame();
+ Register scratch1 = ebx;
+ Register scratch2 = ecx;
+ Register result = eax;
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- ASSERT(kSmiTag == 0);
- __ test(code, Immediate(kSmiTagMask));
- __ j(zero, &got_char_code);
- __ mov(scratch, FieldOperand(code, HeapObject::kMapOffset));
- __ cmp(scratch, Factory::heap_number_map());
- __ Assert(equal, "StringCharCodeAt must return smi or heap number");
- }
- __ cmp(code, Factory::nan_value());
- __ j(not_equal, &got_char_code);
__ bind(&index_out_of_range);
__ Set(eax, Immediate(Factory::undefined_value()));
__ ret(0);
@@ -792,9 +765,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CheckMap(edi, Factory::fixed_array_map(), &check_pixel_array, true);
- __ mov(ebx, Operand(ecx));
- __ SmiUntag(ebx);
- __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(below, &fast, taken);
// Slow case: call runtime.
@@ -804,7 +775,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// Check whether the elements is a pixel array.
__ bind(&check_pixel_array);
// eax: value
- // ecx: key
+ // ecx: key (a smi)
// edx: receiver
// edi: elements array
__ CheckMap(edi, Factory::pixel_array_map(), &slow, true);
@@ -840,13 +811,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// edi: receiver->elements, a FixedArray
// flags: compare (ecx, edx.length())
__ j(not_equal, &slow, not_taken); // do not leave holes in the array
- __ mov(ebx, ecx);
- __ SmiUntag(ebx); // untag
- __ cmp(ebx, FieldOperand(edi, Array::kLengthOffset));
+ __ cmp(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
__ j(above_equal, &slow, not_taken);
// Add 1 to receiver->length, and go to fast array write.
__ add(FieldOperand(edx, JSArray::kLengthOffset),
- Immediate(1 << kSmiTagSize));
+ Immediate(Smi::FromInt(1)));
__ jmp(&fast);
// Array case: Get the length and the elements array from the JS
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index ba2fe2dd4e..5caa4c4186 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -60,49 +60,17 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- Label fast;
-
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
and_(object, ~Page::kPageAlignmentMask);
- Register page_start = object;
-
- // Compute the bit addr in the remembered set/index of the pointer in the
- // page. Reuse 'addr' as pointer_offset.
- sub(addr, Operand(page_start));
- shr(addr, kObjectAlignmentBits);
- Register pointer_offset = addr;
-
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- cmp(pointer_offset, Page::kPageSize / kPointerSize);
- j(less, &fast);
-
- // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
- // extra remembered set after the large object.
-
- // Find the length of the large object (FixedArray).
- mov(scratch, Operand(page_start, Page::kObjectStartOffset
- + FixedArray::kLengthOffset));
- Register array_length = scratch;
-
- // Extra remembered set starts right after the large object (a FixedArray), at
- // page_start + kObjectStartOffset + objectSize
- // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
- // Add the delta between the end of the normal RSet and the start of the
- // extra RSet to 'page_start', so that addressing the bit using
- // 'pointer_offset' hits the extra RSet words.
- lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bind(&fast);
- bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
+
+  // Compute the number of the region covering addr; see the
+  // Page::GetRegionNumberForAddress method for details.
+ and_(addr, Page::kPageAlignmentMask);
+ shr(addr, Page::kRegionSizeLog2);
+
+ // Set dirty mark for region.
+ bts(Operand(object, Page::kDirtyFlagOffset), addr);
}
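
The rewritten helper above replaces the per-pointer remembered-set bit with a coarse region dirty mark: mask the slot address down to its page offset, shift by Page::kRegionSizeLog2 to get a region number, then bts that bit at Page::kDirtyFlagOffset. A sketch of the same computation under assumed page geometry (8K pages and 256-byte regions are illustrative values, not the real constants):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageAlignmentMask = (1 << 13) - 1;  // 8K pages assumed
    const int kRegionSizeLog2 = 8;                       // 256-byte regions

    int RegionNumber(uintptr_t addr) {
      // Same two steps as the emitted code: mask to the offset within
      // the page, then shift down to a region index.
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    int main() {
      uintptr_t page_start = 0x40000000;
      uint32_t dirty_marks = 0;
      dirty_marks |= 1u << RegionNumber(page_start + 0x5F0);  // the bts step
      assert(dirty_marks == (1u << 5));
    }
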
@@ -130,7 +98,7 @@ void MacroAssembler::InNewSpace(Register object,
}
-// Set the remembered set bit for [object+offset].
+// For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@@ -142,9 +110,8 @@ void MacroAssembler::RecordWrite(Register object, int offset,
// registers are esi.
ASSERT(!object.is(esi) && !value.is(esi) && !scratch.is(esi));
- // First, check if a remembered set write is even needed. The tests below
- // catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits).
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
Label done;
// Skip barrier if writing a smi.
@@ -160,47 +127,19 @@ void MacroAssembler::RecordWrite(Register object, int offset,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- // We use optimized write barrier code if the word being written to is not in
- // a large object chunk or is in the first page of a large object chunk.
- // We make sure that an offset is inside the right limits whether it is
- // tagged or untagged.
- if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
- // Compute the bit offset in the remembered set, leave it in 'value'.
- lea(value, Operand(object, offset));
- and_(value, Page::kPageAlignmentMask);
- shr(value, kPointerSizeLog2);
-
- // Compute the page address from the heap object pointer, leave it in
- // 'object'.
- and_(object, ~Page::kPageAlignmentMask);
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), value);
+ Register dst = scratch;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
} else {
- Register dst = scratch;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
- // into an array of words.
- ASSERT_EQ(1, kSmiTagSize);
- ASSERT_EQ(0, kSmiTag);
- lea(dst, Operand(object, dst, times_half_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
- }
- // If we are already generating a shared stub, not inlining the
- // record write code isn't going to save us any memory.
- if (generating_stub()) {
- RecordWriteHelper(object, dst, value);
- } else {
- RecordWriteStub stub(object, dst, value);
- CallStub(&stub);
- }
+ // Array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric. Multiply a smi by 2 to get an offset
+ // into an array of words.
+ ASSERT_EQ(1, kSmiTagSize);
+ ASSERT_EQ(0, kSmiTag);
+ lea(dst, Operand(object, dst, times_half_pointer_size,
+ FixedArray::kHeaderSize - kHeapObjectTag));
}
+ RecordWriteHelper(object, dst, value);
bind(&done);
@@ -1384,6 +1323,7 @@ void MacroAssembler::InvokeFunction(Register fun,
mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
+ SmiUntag(ebx);
mov(edx, FieldOperand(edx, SharedFunctionInfo::kCodeOffset));
lea(edx, FieldOperand(edx, Code::kHeaderSize));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 9c8dfb2803..387426ff94 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -59,8 +59,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
- // Set the remebered set bit for an address which points into an
- // object. RecordWriteHelper only works if the object is not in new
+ // For page containing |object| mark region covering |addr| dirty.
+ // RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@@ -73,7 +73,7 @@ class MacroAssembler: public Assembler {
Condition cc, // equal for new space, not_equal otherwise.
Label* branch);
- // Set the remembered set bit for [object+offset].
+ // For page containing |object| mark region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index eb555d705d..5bb5be617f 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -300,203 +300,6 @@ static void CompileCallLoadPropertyWithInterceptor(MacroAssembler* masm,
}
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ test(receiver, Immediate(kSmiTagMask));
- __ j(zero, miss, not_taken);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS) {
- __ push(receiver);
- }
- __ push(holder);
- __ push(name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ cmp(eax, Factory::no_interceptor_result_sentinel());
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- if (lookup->type() == CALLBACKS) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm, eax,
- holder, lookup->holder(),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call: push receiver to stack after return address.
- Label cleanup;
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(scratch2);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters after
- // return address.
- __ pop(scratch2); // return address
- __ push(holder);
- __ mov(holder, Immediate(Handle<AccessorInfo>(callback)));
- __ push(holder);
- __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
- __ push(name_);
- __ push(scratch2); // restore return address
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver after return address and
- // need to remove it from there.
- __ bind(&cleanup);
- __ pop(scratch1); // return address.
- __ pop(scratch2); // receiver.
- __ push(scratch1);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- __ pop(scratch); // save old return address
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ push(scratch); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -683,9 +486,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -698,10 +501,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(),
- scratch1, scratch2, name,
- depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+        // CheckPrototypes has a side effect of fetching a 'holder' for
+        // the API call (the object which is instanceof for the
+        // signature). It is safe to omit it here: if present, it has
+        // already been fetched by the previous CheckPrototypes.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -1060,7 +870,7 @@ void StubCompiler::GenerateLoadConstant(JSObject* object,
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -1068,18 +878,130 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ test(receiver, Immediate(kSmiTagMask));
+ __ j(zero, miss, not_taken);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD
+  // and CALLBACKS, so inline only them; other cases may be added
+  // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ // CALLBACKS case needs a receiver to be passed into C++ callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+    // Invoke an interceptor. Note: map checks from the receiver to the
+    // interceptor's holder have been compiled before (see a caller of
+    // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+ // Check if interceptor provided a value for property. If it's
+ // the case, return immediately.
+ Label interceptor_failed;
+ __ cmp(eax, Factory::no_interceptor_result_sentinel());
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+ // Check that the maps from interceptor's holder to lookup's holder
+ // haven't changed. And load lookup's holder into holder_reg.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+ // We found FIELD property in prototype chain of interceptor's holder.
+ // Retrieve a field from field's holder.
+ GenerateFastPropertyLoad(masm(), eax, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+ // We found CALLBACKS property in prototype chain of interceptor's
+ // holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+ // Important invariant in CALLBACKS case: the code above must be
+ // structured to never clobber |receiver| register.
+ __ pop(scratch2); // return address
+ __ push(receiver);
+ __ push(holder_reg);
+ __ mov(holder_reg, Immediate(Handle<AccessorInfo>(callback)));
+ __ push(holder_reg);
+ __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ push(name_reg);
+ __ push(scratch2); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg =
+ CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ __ pop(scratch2); // save old return address
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
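
Folding LoadInterceptorCompiler into GenerateLoadInterceptor preserves its central decision: only FIELD follow-ups and CALLBACKS follow-ups with a real getter are inlined after the interceptor call; everything else takes the runtime path. A condensed restatement of that predicate using simplified stand-in types:

    #include <cassert>

    enum LookupType { FIELD, CALLBACKS, OTHER };

    // Simplified stand-in for LookupResult; only the fields the
    // decision reads are modeled.
    struct Lookup {
      bool is_property;
      bool is_cacheable;
      LookupType type;
      bool callback_has_getter;
    };

    bool CompileFollowupInline(const Lookup& lookup) {
      // Inline only the two common follow-ups: FIELD, and CALLBACKS
      // with a non-NULL getter.
      if (!lookup.is_property || !lookup.is_cacheable) return false;
      if (lookup.type == FIELD) return true;
      return lookup.type == CALLBACKS && lookup.callback_has_getter;
    }

    int main() {
      Lookup field = { true, true, FIELD, false };
      Lookup other = { true, true, OTHER, false };
      assert(CompileFollowupInline(field));
      assert(!CompileFollowupInline(other));
    }
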
@@ -1206,7 +1128,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+ Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1216,7 +1138,6 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Get the element's length into ecx.
__ mov(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
- __ SmiTag(ecx);
// Check if we could survive without allocation.
__ cmp(eax, Operand(ecx));
@@ -1234,17 +1155,16 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Check if value is a smi.
__ test(ecx, Immediate(kSmiTagMask));
- __ j(not_zero, &with_rset_update);
+ __ j(not_zero, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
- __ bind(&with_rset_update);
+ __ bind(&with_write_barrier);
__ InNewSpace(ebx, ecx, equal, &exit);
- RecordWriteStub stub(ebx, edx, ecx);
- __ CallStub(&stub);
+ __ RecordWriteHelper(ebx, edx, ecx);
__ ret((argc + 1) * kPointerSize);
__ bind(&attempt_to_grow_elements);
@@ -1284,10 +1204,10 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
// Increment element's and array's sizes.
__ add(FieldOperand(ebx, FixedArray::kLengthOffset),
- Immediate(kAllocationDelta));
+ Immediate(Smi::FromInt(kAllocationDelta)));
__ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- // Elements are in new space, so no remembered set updates are necessary.
+ // Elements are in new space, so write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
@@ -1389,6 +1309,140 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : function name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+
+ Register receiver = ebx;
+ Register index = ecx;
+ Register scratch = edx;
+ Register result = eax;
+ __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+ } else {
+ __ Set(index, Immediate(Factory::undefined_value()));
+ }
+
+ StringCharCodeAtGenerator char_code_at_generator(receiver,
+ index,
+ scratch,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_code_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_code_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::nan_value()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+ // Restore function name in ecx.
+ __ Set(ecx, Immediate(Handle<String>(name)));
+
+ Handle<Code> ic = ComputeCallMiss(argc);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // ----------- S t a t e -------------
+ // -- ecx : function name
+ // -- esp[0] : return address
+ // -- esp[(argc - n) * 4] : arg[n] (zero-based)
+ // -- ...
+ // -- esp[(argc + 1) * 4] : receiver
+ // -----------------------------------
+
+ const int argc = arguments().immediate();
+
+ Label miss;
+ Label index_out_of_range;
+
+ // Check that the maps starting from the prototype haven't changed.
+ GenerateLoadGlobalFunctionPrototype(masm(),
+ Context::STRING_FUNCTION_INDEX,
+ eax);
+ CheckPrototypes(JSObject::cast(object->GetPrototype()), eax, holder,
+ ebx, edx, name, &miss);
+
+ Register receiver = eax;
+ Register index = ecx;
+ Register scratch1 = ebx;
+ Register scratch2 = edx;
+ Register result = eax;
+ __ mov(receiver, Operand(esp, (argc + 1) * kPointerSize));
+ if (argc > 0) {
+ __ mov(index, Operand(esp, (argc - 0) * kPointerSize));
+ } else {
+ __ Set(index, Immediate(Factory::undefined_value()));
+ }
+
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ char_at_generator.GenerateFast(masm());
+ __ ret((argc + 1) * kPointerSize);
+
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm(), call_helper);
+
+ __ bind(&index_out_of_range);
+ __ Set(eax, Immediate(Factory::empty_string()));
+ __ ret((argc + 1) * kPointerSize);
+
+ __ bind(&miss);
+ // Restore function name in ecx.
+ __ Set(ecx, Immediate(Handle<String>(name)));
+
+ Handle<Code> ic = ComputeCallMiss(argc);
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+
+ // Return the generated code.
+ return GetCode(function);
+}
+
+
Object* CallStubCompiler::CompileCallConstant(Object* object,
JSObject* holder,
JSFunction* function,
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index a8f23b0cc8..48d0fa2471 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -615,7 +615,7 @@ class VirtualFrame: public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
- friend class DeferredCode;
+ friend class FrameRegisterState;
friend class JumpTarget;
};
diff --git a/deps/v8/src/jump-target-heavy.cc b/deps/v8/src/jump-target-heavy.cc
index 468cf4a542..e0585e7942 100644
--- a/deps/v8/src/jump-target-heavy.cc
+++ b/deps/v8/src/jump-target-heavy.cc
@@ -332,22 +332,10 @@ void JumpTarget::ComputeEntryFrame() {
}
-DeferredCode::DeferredCode()
- : masm_(CodeGeneratorScope::Current()->masm()),
- statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
- ASSERT(statement_position_ != RelocInfo::kNoPosition);
- ASSERT(position_ != RelocInfo::kNoPosition);
-
- CodeGeneratorScope::Current()->AddDeferred(this);
-#ifdef DEBUG
- comment_ = "";
-#endif
-
+FrameRegisterState::FrameRegisterState(VirtualFrame* frame) {
// Copy the register locations from the code generator's frame.
// These are the registers that will be spilled on entry to the
// deferred code and restored on exit.
- VirtualFrame* frame = CodeGeneratorScope::Current()->frame();
int sp_offset = frame->fp_relative(frame->stack_pointer_);
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int loc = frame->register_location(i);
@@ -423,4 +411,19 @@ void BreakTarget::Branch(Condition cc, Hint hint) {
}
}
+
+DeferredCode::DeferredCode()
+ : masm_(CodeGeneratorScope::Current()->masm()),
+ statement_position_(masm_->current_statement_position()),
+ position_(masm_->current_position()),
+ frame_state_(CodeGeneratorScope::Current()->frame()) {
+ ASSERT(statement_position_ != RelocInfo::kNoPosition);
+ ASSERT(position_ != RelocInfo::kNoPosition);
+
+ CodeGeneratorScope::Current()->AddDeferred(this);
+#ifdef DEBUG
+ comment_ = "";
+#endif
+}
+
} } // namespace v8::internal
diff --git a/deps/v8/src/jump-target-light.cc b/deps/v8/src/jump-target-light.cc
index 76c3cb7f6b..19f7bfec0a 100644
--- a/deps/v8/src/jump-target-light.cc
+++ b/deps/v8/src/jump-target-light.cc
@@ -37,14 +37,15 @@ namespace internal {
DeferredCode::DeferredCode()
: masm_(CodeGeneratorScope::Current()->masm()),
statement_position_(masm_->current_statement_position()),
- position_(masm_->current_position()) {
+ position_(masm_->current_position()),
+ frame_state_(*CodeGeneratorScope::Current()->frame()) {
ASSERT(statement_position_ != RelocInfo::kNoPosition);
ASSERT(position_ != RelocInfo::kNoPosition);
CodeGeneratorScope::Current()->AddDeferred(this);
#ifdef DEBUG
- CodeGeneratorScope::Current()->frame()->AssertIsSpilled();
+ comment_ = "";
#endif
}
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 153374131e..7d97918245 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -159,3 +159,13 @@ macro LAST_INPUT(array) = ((array)[2]);
macro CAPTURE(index) = (3 + (index));
const CAPTURE0 = 3;
const CAPTURE1 = 4;
+
+# PropertyDescriptor return value indices - must match
+# PropertyDescriptorIndices in runtime.cc.
+const IS_ACCESSOR_INDEX = 0;
+const VALUE_INDEX = 1;
+const GETTER_INDEX = 2;
+const SETTER_INDEX = 3;
+const WRITABLE_INDEX = 4;
+const ENUMERABLE_INDEX = 5;
+const CONFIGURABLE_INDEX = 6;
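
These constants let the JavaScript natives unpack the fixed array returned by the runtime without magic numbers, so they must stay in lockstep with the C++ side. A hedged sketch of what that enum presumably looks like (the actual PropertyDescriptorIndices definition lives in runtime.cc and is not shown in this hunk):

    #include <cassert>

    // Hypothetical mirror of the PropertyDescriptorIndices enum the
    // comment above references; the macro values must match it
    // element for element.
    enum PropertyDescriptorIndices {
      IS_ACCESSOR_INDEX = 0,
      VALUE_INDEX,
      GETTER_INDEX,
      SETTER_INDEX,
      WRITABLE_INDEX,
      ENUMERABLE_INDEX,
      CONFIGURABLE_INDEX
    };

    int main() { assert(CONFIGURABLE_INDEX == 6); }
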
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 554b5795de..0dc3e0f019 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -84,9 +84,6 @@ void MarkCompactCollector::CollectGarbage() {
UpdatePointers();
RelocateObjects();
-
- RebuildRSets();
-
} else {
SweepSpaces();
}
@@ -121,14 +118,6 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
-#ifdef DEBUG
- if (compacting_collection_) {
- // We will write bookkeeping information to the remembered set area
- // starting now.
- Page::set_rset_state(Page::NOT_IN_USE);
- }
-#endif
-
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -150,7 +139,7 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
void MarkCompactCollector::Finish() {
#ifdef DEBUG
- ASSERT(state_ == SWEEP_SPACES || state_ == REBUILD_RSETS);
+ ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
state_ = IDLE;
#endif
// The stub cache is not traversed during GC; clear the cache to
@@ -244,8 +233,8 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
}
// Since we don't have the object's start, it is impossible to update the
- // remembered set. Therefore, we only replace the string with its left
- // substring when the remembered set does not change.
+ // page dirty marks. Therefore, we only replace the string with its left
+ // substring when page dirty marks do not change.
Object* first = reinterpret_cast<ConsString*>(object)->unchecked_first();
if (!Heap::InNewSpace(object) && Heap::InNewSpace(first)) return object;
@@ -776,6 +765,7 @@ void MarkCompactCollector::SweepLargeObjectSpace() {
Heap::lo_space()->FreeUnmarkedObjects();
}
+
// Safe to use during marking phase only.
bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
MapWord metamap = object->map_word();
@@ -783,6 +773,7 @@ bool MarkCompactCollector::SafeIsMap(HeapObject* object) {
return metamap.ToMap()->instance_type() == MAP_TYPE;
}
+
void MarkCompactCollector::ClearNonLiveTransitions() {
HeapObjectIterator map_iterator(Heap::map_space(), &CountMarkedCallback);
// Iterate over the map space, setting map transitions that go from
@@ -1078,13 +1069,18 @@ void MarkCompactCollector::EncodeForwardingAddressesInPagedSpace(
// first word of object without any encoding. If object is dead we are writing
// NULL as a forwarding address.
// The second pass updates pointers to new space in all spaces. It is possible
-// to encounter pointers to dead objects during traversal of remembered set for
-// map space because remembered set bits corresponding to dead maps are cleared
-// later during map space sweeping.
-static void MigrateObject(Address dst, Address src, int size) {
- Heap::CopyBlock(reinterpret_cast<Object**>(dst),
- reinterpret_cast<Object**>(src),
- size);
+// to encounter pointers to dead objects during traversal of dirty regions;
+// we should clear them to avoid encountering them again during the next
+// iteration of dirty regions.
+static void MigrateObject(Address dst,
+ Address src,
+ int size,
+ bool to_old_space) {
+ if (to_old_space) {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size);
+ } else {
+ Heap::CopyBlock(dst, src, size);
+ }
Memory::Address_at(src) = dst;
}
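
MigrateObject now threads a to_old_space flag so that copies landing in old pointer space also refresh the destination page's region dirty marks, replacing the old post-hoc Heap::UpdateRSet calls. A minimal model of that contract, with the mark update reduced to a single flag (the real helper recomputes marks per covered region):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Stand-in for Heap::CopyBlock.
    void CopyBlock(uint8_t* dst, const uint8_t* src, int size) {
      std::memcpy(dst, src, size);
    }

    // Stand-in for Heap::CopyBlockToOldSpaceAndUpdateRegionMarks.
    void CopyBlockToOldSpaceAndUpdateRegionMarks(uint8_t* dst,
                                                 const uint8_t* src,
                                                 int size,
                                                 bool* region_dirty) {
      std::memcpy(dst, src, size);
      *region_dirty = true;  // copied words may point into new space
    }

    void MigrateObject(uint8_t* dst, const uint8_t* src, int size,
                       bool to_old_space, bool* region_dirty) {
      if (to_old_space) {
        CopyBlockToOldSpaceAndUpdateRegionMarks(dst, src, size, region_dirty);
      } else {
        CopyBlock(dst, src, size);
      }
    }

    int main() {
      uint8_t a[8] = {1, 2, 3, 4, 5, 6, 7, 8}, b[8];
      bool dirty = false;
      MigrateObject(b, a, 8, true, &dirty);
      assert(dirty && b[0] == 1);
    }
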
@@ -1131,6 +1127,7 @@ class PointersToNewGenUpdatingVisitor: public ObjectVisitor {
}
};
+
// Visitor for updating pointers from live objects in old spaces to new space.
// It can encounter pointers to dead objects in new space when traversing map
// space (see comment for MigrateObject).
@@ -1142,10 +1139,13 @@ static void UpdatePointerToNewGen(HeapObject** p) {
Address new_addr = Memory::Address_at(old_addr);
- // Object pointed by *p is dead. Update is not required.
- if (new_addr == NULL) return;
-
- *p = HeapObject::FromAddress(new_addr);
+ if (new_addr == NULL) {
+    // We encountered a pointer to a dead object. Clear it so we do
+    // not visit it again during the next iteration of dirty regions.
+ *p = NULL;
+ } else {
+ *p = HeapObject::FromAddress(new_addr);
+ }
}
@@ -1163,8 +1163,7 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = Heap::lo_space()->AllocateRawFixedArray(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- Heap::UpdateRSet(target);
+ MigrateObject(target->address(), object->address(), object_size, true);
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@@ -1177,10 +1176,10 @@ static bool TryPromoteObject(HeapObject* object, int object_size) {
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
- MigrateObject(target->address(), object->address(), object_size);
- if (target_space == Heap::old_pointer_space()) {
- Heap::UpdateRSet(target);
- }
+ MigrateObject(target->address(),
+ object->address(),
+ object_size,
+ target_space == Heap::old_pointer_space());
MarkCompactCollector::tracer()->
increment_promoted_objects_size(object_size);
return true;
@@ -1222,14 +1221,16 @@ static void SweepNewSpace(NewSpace* space) {
continue;
}
- // Promotion either failed or not required.
- // Copy the content of the object.
+      // Promotion failed. Just migrate the object to the other semispace.
Object* target = space->AllocateRaw(size);
// Allocation cannot fail at this point: semispaces are of equal size.
ASSERT(!target->IsFailure());
- MigrateObject(HeapObject::cast(target)->address(), current, size);
+ MigrateObject(HeapObject::cast(target)->address(),
+ current,
+ size,
+ false);
} else {
size = object->Size();
Memory::Address_at(current) = NULL;
@@ -1255,9 +1256,12 @@ static void SweepNewSpace(NewSpace* space) {
Heap::IterateRoots(&updating_visitor, VISIT_ALL_IN_SCAVENGE);
// Update pointers in old spaces.
- Heap::IterateRSet(Heap::old_pointer_space(), &UpdatePointerToNewGen);
- Heap::IterateRSet(Heap::map_space(), &UpdatePointerToNewGen);
- Heap::lo_space()->IterateRSet(&UpdatePointerToNewGen);
+ Heap::IterateDirtyRegions(Heap::old_pointer_space(),
+ &Heap::IteratePointersInDirtyRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
+ Heap::lo_space()->IterateDirtyRegions(&UpdatePointerToNewGen);
// Update pointers from cells.
HeapObjectIterator cell_iterator(Heap::cell_space());
@@ -1323,7 +1327,10 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
MarkCompactCollector::tracer()->decrement_marked_count();
if (!is_previous_alive) { // Transition from free to live.
- dealloc(free_start, static_cast<int>(current - free_start), true);
+ dealloc(free_start,
+ static_cast<int>(current - free_start),
+ true,
+ false);
is_previous_alive = true;
}
} else {
@@ -1353,7 +1360,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// without putting anything into free list.
int size_in_bytes = static_cast<int>(p->AllocationTop() - free_start);
if (size_in_bytes > 0) {
- dealloc(free_start, size_in_bytes, false);
+ dealloc(free_start, size_in_bytes, false, true);
}
}
} else {
@@ -1367,7 +1374,9 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
    // If there is a free ending area on one of the previous pages we
    // have to deallocate that area and put it on the free list.
if (last_free_size > 0) {
- dealloc(last_free_start, last_free_size, true);
+ Page::FromAddress(last_free_start)->
+ SetAllocationWatermark(last_free_start);
+ dealloc(last_free_start, last_free_size, true, true);
last_free_start = NULL;
last_free_size = 0;
}
@@ -1398,7 +1407,7 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
// There was a free ending area on the previous page.
// Deallocate it without putting it into freelist and move allocation
// top to the beginning of this free area.
- dealloc(last_free_start, last_free_size, false);
+ dealloc(last_free_start, last_free_size, false, true);
new_allocation_top = last_free_start;
}
@@ -1421,34 +1430,36 @@ static void SweepSpace(PagedSpace* space, DeallocateFunction dealloc) {
void MarkCompactCollector::DeallocateOldPointerBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
- Heap::ClearRSetRange(start, size_in_bytes);
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_pointer_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateOldDataBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::old_data_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateCodeBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
Heap::code_space()->Free(start, size_in_bytes, add_to_freelist);
}
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += Map::kSize) {
Heap::map_space()->Free(a, add_to_freelist);
@@ -1458,13 +1469,13 @@ void MarkCompactCollector::DeallocateMapBlock(Address start,
void MarkCompactCollector::DeallocateCellBlock(Address start,
int size_in_bytes,
- bool add_to_freelist) {
+ bool add_to_freelist,
+ bool last_on_page) {
// Free-list elements in cell space are assumed to have a fixed size.
// We break the free block into chunks and add them to the free list
// individually.
int size = Heap::cell_space()->object_size_in_bytes();
ASSERT(size_in_bytes % size == 0);
- Heap::ClearRSetRange(start, size_in_bytes);
Address end = start + size_in_bytes;
for (Address a = start; a < end; a += size) {
Heap::cell_space()->Free(a, add_to_freelist);
@@ -1563,20 +1574,6 @@ class MapCompact {
GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
}
- void FinishMapSpace() {
- // Iterate through to space and finish move.
- MapIterator it;
- HeapObject* o = it.next();
- for (; o != first_map_to_evacuate_; o = it.next()) {
- ASSERT(o != NULL);
- Map* map = reinterpret_cast<Map*>(o);
- ASSERT(!map->IsMarked());
- ASSERT(!map->IsOverflowed());
- ASSERT(map->IsMap());
- Heap::UpdateRSet(map);
- }
- }
-
void UpdateMapPointersInPagedSpace(PagedSpace* space) {
ASSERT(space != Heap::map_space());
@@ -1669,9 +1666,9 @@ class MapCompact {
ASSERT(Map::kSize % 4 == 0);
- Heap::CopyBlock(reinterpret_cast<Object**>(vacant_map->address()),
- reinterpret_cast<Object**>(map_to_evacuate->address()),
- Map::kSize);
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(vacant_map->address(),
+ map_to_evacuate->address(),
+ Map::kSize);
ASSERT(vacant_map->IsMap()); // Due to memcpy above.
@@ -1756,6 +1753,12 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepNewSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+
+ Heap::IterateDirtyRegions(Heap::map_space(),
+ &Heap::IteratePointersInDirtyMapsRegion,
+ &UpdatePointerToNewGen,
+ Heap::WATERMARK_SHOULD_BE_VALID);
+
int live_maps_size = Heap::map_space()->Size();
int live_maps = live_maps_size / Map::kSize;
ASSERT(live_map_objects_size_ == live_maps_size);
@@ -1766,7 +1769,6 @@ void MarkCompactCollector::SweepSpaces() {
map_compact.CompactMaps();
map_compact.UpdateMapPointersInRoots();
- map_compact.FinishMapSpace();
PagedSpaces spaces;
for (PagedSpace* space = spaces.next();
space != NULL; space = spaces.next()) {
@@ -2039,9 +2041,8 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
Page* forwarded_page = Page::FromAddress(first_forwarded);
int forwarded_offset = forwarded_page->Offset(first_forwarded);
- // Find end of allocation of in the page of first_forwarded.
- Address mc_top = forwarded_page->mc_relocation_top;
- int mc_top_offset = forwarded_page->Offset(mc_top);
+ // Find end of allocation in the page of first_forwarded.
+ int mc_top_offset = forwarded_page->AllocationWatermarkOffset();
// Check if current object's forward pointer is in the same page
// as the first live object's forwarding pointer
@@ -2058,7 +2059,7 @@ Address MarkCompactCollector::GetForwardingAddressInOldSpace(HeapObject* obj) {
offset += Page::kObjectStartOffset;
ASSERT_PAGE_OFFSET(offset);
- ASSERT(next_page->OffsetToAddress(offset) < next_page->mc_relocation_top);
+ ASSERT(next_page->OffsetToAddress(offset) < next_page->AllocationTop());
return next_page->OffsetToAddress(offset);
}
@@ -2103,16 +2104,12 @@ void MarkCompactCollector::RelocateObjects() {
// Flip from and to spaces
Heap::new_space()->Flip();
+ Heap::new_space()->MCCommitRelocationInfo();
+
// Set age_mark to bottom in to space
Address mark = Heap::new_space()->bottom();
Heap::new_space()->set_age_mark(mark);
- Heap::new_space()->MCCommitRelocationInfo();
-#ifdef DEBUG
- // It is safe to write to the remembered sets as remembered sets on a
- // page-by-page basis after committing the m-c forwarding pointer.
- Page::set_rset_state(Page::IN_USE);
-#endif
PagedSpaces spaces;
for (PagedSpace* space = spaces.next(); space != NULL; space = spaces.next())
space->MCCommitRelocationInfo();
@@ -2139,9 +2136,9 @@ int MarkCompactCollector::RelocateMapObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- Map::kSize);
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ Map::kSize);
}
#ifdef DEBUG
@@ -2198,9 +2195,13 @@ int MarkCompactCollector::RelocateOldNonCodeObject(HeapObject* obj,
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (space == Heap::old_data_space()) {
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::MoveBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
}
ASSERT(!HeapObject::FromAddress(new_addr)->IsCode());
@@ -2245,9 +2246,7 @@ int MarkCompactCollector::RelocateCodeObject(HeapObject* obj) {
if (new_addr != old_addr) {
// Move contents.
- Heap::MoveBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ Heap::MoveBlock(new_addr, old_addr, obj_size);
}
HeapObject* copied_to = HeapObject::FromAddress(new_addr);
@@ -2283,9 +2282,13 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
#endif
// New and old addresses cannot overlap.
- Heap::CopyBlock(reinterpret_cast<Object**>(new_addr),
- reinterpret_cast<Object**>(old_addr),
- obj_size);
+ if (Heap::InNewSpace(HeapObject::FromAddress(new_addr))) {
+ Heap::CopyBlock(new_addr, old_addr, obj_size);
+ } else {
+ Heap::CopyBlockToOldSpaceAndUpdateRegionMarks(new_addr,
+ old_addr,
+ obj_size);
+ }
#ifdef DEBUG
if (FLAG_gc_verbose) {
@@ -2302,18 +2305,6 @@ int MarkCompactCollector::RelocateNewObject(HeapObject* obj) {
}
-// -------------------------------------------------------------------------
-// Phase 5: rebuild remembered sets
-
-void MarkCompactCollector::RebuildRSets() {
-#ifdef DEBUG
- ASSERT(state_ == RELOCATE_OBJECTS);
- state_ = REBUILD_RSETS;
-#endif
- Heap::RebuildRSets();
-}
-
-
void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj) {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (obj->IsCode()) {
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 3950e7538b..1d289a7592 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -41,7 +41,8 @@ typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
// no attempt to add area to free list is made.
typedef void (*DeallocateFunction)(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
// Forward declarations.
@@ -131,8 +132,7 @@ class MarkCompactCollector: public AllStatic {
SWEEP_SPACES,
ENCODE_FORWARDING_ADDRESSES,
UPDATE_POINTERS,
- RELOCATE_OBJECTS,
- REBUILD_RSETS
+ RELOCATE_OBJECTS
};
// The current stage of the collector.
@@ -269,22 +269,22 @@ class MarkCompactCollector: public AllStatic {
// written to their map word's offset in the inactive
// semispace.
//
- // Bookkeeping data is written to the remembered-set are of
+ // Bookkeeping data is written to the page header of
// each paged-space page that contains live objects after
// compaction:
//
- // The 3rd word of the page (first word of the remembered
- // set) contains the relocation top address, the address of
- // the first word after the end of the last live object in
- // the page after compaction.
+ // The allocation watermark field is used to track the
+ // relocation top address, the address of the first word
+ // after the end of the last live object in the page after
+ // compaction.
//
- // The 4th word contains the zero-based index of the page in
- // its space. This word is only used for map space pages, in
+ // The Page::mc_page_index field contains the zero-based index of the
+ // page in its space. This word is only used for map space pages, in
// order to encode the map addresses in 21 bits to free 11
// bits per map word for the forwarding address.
//
- // The 5th word contains the (nonencoded) forwarding address
- // of the first live object in the page.
+ // The Page::mc_first_forwarded field contains the (nonencoded)
+ // forwarding address of the first live object in the page.
//
// In both the new space and the paged spaces, a linked list
// of live regions is constructed (linked through
@@ -319,23 +319,28 @@ class MarkCompactCollector: public AllStatic {
// generation.
static void DeallocateOldPointerBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateOldDataBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateCodeBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateMapBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
static void DeallocateCellBlock(Address start,
int size_in_bytes,
- bool add_to_freelist);
+ bool add_to_freelist,
+ bool last_on_page);
// If we are not compacting the heap, we simply sweep the spaces except
// for the large object space, clearing mark bits and adding unmarked
@@ -349,9 +354,7 @@ class MarkCompactCollector: public AllStatic {
//
// After: All pointers in live objects, including encoded map
// pointers, are updated to point to their target's new
- // location. The remembered set area of each paged-space
- // page containing live objects still contains bookkeeping
- // information.
+ // location.
friend class UpdatingVisitor; // helper for updating visited objects
@@ -373,13 +376,9 @@ class MarkCompactCollector: public AllStatic {
// Phase 4: Relocating objects.
//
// Before: Pointers to live objects are updated to point to their
- // target's new location. The remembered set area of each
- // paged-space page containing live objects still contains
- // bookkeeping information.
+ // target's new location.
//
- // After: Objects have been moved to their new addresses. The
- // remembered set area of each paged-space page containing
- // live objects still contains bookkeeping information.
+ // After: Objects have been moved to their new addresses.
// Relocates objects in all spaces.
static void RelocateObjects();
@@ -408,17 +407,6 @@ class MarkCompactCollector: public AllStatic {
// Copy a new object.
static int RelocateNewObject(HeapObject* obj);
- // -----------------------------------------------------------------------
- // Phase 5: Rebuilding remembered sets.
- //
- // Before: The heap is in a normal state except that remembered sets
- // in the paged spaces are not correct.
- //
- // After: The heap is in a normal state.
-
- // Rebuild remembered set in old and map spaces.
- static void RebuildRSets();
-
#ifdef DEBUG
// -----------------------------------------------------------------------
// Debugging variables, functions and classes
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index b0a3fd62fb..f9b20a4b49 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -806,7 +806,8 @@ void JSGlobalProxy::JSGlobalProxyVerify() {
VerifyObjectField(JSGlobalProxy::kContextOffset);
// Make sure that this object has no properties, elements.
CHECK_EQ(0, properties()->length());
- CHECK_EQ(0, elements()->length());
+ CHECK(HasFastElements());
+ CHECK_EQ(0, FixedArray::cast(elements())->length());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index d82d73ec50..c10c930d31 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -759,7 +759,8 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
ASSERT(mode == SKIP_WRITE_BARRIER); \
ASSERT(Heap::InNewSpace(object) || \
!Heap::InNewSpace(READ_FIELD(object, offset)) || \
- Page::IsRSetSet(object->address(), offset)); \
+ Page::FromAddress(object->address())-> \
+ IsRegionDirty(object->address() + offset)); \
}
#define READ_DOUBLE_FIELD(p, offset) \
@@ -1045,6 +1046,10 @@ Address MapWord::ToEncodedAddress() {
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
+
+void HeapObject::VerifySmiField(int offset) {
+ ASSERT(READ_FIELD(this, offset)->IsSmi());
+}
#endif
@@ -1064,7 +1069,7 @@ MapWord HeapObject::map_word() {
void HeapObject::set_map_word(MapWord map_word) {
- // WRITE_FIELD does not update the remembered set, but there is no need
+ // WRITE_FIELD does not invoke write barrier, but there is no need
// here.
WRITE_FIELD(this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
@@ -1162,16 +1167,16 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
-Array* JSObject::elements() {
+HeapObject* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
// In the assert below Dictionary is covered under FixedArray.
ASSERT(array->IsFixedArray() || array->IsPixelArray() ||
array->IsExternalArray());
- return reinterpret_cast<Array*>(array);
+ return reinterpret_cast<HeapObject*>(array);
}
-void JSObject::set_elements(Array* value, WriteBarrierMode mode) {
+void JSObject::set_elements(HeapObject* value, WriteBarrierMode mode) {
// In the assert below Dictionary is covered under FixedArray.
ASSERT(value->IsFixedArray() || value->IsPixelArray() ||
value->IsExternalArray());
@@ -1342,15 +1347,15 @@ bool JSObject::HasFastProperties() {
}
-bool Array::IndexFromObject(Object* object, uint32_t* index) {
- if (object->IsSmi()) {
- int value = Smi::cast(object)->value();
+bool Object::ToArrayIndex(uint32_t* index) {
+ if (IsSmi()) {
+ int value = Smi::cast(this)->value();
if (value < 0) return false;
*index = value;
return true;
}
- if (object->IsHeapNumber()) {
- double value = HeapNumber::cast(object)->value();
+ if (IsHeapNumber()) {
+ double value = HeapNumber::cast(this)->value();
uint32_t uint_value = static_cast<uint32_t>(value);
if (value == static_cast<double>(uint_value)) {
*index = uint_value;
@@ -1665,7 +1670,11 @@ HashTable<Shape, Key>* HashTable<Shape, Key>::cast(Object* obj) {
}
-INT_ACCESSORS(Array, length, kLengthOffset)
+SMI_ACCESSORS(FixedArray, length, kLengthOffset)
+SMI_ACCESSORS(ByteArray, length, kLengthOffset)
+
+INT_ACCESSORS(PixelArray, length, kLengthOffset)
+INT_ACCESSORS(ExternalArray, length, kLengthOffset)
SMI_ACCESSORS(String, length, kLengthOffset)
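FixedArray, ByteArray and String lengths are now stored Smi-tagged rather
than as raw ints. A minimal standalone sketch of what that buys, assuming
the classic 32-bit V8 Smi encoding (tag bit 0, value shifted left by one);
SmiEncode/SmiDecode are illustrative helpers, not V8 API:

    #include <cassert>
    #include <cstdint>

    static int32_t SmiEncode(int32_t value) { return value << 1; }
    static int32_t SmiDecode(int32_t word)  { return word >> 1; }

    int main() {
      int32_t stored = SmiEncode(128);   // length 128 as it sits in the field
      assert((stored & 1) == 0);         // tag bit clear: a valid tagged Smi
      assert(SmiDecode(stored) == 128);  // accessor recovers the raw length
      return 0;
    }

Because the stored word is itself a well-formed tagged value, heap
iteration code can scan across the length field without decoding the
object's map.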
@@ -1678,6 +1687,9 @@ uint32_t String::hash_field() {
void String::set_hash_field(uint32_t value) {
WRITE_UINT32_FIELD(this, kHashFieldOffset, value);
+#if V8_HOST_ARCH_64_BIT
+ WRITE_UINT32_FIELD(this, kHashFieldOffset + kIntSize, 0);
+#endif
}
@@ -2456,22 +2468,65 @@ BOOL_ACCESSORS(SharedFunctionInfo,
try_full_codegen,
kTryFullCodegen)
-INT_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
-INT_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
+#if V8_HOST_ARCH_32_BIT
+SMI_ACCESSORS(SharedFunctionInfo, length, kLengthOffset)
+SMI_ACCESSORS(SharedFunctionInfo, formal_parameter_count,
kFormalParameterCountOffset)
-INT_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
+SMI_ACCESSORS(SharedFunctionInfo, expected_nof_properties,
kExpectedNofPropertiesOffset)
-INT_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
-INT_ACCESSORS(SharedFunctionInfo, start_position_and_type,
+SMI_ACCESSORS(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
+SMI_ACCESSORS(SharedFunctionInfo, start_position_and_type,
kStartPositionAndTypeOffset)
-INT_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, function_token_position,
+SMI_ACCESSORS(SharedFunctionInfo, end_position, kEndPositionOffset)
+SMI_ACCESSORS(SharedFunctionInfo, function_token_position,
kFunctionTokenPositionOffset)
-INT_ACCESSORS(SharedFunctionInfo, compiler_hints,
+SMI_ACCESSORS(SharedFunctionInfo, compiler_hints,
kCompilerHintsOffset)
-INT_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
+SMI_ACCESSORS(SharedFunctionInfo, this_property_assignments_count,
kThisPropertyAssignmentsCountOffset)
+#else
+
+#define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset) \
+ int holder::name() { \
+ int value = READ_INT_FIELD(this, offset); \
+ ASSERT(kHeapObjectTag == 1); \
+ ASSERT((value & kHeapObjectTag) == 0); \
+ return value >> 1; \
+ } \
+ void holder::set_##name(int value) { \
+ ASSERT(kHeapObjectTag == 1); \
+ ASSERT((value & 0xC0000000) == 0xC0000000 || \
+          (value & 0xC0000000) == 0x00000000);              \
+ WRITE_INT_FIELD(this, \
+ offset, \
+ (value << 1) & ~kHeapObjectTag); \
+ }
+
+#define PSEUDO_SMI_ACCESSORS_HI(holder, name, offset) \
+ INT_ACCESSORS(holder, name, offset)
+
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, length, kLengthOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, formal_parameter_count,
+ kFormalParameterCountOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, expected_nof_properties,
+ kExpectedNofPropertiesOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, num_literals, kNumLiteralsOffset)
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, start_position_and_type,
+ kStartPositionAndTypeOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, end_position, kEndPositionOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, function_token_position,
+ kFunctionTokenPositionOffset)
+PSEUDO_SMI_ACCESSORS_HI(SharedFunctionInfo, compiler_hints,
+ kCompilerHintsOffset)
+
+PSEUDO_SMI_ACCESSORS_LO(SharedFunctionInfo, this_property_assignments_count,
+ kThisPropertyAssignmentsCountOffset)
+#endif
ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
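A rough standalone sketch of the pairing trick behind
PSEUDO_SMI_ACCESSORS_LO above (assuming kHeapObjectTag == 1 as the ASSERT
states, and a little-endian 64-bit target; the struct and helpers are
invented for illustration):

    #include <cassert>
    #include <cstdint>

    // Two ints sharing one pointer-sized word on a 64-bit heap.
    struct IntPair { int32_t lo; int32_t hi; };

    // The low int is stored shifted left by one, so on a little-endian
    // target the LSB of the pointer-aligned word stays 0 and can never
    // carry the HeapObject tag bit (1) the old-space heap walker tests.
    static void set_lo(IntPair* p, int32_t value) { p->lo = value << 1; }
    static int32_t get_lo(const IntPair* p)       { return p->lo >> 1; }

    int main() {
      IntPair pair = { 0, 0 };
      set_lo(&pair, 12345);
      assert((pair.lo & 1) == 0);      // never looks like a tagged pointer
      assert(get_lo(&pair) == 12345);  // round-trips the value
      return 0;
    }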
@@ -2785,7 +2840,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {
JSObject::ElementsKind JSObject::GetElementsKind() {
- Array* array = elements();
+ HeapObject* array = elements();
if (array->IsFixedArray()) {
// FAST_ELEMENTS or DICTIONARY_ELEMENTS are both stored in a FixedArray.
if (array->map() == Heap::fixed_array_map()) {
@@ -2908,15 +2963,20 @@ NumberDictionary* JSObject::element_dictionary() {
}
+bool String::IsHashFieldComputed(uint32_t field) {
+ return (field & kHashNotComputedMask) == 0;
+}
+
+
bool String::HasHashCode() {
- return (hash_field() & kHashComputedMask) != 0;
+ return IsHashFieldComputed(hash_field());
}
uint32_t String::Hash() {
// Fast case: has hash code already been computed?
uint32_t field = hash_field();
- if (field & kHashComputedMask) return field >> kHashShift;
+ if (IsHashFieldComputed(field)) return field >> kHashShift;
// Slow case: compute hash code and set it.
return ComputeAndSetHash();
}
@@ -2989,7 +3049,7 @@ uint32_t StringHasher::GetHash() {
bool String::AsArrayIndex(uint32_t* index) {
uint32_t field = hash_field();
- if ((field & kHashComputedMask) && !(field & kIsArrayIndexMask)) return false;
+ if (IsHashFieldComputed(field) && !(field & kIsArrayIndexMask)) return false;
return SlowAsArrayIndex(index);
}
@@ -3113,7 +3173,7 @@ void Map::ClearCodeCache() {
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
- Array* elts = elements();
+ FixedArray* elts = FixedArray::cast(elements());
const int kArraySizeThatFitsComfortablyInNewSpace = 128;
if (elts->length() < required_size) {
// Doubling in size would be overkill, but leave some slack to avoid
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index ab678cb539..e2c5bc99a4 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -2037,7 +2037,7 @@ PropertyAttributes JSObject::GetPropertyAttributeWithInterceptor(
VMState state(EXTERNAL);
result = getter(v8::Utils::ToLocal(name_handle), info);
}
- if (!result.IsEmpty()) return NONE;
+ if (!result.IsEmpty()) return DONT_ENUM;
}
return holder_handle->GetPropertyAttributePostInterceptor(*receiver_handle,
*name_handle,
@@ -4784,7 +4784,7 @@ static inline uint32_t HashSequentialString(const schar* chars, int length) {
uint32_t String::ComputeAndSetHash() {
// Should only be called if hash code has not yet been computed.
- ASSERT(!(hash_field() & kHashComputedMask));
+ ASSERT(!HasHashCode());
const int len = length();
@@ -4803,7 +4803,7 @@ uint32_t String::ComputeAndSetHash() {
set_hash_field(field);
// Check the hash code is there.
- ASSERT(hash_field() & kHashComputedMask);
+ ASSERT(HasHashCode());
uint32_t result = field >> kHashShift;
ASSERT(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
@@ -4858,8 +4858,7 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
static inline uint32_t HashField(uint32_t hash,
bool is_array_index,
int length = -1) {
- uint32_t result =
- (hash << String::kHashShift) | String::kHashComputedMask;
+ uint32_t result = (hash << String::kHashShift);
if (is_array_index) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -5639,7 +5638,7 @@ Object* JSObject::SetElementsLength(Object* len) {
// General slow case.
if (len->IsNumber()) {
uint32_t length;
- if (Array::IndexFromObject(len, &length)) {
+ if (len->ToArrayIndex(&length)) {
return SetSlowElements(len);
} else {
return ArrayLengthRangeError();
@@ -6063,8 +6062,7 @@ Object* JSObject::SetFastElement(uint32_t index, Object* value) {
if (IsJSArray()) {
// Update the length of the array if needed.
uint32_t array_length = 0;
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
- &array_length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&array_length));
if (index >= array_length) {
JSArray::cast(this)->set_length(Smi::FromInt(index + 1));
}
@@ -6202,8 +6200,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
if (IsJSArray()) {
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(),
- &new_length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
JSArray::cast(this)->set_length(Smi::FromInt(new_length));
} else {
new_length = NumberDictionary::cast(elements())->max_number_key() + 1;
@@ -6234,7 +6231,7 @@ Object* JSObject::SetElementWithoutInterceptor(uint32_t index, Object* value) {
Object* JSArray::JSArrayUpdateLengthFromIndex(uint32_t index, Object* value) {
uint32_t old_len = 0;
- CHECK(Array::IndexFromObject(length(), &old_len));
+ CHECK(length()->ToArrayIndex(&old_len));
// Check to see if we need to update the length. For now, we make
// sure that the length stays within 32-bits (unsigned).
if (index >= old_len && index != 0xffffffff) {
@@ -6516,7 +6513,7 @@ bool JSObject::ShouldConvertToFastElements() {
// fast elements.
uint32_t length = 0;
if (IsJSArray()) {
- CHECK(Array::IndexFromObject(JSArray::cast(this)->length(), &length));
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
} else {
length = dictionary->max_number_key();
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 8e89e8f0f4..622bc26d36 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -54,29 +54,28 @@
// - JSGlobalObject
// - JSBuiltinsObject
// - JSGlobalProxy
-// - JSValue
-// - Array
-// - ByteArray
-// - PixelArray
-// - ExternalArray
-// - ExternalByteArray
-// - ExternalUnsignedByteArray
-// - ExternalShortArray
-// - ExternalUnsignedShortArray
-// - ExternalIntArray
-// - ExternalUnsignedIntArray
-// - ExternalFloatArray
-// - FixedArray
-// - DescriptorArray
-// - HashTable
-// - Dictionary
-// - SymbolTable
-// - CompilationCacheTable
-// - CodeCacheHashTable
-// - MapCache
-// - Context
-// - GlobalContext
-// - JSFunctionResultCache
+// - JSValue
+// - ByteArray
+// - PixelArray
+// - ExternalArray
+// - ExternalByteArray
+// - ExternalUnsignedByteArray
+// - ExternalShortArray
+// - ExternalUnsignedShortArray
+// - ExternalIntArray
+// - ExternalUnsignedIntArray
+// - ExternalFloatArray
+// - FixedArray
+// - DescriptorArray
+// - HashTable
+// - Dictionary
+// - SymbolTable
+// - CompilationCacheTable
+// - CodeCacheHashTable
+// - MapCache
+// - Context
+// - GlobalContext
+// - JSFunctionResultCache
// - String
// - SeqString
// - SeqAsciiString
@@ -411,6 +410,7 @@ enum StringRepresentationTag {
kConsStringTag = 0x1,
kExternalStringTag = 0x3
};
+const uint32_t kIsConsStringMask = 0x1;
// A ConsString with an empty string as the right side is a candidate
@@ -676,6 +676,10 @@ class Object BASE_EMBEDDED {
// Return the object's prototype (might be Heap::null_value()).
Object* GetPrototype();
+ // Tries to convert an object to an array index. Returns true and sets
+ // the output parameter if it succeeds.
+ inline bool ToArrayIndex(uint32_t* index);
+
// Returns true if this is a JSValue containing a string and the index is
// < the length of the string. Used to implement [] on strings.
inline bool IsStringObjectWithCharacterAt(uint32_t index);
@@ -1026,7 +1030,7 @@ class HeapObject: public Object {
// Returns the field at offset in obj, as a read/write Object* reference.
// Does no checking, and is safe to use during GC, while maps are invalid.
- // Does not update remembered sets, so should only be assigned to
+ // Does not invoke write barrier, so should only be assigned to
// during marking GC.
static inline Object** RawField(HeapObject* obj, int offset);
@@ -1046,6 +1050,7 @@ class HeapObject: public Object {
void HeapObjectPrint();
void HeapObjectVerify();
inline void VerifyObjectField(int offset);
+ inline void VerifySmiField(int offset);
void PrintHeader(const char* id);
@@ -1150,7 +1155,7 @@ class JSObject: public HeapObject {
};
// [properties]: Backing storage for properties.
- // properties is a FixedArray in the fast case, and a Dictionary in the
+ // properties is a FixedArray in the fast case and a Dictionary in the
// slow case.
DECL_ACCESSORS(properties, FixedArray) // Get and set fast properties.
inline void initialize_properties();
@@ -1158,9 +1163,9 @@ class JSObject: public HeapObject {
inline StringDictionary* property_dictionary(); // Gets slow properties.
// [elements]: The elements (properties with names that are integers).
- // elements is a FixedArray in the fast case, and a Dictionary in the slow
- // case or a PixelArray in a special case.
- DECL_ACCESSORS(elements, Array) // Get and set fast elements.
+ // elements is a FixedArray in the fast case, a Dictionary in the slow
+ // case, and a PixelArray or ExternalArray in special cases.
+ DECL_ACCESSORS(elements, HeapObject)
inline void initialize_elements();
inline ElementsKind GetElementsKind();
inline bool HasFastElements();
@@ -1594,37 +1599,13 @@ class JSObject: public HeapObject {
};
-// Abstract super class arrays. It provides length behavior.
-class Array: public HeapObject {
+// FixedArray describes fixed-sized arrays with element type Object*.
+class FixedArray: public HeapObject {
public:
// [length]: length of the array.
inline int length();
inline void set_length(int value);
- // Convert an object to an array index.
- // Returns true if the conversion succeeded.
- static inline bool IndexFromObject(Object* object, uint32_t* index);
-
- // Layout descriptor.
- static const int kLengthOffset = HeapObject::kHeaderSize;
-
- protected:
- // No code should use the Array class directly, only its subclasses.
- // Use the kHeaderSize of the appropriate subclass, which may be aligned.
- static const int kHeaderSize = kLengthOffset + kIntSize;
- static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(Array);
-};
-
-
-// FixedArray describes fixed sized arrays where element
-// type is Object*.
-
-class FixedArray: public Array {
- public:
-
// Setter and getter for elements.
inline Object* get(int index);
// Setter that uses write barrier.
@@ -1665,7 +1646,10 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
- static const int kHeaderSize = Array::kAlignedSize;
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
// Maximal allowed size, in bytes, of a single FixedArray.
// Prevents overflowing size computations, as well as extreme memory
@@ -2364,8 +2348,12 @@ class JSFunctionResultCache: public FixedArray {
// ByteArray represents fixed sized byte arrays. Used by the outside world,
// such as PCRE, and also by the memory allocator and garbage collector to
// fill in free blocks in the heap.
-class ByteArray: public Array {
+class ByteArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// Setter and getter.
inline byte get(int index);
inline void set(int index, byte value);
@@ -2374,7 +2362,7 @@ class ByteArray: public Array {
inline int get_int(int index);
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length);
}
// We use byte arrays for free blocks in the heap. Given a desired size in
// bytes that is a multiple of the word size and big enough to hold a byte
@@ -2402,9 +2390,12 @@ class ByteArray: public Array {
void ByteArrayVerify();
#endif
- // ByteArray headers are not quadword aligned.
- static const int kHeaderSize = Array::kHeaderSize;
- static const int kAlignedSize = Array::kAlignedSize;
+ // Layout description.
+ // Length is smi tagged when it is stored.
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kHeaderSize = kLengthOffset + kPointerSize;
+
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
// Maximal memory consumption for a single ByteArray.
static const int kMaxSize = 512 * MB;
@@ -2423,8 +2414,12 @@ class ByteArray: public Array {
// multipage/the-canvas-element.html#canvaspixelarray
// In particular, write access clamps the value written to 0 or 255 if the
// value written is outside this range.
-class PixelArray: public Array {
+class PixelArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// [external_pointer]: The pointer to the external memory area backing this
// pixel array.
DECL_ACCESSORS(external_pointer, uint8_t) // Pointer to the data store.
@@ -2449,9 +2444,11 @@ class PixelArray: public Array {
static const int kMaxLength = 0x3fffffff;
// PixelArray headers are not quadword aligned.
- static const int kExternalPointerOffset = Array::kAlignedSize;
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kExternalPointerOffset =
+ POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(PixelArray);
@@ -2469,8 +2466,12 @@ class PixelArray: public Array {
// Out-of-range values passed to the setter are converted via a C
// cast, not clamping. Out-of-range indices cause exceptions to be
// raised rather than being silently ignored.
-class ExternalArray: public Array {
+class ExternalArray: public HeapObject {
public:
+ // [length]: length of the array.
+ inline int length();
+ inline void set_length(int value);
+
// [external_pointer]: The pointer to the external memory area backing this
// external array.
DECL_ACCESSORS(external_pointer, void) // Pointer to the data store.
@@ -2482,9 +2483,11 @@ class ExternalArray: public Array {
static const int kMaxLength = 0x3fffffff;
// ExternalArray headers are not quadword aligned.
- static const int kExternalPointerOffset = Array::kAlignedSize;
+ static const int kLengthOffset = HeapObject::kHeaderSize;
+ static const int kExternalPointerOffset =
+ POINTER_SIZE_ALIGN(kLengthOffset + kIntSize);
static const int kHeaderSize = kExternalPointerOffset + kPointerSize;
- static const int kAlignedSize = OBJECT_SIZE_ALIGN(kHeaderSize);
+ static const int kAlignedSize = OBJECT_POINTER_ALIGN(kHeaderSize);
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalArray);
@@ -3038,7 +3041,13 @@ class Map: public HeapObject {
kConstructorOffset + kPointerSize;
static const int kCodeCacheOffset = kInstanceDescriptorsOffset + kPointerSize;
static const int kPadStart = kCodeCacheOffset + kPointerSize;
- static const int kSize = MAP_SIZE_ALIGN(kPadStart);
+ static const int kSize = MAP_POINTER_ALIGN(kPadStart);
+
+ // Layout of pointer fields. Heap iteration code relies on them
+  // being contiguously allocated.
+ static const int kPointerFieldsBeginOffset = Map::kPrototypeOffset;
+ static const int kPointerFieldsEndOffset =
+ Map::kCodeCacheOffset + kPointerSize;
// Byte offsets within kInstanceSizesOffset.
static const int kInstanceSizeOffset = kInstanceSizesOffset + 0;
@@ -3350,23 +3359,64 @@ class SharedFunctionInfo: public HeapObject {
static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
static const int kThisPropertyAssignmentsOffset =
kInferredNameOffset + kPointerSize;
- // Integer fields.
+#if V8_HOST_ARCH_32_BIT
+ // Smi fields.
static const int kLengthOffset =
kThisPropertyAssignmentsOffset + kPointerSize;
- static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
+ static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
static const int kExpectedNofPropertiesOffset =
- kFormalParameterCountOffset + kIntSize;
- static const int kNumLiteralsOffset = kExpectedNofPropertiesOffset + kIntSize;
+ kFormalParameterCountOffset + kPointerSize;
+ static const int kNumLiteralsOffset =
+ kExpectedNofPropertiesOffset + kPointerSize;
static const int kStartPositionAndTypeOffset =
+ kNumLiteralsOffset + kPointerSize;
+ static const int kEndPositionOffset =
+ kStartPositionAndTypeOffset + kPointerSize;
+ static const int kFunctionTokenPositionOffset =
+ kEndPositionOffset + kPointerSize;
+ static const int kCompilerHintsOffset =
+ kFunctionTokenPositionOffset + kPointerSize;
+ static const int kThisPropertyAssignmentsCountOffset =
+ kCompilerHintsOffset + kPointerSize;
+ // Total size.
+ static const int kSize = kThisPropertyAssignmentsCountOffset + kPointerSize;
+#else
+ // The only reason to use smi fields instead of int fields
+  // is to allow iteration without map decoding during
+  // garbage collections.
+  // To avoid wasting space on 64-bit architectures we use
+  // the following trick: we group integer fields into pairs.
+  // The first integer in each pair is shifted left by 1.
+  // By doing this we guarantee that the LSB of each kPointerSize-aligned
+  // word is not set and thus this word cannot be treated as a pointer
+  // to a HeapObject during old space traversal.
+ static const int kLengthOffset =
+ kThisPropertyAssignmentsOffset + kPointerSize;
+ static const int kFormalParameterCountOffset =
+ kLengthOffset + kIntSize;
+
+ static const int kExpectedNofPropertiesOffset =
+ kFormalParameterCountOffset + kIntSize;
+ static const int kNumLiteralsOffset =
+ kExpectedNofPropertiesOffset + kIntSize;
+
+ static const int kEndPositionOffset =
kNumLiteralsOffset + kIntSize;
- static const int kEndPositionOffset = kStartPositionAndTypeOffset + kIntSize;
- static const int kFunctionTokenPositionOffset = kEndPositionOffset + kIntSize;
+ static const int kStartPositionAndTypeOffset =
+ kEndPositionOffset + kIntSize;
+
+ static const int kFunctionTokenPositionOffset =
+ kStartPositionAndTypeOffset + kIntSize;
static const int kCompilerHintsOffset =
kFunctionTokenPositionOffset + kIntSize;
+
static const int kThisPropertyAssignmentsCountOffset =
kCompilerHintsOffset + kIntSize;
+
// Total size.
static const int kSize = kThisPropertyAssignmentsCountOffset + kIntSize;
+
+#endif
static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
private:
@@ -4122,8 +4172,7 @@ class String: public HeapObject {
// Layout description.
static const int kLengthOffset = HeapObject::kHeaderSize;
static const int kHashFieldOffset = kLengthOffset + kPointerSize;
- static const int kSize = kHashFieldOffset + kIntSize;
- // Notice: kSize is not pointer-size aligned if pointers are 64-bit.
+ static const int kSize = kHashFieldOffset + kPointerSize;
// Maximum number of characters to consider when trying to convert a string
// value into an array index.
@@ -4142,7 +4191,7 @@ class String: public HeapObject {
// whether a hash code has been computed. If the hash code has been
// computed the 2nd bit tells whether the string can be used as an
// array index.
- static const int kHashComputedMask = 1;
+ static const int kHashNotComputedMask = 1;
static const int kIsArrayIndexMask = 1 << 1;
static const int kNofLengthBitFields = 2;
@@ -4160,9 +4209,14 @@ class String: public HeapObject {
static const int kArrayIndexHashMask = (1 << kArrayIndexHashLengthShift) - 1;
static const int kArrayIndexValueBits =
kArrayIndexHashLengthShift - kHashShift;
+ static const int kArrayIndexValueMask =
+ ((1 << kArrayIndexValueBits) - 1) << kHashShift;
// Value of empty hash field indicating that the hash is not computed.
- static const int kEmptyHashField = 0;
+ static const int kEmptyHashField = kHashNotComputedMask;
+
+ // Value of hash field containing computed hash equal to zero.
+ static const int kZeroHash = 0;
// Maximal string length.
static const int kMaxLength = (1 << (32 - 2)) - 1;
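The rename from kHashComputedMask to kHashNotComputedMask above inverts
the convention: the low bit of the hash field now means "hash not yet
computed". A small self-check using the constants and predicate from this
diff:

    #include <cassert>
    #include <cstdint>

    static const uint32_t kHashNotComputedMask = 1;
    static const uint32_t kEmptyHashField = kHashNotComputedMask;
    static const uint32_t kZeroHash = 0;

    static bool IsHashFieldComputed(uint32_t field) {
      return (field & kHashNotComputedMask) == 0;
    }

    int main() {
      assert(!IsHashFieldComputed(kEmptyHashField));  // 1: not computed yet
      assert(IsHashFieldComputed(kZeroHash));         // 0: computed, hash 0
      return 0;
    }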
@@ -4230,6 +4284,8 @@ class String: public HeapObject {
// mutates the ConsString and might return a failure.
Object* SlowTryFlatten(PretenureFlag pretenure);
+ static inline bool IsHashFieldComputed(uint32_t field);
+
// Slow case of String::Equals. This implementation works on any strings
// but it is most efficient on strings that are almost flat.
bool SlowEquals(String* other);
@@ -4279,7 +4335,7 @@ class SeqAsciiString: public SeqString {
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length * kCharSize);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
}
// Layout description.
@@ -4331,7 +4387,7 @@ class SeqTwoByteString: public SeqString {
// Computes the size for a TwoByteString instance of a given length.
static int SizeFor(int length) {
- return OBJECT_SIZE_ALIGN(kHeaderSize + length * kShortSize);
+ return OBJECT_POINTER_ALIGN(kHeaderSize + length * kShortSize);
}
// Layout description.
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 72fe088d8a..9b8b20675f 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -84,6 +84,12 @@ void OS::Setup() {
}
+void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
+ __asm__ __volatile__("" : : : "memory");
+ *ptr = value;
+}
+
+
uint64_t OS::CpuFeaturesImpliedByPlatform() {
return 0; // FreeBSD runs on anything.
}
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index ff1ecb13ce..7e8a5586f8 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -177,7 +177,8 @@ LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
#endif
void OS::ReleaseStore(volatile AtomicWord* ptr, AtomicWord value) {
-#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__) // don't use on a simulator
+#if defined(V8_TARGET_ARCH_ARM) && defined(__arm__)
+ // Only use on ARM hardware.
pLinuxKernelMemoryBarrier();
#else
__asm__ __volatile__("" : : : "memory");
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index ad8867ced1..105c1a8435 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -572,7 +572,8 @@ int CpuProfilesCollection::TokenToIndex(int security_token_id) {
List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
int security_token_id) {
const int index = TokenToIndex(security_token_id);
- profiles_by_token_.AddBlock(NULL, profiles_by_token_.length() - index + 1);
+ const int lists_to_add = index - profiles_by_token_.length() + 1;
+ if (lists_to_add > 0) profiles_by_token_.AddBlock(NULL, lists_to_add);
List<CpuProfile*>* unabridged_list =
profiles_by_token_[TokenToIndex(CodeEntry::kNoSecurityToken)];
const int current_count = unabridged_list->length();
@@ -580,7 +581,8 @@ List<CpuProfile*>* CpuProfilesCollection::GetProfilesList(
profiles_by_token_[index] = new List<CpuProfile*>(current_count);
}
List<CpuProfile*>* list = profiles_by_token_[index];
- list->AddBlock(NULL, current_count - list->length());
+ const int profiles_to_add = current_count - list->length();
+ if (profiles_to_add > 0) list->AddBlock(NULL, profiles_to_add);
return list;
}
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index b421ac7147..d53b13d07f 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -291,7 +291,7 @@ static Handle<Object> CreateObjectLiteralBoilerplate(
Handle<String> name(String::cast(*key));
ASSERT(!name->AsArrayIndex(&element_index));
result = SetProperty(boilerplate, name, value, NONE);
- } else if (Array::IndexFromObject(*key, &element_index)) {
+ } else if (key->ToArrayIndex(&element_index)) {
// Array index (uint32).
result = SetElement(boilerplate, element_index, value);
} else {
@@ -569,6 +569,18 @@ static void GetOwnPropertyImplementation(JSObject* obj,
}
+// Enumeration values used as indices into the array returned from GetOwnProperty
+enum PropertyDescriptorIndices {
+ IS_ACCESSOR_INDEX,
+ VALUE_INDEX,
+ GETTER_INDEX,
+ SETTER_INDEX,
+ WRITABLE_INDEX,
+ ENUMERABLE_INDEX,
+ CONFIGURABLE_INDEX,
+ DESCRIPTOR_SIZE
+};
+
// Returns an array with the property description:
// if args[1] is not a property on args[0]
// returns undefined
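A compact sketch of the fixed record layout those enum indices describe
(plain C++ stand-ins; the V8 Heap/Factory values are elided): a data
property fills VALUE_INDEX and WRITABLE_INDEX, an accessor property fills
GETTER_INDEX and SETTER_INDEX, and both kinds fill the IS_ACCESSOR,
ENUMERABLE and CONFIGURABLE slots.

    #include <cassert>

    enum PropertyDescriptorIndices {
      IS_ACCESSOR_INDEX, VALUE_INDEX, GETTER_INDEX, SETTER_INDEX,
      WRITABLE_INDEX, ENUMERABLE_INDEX, CONFIGURABLE_INDEX, DESCRIPTOR_SIZE
    };

    int main() {
      // A plain data property, as the fast-element branch fills it in.
      bool elms[DESCRIPTOR_SIZE] = { false };
      elms[IS_ACCESSOR_INDEX]  = false;
      elms[WRITABLE_INDEX]     = true;
      elms[ENUMERABLE_INDEX]   = true;
      elms[CONFIGURABLE_INDEX] = true;
      assert(DESCRIPTOR_SIZE == 7);  // one slot per descriptor field
      return 0;
    }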
@@ -579,18 +591,63 @@ static void GetOwnPropertyImplementation(JSObject* obj,
static Object* Runtime_GetOwnProperty(Arguments args) {
ASSERT(args.length() == 2);
HandleScope scope;
- Handle<FixedArray> elms = Factory::NewFixedArray(5);
+ Handle<FixedArray> elms = Factory::NewFixedArray(DESCRIPTOR_SIZE);
Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
LookupResult result;
CONVERT_CHECKED(JSObject, obj, args[0]);
CONVERT_CHECKED(String, name, args[1]);
+ // This could be an element.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ if (!obj->HasLocalElement(index)) {
+ return Heap::undefined_value();
+ }
+
+ // Special handling of string objects according to ECMAScript 5 15.5.5.2.
+ // Note that this might be a string object with elements other than the
+ // actual string value. This is covered by the subsequent cases.
+ if (obj->IsStringObjectWithCharacterAt(index)) {
+ JSValue* js_value = JSValue::cast(obj);
+ String* str = String::cast(js_value->value());
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, str->SubString(index, index+1));
+ elms->set(WRITABLE_INDEX, Heap::false_value());
+ elms->set(ENUMERABLE_INDEX, Heap::false_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::false_value());
+ return *desc;
+ }
+
+ // This can potentially be an element in the elements dictionary or
+ // a fast element.
+ if (obj->HasDictionaryElements()) {
+ NumberDictionary* dictionary = obj->element_dictionary();
+ int entry = dictionary->FindEntry(index);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, dictionary->ValueAt(entry));
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!details.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!details.IsReadOnly()));
+ return *desc;
+ } else {
+      // Elements that are stored as array elements always have:
+ // writable: true, configurable: true, enumerable: true.
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, obj->GetElement(index));
+ elms->set(WRITABLE_INDEX, Heap::true_value());
+ elms->set(ENUMERABLE_INDEX, Heap::true_value());
+ elms->set(CONFIGURABLE_INDEX, Heap::true_value());
+ return *desc;
+ }
+ }
+
// Use recursive implementation to also traverse hidden prototypes
GetOwnPropertyImplementation(obj, name, &result);
- if (!result.IsProperty())
+ if (!result.IsProperty()) {
return Heap::undefined_value();
-
+ }
if (result.type() == CALLBACKS) {
Object* structure = result.GetCallbackObject();
if (structure->IsProxy() || structure->IsAccessorInfo()) {
@@ -598,25 +655,25 @@ static Object* Runtime_GetOwnProperty(Arguments args) {
// an API defined callback.
Object* value = obj->GetPropertyWithCallback(
obj, structure, name, result.holder());
- elms->set(0, Heap::false_value());
- elms->set(1, value);
- elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, value);
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
} else if (structure->IsFixedArray()) {
// __defineGetter__/__defineSetter__ callback.
- elms->set(0, Heap::true_value());
- elms->set(1, FixedArray::cast(structure)->get(0));
- elms->set(2, FixedArray::cast(structure)->get(1));
+ elms->set(IS_ACCESSOR_INDEX, Heap::true_value());
+ elms->set(GETTER_INDEX, FixedArray::cast(structure)->get(0));
+ elms->set(SETTER_INDEX, FixedArray::cast(structure)->get(1));
} else {
return Heap::undefined_value();
}
} else {
- elms->set(0, Heap::false_value());
- elms->set(1, result.GetLazyValue());
- elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ elms->set(IS_ACCESSOR_INDEX, Heap::false_value());
+ elms->set(VALUE_INDEX, result.GetLazyValue());
+ elms->set(WRITABLE_INDEX, Heap::ToBoolean(!result.IsReadOnly()));
}
- elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
- elms->set(4, Heap::ToBoolean(!result.IsDontDelete()));
+ elms->set(ENUMERABLE_INDEX, Heap::ToBoolean(!result.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, Heap::ToBoolean(!result.IsDontDelete()));
return *desc;
}
@@ -1583,7 +1640,7 @@ static Object* Runtime_SetCode(Arguments args) {
static Object* CharCodeAt(String* subject, Object* index) {
uint32_t i = 0;
- if (!Array::IndexFromObject(index, &i)) return Heap::nan_value();
+ if (!index->ToArrayIndex(&i)) return Heap::nan_value();
// Flatten the string. If someone wants to get a char at an index
// in a cons string, it is likely that more indices will be
// accessed.
@@ -1599,7 +1656,7 @@ static Object* CharCodeAt(String* subject, Object* index) {
static Object* CharFromCode(Object* char_code) {
uint32_t code;
- if (Array::IndexFromObject(char_code, &code)) {
+ if (char_code->ToArrayIndex(&code)) {
if (code <= 0xffff) {
return Heap::LookupSingleCharacterStringFromCode(code);
}
@@ -2780,7 +2837,7 @@ static Object* Runtime_StringIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
- if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
RUNTIME_ASSERT(start_index <= static_cast<uint32_t>(sub->length()));
int position = Runtime::StringMatch(sub, pat, start_index);
@@ -2830,7 +2887,7 @@ static Object* Runtime_StringLastIndexOf(Arguments args) {
Object* index = args[2];
uint32_t start_index;
- if (!Array::IndexFromObject(index, &start_index)) return Smi::FromInt(-1);
+ if (!index->ToArrayIndex(&start_index)) return Smi::FromInt(-1);
uint32_t pat_length = pat->length();
uint32_t sub_length = sub->length();
@@ -3657,7 +3714,7 @@ Object* Runtime::GetObjectProperty(Handle<Object> object, Handle<Object> key) {
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
return GetElementOrCharAt(object, index);
}
@@ -3843,7 +3900,7 @@ Object* Runtime::SetObjectProperty(Handle<Object> object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -3895,7 +3952,7 @@ Object* Runtime::ForceSetObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the characters
// of a string using [] notation. We need to support this too in
// JavaScript.
@@ -3942,7 +3999,7 @@ Object* Runtime::ForceDeleteObjectProperty(Handle<JSObject> js_object,
// Check if the given key is an array index.
uint32_t index;
- if (Array::IndexFromObject(*key, &index)) {
+ if (key->ToArrayIndex(&index)) {
// In Firefox/SpiderMonkey, Safari and Opera you can access the
// characters of a string using [] notation. In the case of a
// String object we just need to redirect the deletion to the
@@ -4355,7 +4412,7 @@ static Object* Runtime_GetArgumentsProperty(Arguments args) {
// Try to convert the key to an index. If successful and within
// bounds, return the argument from the frame.
uint32_t index;
- if (Array::IndexFromObject(args[0], &index) && index < n) {
+ if (args[0]->ToArrayIndex(&index) && index < n) {
return frame->GetParameter(index);
}
@@ -5287,6 +5344,25 @@ static Object* Runtime_NumberToInteger(Arguments args) {
}
+static Object* Runtime_NumberToIntegerMapMinusZero(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_DOUBLE_CHECKED(number, args[0]);
+
+ // We do not include 0 so that we don't have to treat +0 / -0 cases.
+ if (number > 0 && number <= Smi::kMaxValue) {
+ return Smi::FromInt(static_cast<int>(number));
+ }
+
+ double double_value = DoubleToInteger(number);
+ // Map both -0 and +0 to +0.
+ if (double_value == 0) double_value = 0;
+
+ return Heap::NumberFromDouble(double_value);
+}
+
+
static Object* Runtime_NumberToJSUint32(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
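The seemingly redundant "if (double_value == 0) double_value = 0;" in the
new runtime function works because IEEE 754 compares -0.0 equal to +0.0,
so the branch is taken for -0.0 and the store replaces it with +0.0. A
minimal standalone check:

    #include <cassert>
    #include <cmath>

    int main() {
      double v = -0.0;
      assert(v == 0);            // IEEE 754: -0.0 compares equal to +0.0
      if (v == 0) v = 0;         // same statement as in the runtime function
      assert(!std::signbit(v));  // sign bit gone: -0 mapped to +0
      return 0;
    }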
@@ -6457,8 +6533,8 @@ static Object* Runtime_NewArgumentsFast(Arguments args) {
if (obj->IsFailure()) return obj;
AssertNoAllocation no_gc;
- reinterpret_cast<Array*>(obj)->set_map(Heap::fixed_array_map());
- FixedArray* array = FixedArray::cast(obj);
+ FixedArray* array = reinterpret_cast<FixedArray*>(obj);
+ array->set_map(Heap::fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -7747,8 +7823,8 @@ static Object* Runtime_SwapElements(Arguments args) {
Handle<Object> key2 = args.at<Object>(2);
uint32_t index1, index2;
- if (!Array::IndexFromObject(*key1, &index1)
- || !Array::IndexFromObject(*key2, &index2)) {
+ if (!key1->ToArrayIndex(&index1)
+ || !key2->ToArrayIndex(&index2)) {
return Top::ThrowIllegalOperation();
}
@@ -7779,17 +7855,19 @@ static Object* Runtime_GetArrayKeys(Arguments args) {
for (int i = 0; i < keys_length; i++) {
Object* key = keys->get(i);
uint32_t index;
- if (!Array::IndexFromObject(key, &index) || index >= length) {
+ if (!key->ToArrayIndex(&index) || index >= length) {
// Zap invalid keys.
keys->set_undefined(i);
}
}
return *Factory::NewJSArrayWithElements(keys);
} else {
+ ASSERT(array->HasFastElements());
Handle<FixedArray> single_interval = Factory::NewFixedArray(2);
// -1 means start of array.
single_interval->set(0, Smi::FromInt(-1));
- uint32_t actual_length = static_cast<uint32_t>(array->elements()->length());
+ uint32_t actual_length =
+ static_cast<uint32_t>(FixedArray::cast(array->elements())->length());
uint32_t min_length = actual_length < length ? actual_length : length;
Handle<Object> length_object =
Factory::NewNumber(static_cast<double>(min_length));
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index a7f0bf37b1..c8447cbd1f 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -102,6 +102,7 @@ namespace internal {
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
F(NumberToInteger, 1, 1) \
+ F(NumberToIntegerMapMinusZero, 1, 1) \
F(NumberToJSUint32, 1, 1) \
F(NumberToJSInt32, 1, 1) \
F(NumberToSmi, 1, 1) \
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 66894c4f1c..862d5bee5a 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -66,99 +66,183 @@ Address Page::AllocationTop() {
}
-void Page::ClearRSet() {
- // This method can be called in all rset states.
- memset(RSetStart(), 0, kRSetEndOffset - kRSetStartOffset);
-}
-
-
-// Given a 32-bit address, separate its bits into:
-// | page address | words (6) | bit offset (5) | pointer alignment (2) |
-// The address of the rset word containing the bit for this word is computed as:
-// page_address + words * 4
-// For a 64-bit address, if it is:
-// | page address | words(5) | bit offset(5) | pointer alignment (3) |
-// The address of the rset word containing the bit for this word is computed as:
-// page_address + words * 4 + kRSetOffset.
-// The rset is accessed as 32-bit words, and bit offsets in a 32-bit word,
-// even on the X64 architecture.
-
-Address Page::ComputeRSetBitPosition(Address address, int offset,
- uint32_t* bitmask) {
- ASSERT(Page::is_rset_in_use());
-
- Page* page = Page::FromAddress(address);
- uint32_t bit_offset = ArithmeticShiftRight(page->Offset(address) + offset,
- kPointerSizeLog2);
- *bitmask = 1 << (bit_offset % kBitsPerInt);
-
- Address rset_address =
- page->address() + kRSetOffset + (bit_offset / kBitsPerInt) * kIntSize;
- // The remembered set address is either in the normal remembered set range
- // of a page or else we have a large object page.
- ASSERT((page->RSetStart() <= rset_address && rset_address < page->RSetEnd())
- || page->IsLargeObjectPage());
-
- if (rset_address >= page->RSetEnd()) {
- // We have a large object page, and the remembered set address is actually
- // past the end of the object.
-
- // The first part of the remembered set is still located at the start of
- // the page, but anything after kRSetEndOffset must be relocated to after
- // the large object, i.e. after
- // (page->ObjectAreaStart() + object size)
- // We do that by adding the difference between the normal RSet's end and
- // the object's end.
- ASSERT(HeapObject::FromAddress(address)->IsFixedArray());
- int fixedarray_length =
- FixedArray::SizeFor(Memory::int_at(page->ObjectAreaStart()
- + Array::kLengthOffset));
- rset_address += kObjectStartOffset - kRSetEndOffset + fixedarray_length;
+Address Page::AllocationWatermark() {
+ PagedSpace* owner = MemoryAllocator::PageOwner(this);
+ if (this == owner->AllocationTopPage()) {
+ return owner->top();
}
- return rset_address;
+ return address() + AllocationWatermarkOffset();
}
-void Page::SetRSet(Address address, int offset) {
- uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- Memory::uint32_at(rset_address) |= bitmask;
+uint32_t Page::AllocationWatermarkOffset() {
+ return static_cast<uint32_t>((flags_ & kAllocationWatermarkOffsetMask) >>
+ kAllocationWatermarkOffsetShift);
+}
+
- ASSERT(IsRSetSet(address, offset));
+void Page::SetAllocationWatermark(Address allocation_watermark) {
+ if ((Heap::gc_state() == Heap::SCAVENGE) && IsWatermarkValid()) {
+ // When iterating intergenerational references during scavenge
+ // we might decide to promote an encountered young object.
+    // We will allocate space for such an object and put it
+    // into the promotion queue to process it later.
+    // If space for the object was allocated somewhere beyond the
+    // allocation watermark, this might cause garbage pointers to appear
+    // under the allocation watermark. To avoid visiting them during
+    // dirty-region iteration, which might still be in progress, we store
+    // a valid allocation watermark value and mark this page as having an
+    // invalid watermark.
+ SetCachedAllocationWatermark(AllocationWatermark());
+ InvalidateWatermark(true);
+ }
+
+ flags_ = (flags_ & kFlagsMask) |
+ Offset(allocation_watermark) << kAllocationWatermarkOffsetShift;
+ ASSERT(AllocationWatermarkOffset()
+ == static_cast<uint32_t>(Offset(allocation_watermark)));
}
-// Clears the corresponding remembered set bit for a given address.
-void Page::UnsetRSet(Address address, int offset) {
- uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- Memory::uint32_at(rset_address) &= ~bitmask;
+void Page::SetCachedAllocationWatermark(Address allocation_watermark) {
+ mc_first_forwarded = allocation_watermark;
+}
- ASSERT(!IsRSetSet(address, offset));
+
+Address Page::CachedAllocationWatermark() {
+ return mc_first_forwarded;
+}
+
+
+uint32_t Page::GetRegionMarks() {
+ return dirty_regions_;
}
-bool Page::IsRSetSet(Address address, int offset) {
+void Page::SetRegionMarks(uint32_t marks) {
+ dirty_regions_ = marks;
+}
+
+
+int Page::GetRegionNumberForAddress(Address addr) {
+  // Each page is divided into 256 byte regions. Each region has a
+  // corresponding dirty mark bit in the page header. A region can contain
+  // intergenerational references iff its dirty mark is set.
+  // A normal 8K page contains exactly 32 regions, so all region marks fit
+  // into a 32-bit integer field. To calculate a region number we just
+  // divide the offset inside the page by the region size.
+  // A large page can contain more than 32 regions. But we want to avoid
+  // additional write barrier code for distinguishing between large and
+  // normal pages, so we just ignore the fact that addr points into a large
+  // page and calculate the region number as if addr pointed into a normal
+  // 8K page. This way we get a region number modulo 32, so for large pages
+  // several regions might be mapped to a single dirty mark.
+ ASSERT_PAGE_ALIGNED(this->address());
+ STATIC_ASSERT((kPageAlignmentMask >> kRegionSizeLog2) < kBitsPerInt);
+
+ // We are using masking with kPageAlignmentMask instead of Page::Offset()
+  // to get an offset to the beginning of the 8K page containing addr, not
+  // to the beginning of the actual page, which can be bigger than 8K.
+ intptr_t offset_inside_normal_page = OffsetFrom(addr) & kPageAlignmentMask;
+ return static_cast<int>(offset_inside_normal_page >> kRegionSizeLog2);
+}
+
+
+uint32_t Page::GetRegionMaskForAddress(Address addr) {
+ return 1 << GetRegionNumberForAddress(addr);
+}
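A worked standalone example of the region arithmetic above, assuming the
8K pages and 256-byte regions the comment describes (so kRegionSizeLog2
is 8 and kPageAlignmentMask is 0x1FFF); the names are local stand-ins:

    #include <cassert>
    #include <cstdint>

    static const intptr_t kPageAlignmentMask = (1 << 13) - 1;  // 8K pages
    static const int kRegionSizeLog2 = 8;                      // 256 bytes

    static int RegionNumber(uintptr_t addr) {
      // Offset inside the normal-sized page, divided by the region size.
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }

    int main() {
      uintptr_t page = 0x40000000;      // hypothetical page start
      uintptr_t slot = page + 0x0A10;   // a slot 0xA10 bytes into the page
      assert(RegionNumber(slot) == 10); // 0xA10 >> 8 == 10
      uint32_t mask = 1u << RegionNumber(slot);
      assert(mask == 0x400);            // the dirty bit the barrier sets
      return 0;
    }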
+
+
+void Page::MarkRegionDirty(Address address) {
+ SetRegionMarks(GetRegionMarks() | GetRegionMaskForAddress(address));
+}
+
+
+bool Page::IsRegionDirty(Address address) {
+ return GetRegionMarks() & GetRegionMaskForAddress(address);
+}
+
+
+void Page::ClearRegionMarks(Address start, Address end, bool reaches_limit) {
+ int rstart = GetRegionNumberForAddress(start);
+ int rend = GetRegionNumberForAddress(end);
+
+ if (reaches_limit) {
+ end += 1;
+ }
+
+ if ((rend - rstart) == 0) {
+ return;
+ }
+
uint32_t bitmask = 0;
- Address rset_address = ComputeRSetBitPosition(address, offset, &bitmask);
- return (Memory::uint32_at(rset_address) & bitmask) != 0;
+
+ if ((OffsetFrom(start) & kRegionAlignmentMask) == 0
+ || (start == ObjectAreaStart())) {
+ // First region is fully covered
+ bitmask = 1 << rstart;
+ }
+
+ while (++rstart < rend) {
+ bitmask |= 1 << rstart;
+ }
+
+ if (bitmask) {
+ SetRegionMarks(GetRegionMarks() & ~bitmask);
+ }
+}
+
+
+void Page::FlipMeaningOfInvalidatedWatermarkFlag() {
+ watermark_invalidated_mark_ ^= WATERMARK_INVALIDATED;
+}
+
+
+bool Page::IsWatermarkValid() {
+ return (flags_ & WATERMARK_INVALIDATED) != watermark_invalidated_mark_;
+}
+
+
+void Page::InvalidateWatermark(bool value) {
+ if (value) {
+ flags_ = (flags_ & ~WATERMARK_INVALIDATED) | watermark_invalidated_mark_;
+ } else {
+ flags_ = (flags_ & ~WATERMARK_INVALIDATED) |
+ (watermark_invalidated_mark_ ^ WATERMARK_INVALIDATED);
+ }
+
+ ASSERT(IsWatermarkValid() == !value);
}
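The three functions above implement an O(1) mass invalidation: rather than
visiting every page, FlipMeaningOfInvalidatedWatermarkFlag flips the
global interpretation of the WATERMARK_INVALIDATED bit, and a page's
watermark counts as valid only while its own bit differs from the global
mark. A standalone sketch of the trick (names and the bit position are
illustrative):

    #include <cassert>
    #include <cstdint>

    static const intptr_t WATERMARK_INVALIDATED = 1 << 0;
    static intptr_t watermark_invalidated_mark = WATERMARK_INVALIDATED;

    struct PageLike { intptr_t flags; };

    static bool IsWatermarkValid(const PageLike& p) {
      return (p.flags & WATERMARK_INVALIDATED) != watermark_invalidated_mark;
    }

    int main() {
      PageLike p = { 0 };                 // bit differs from the global mark
      assert(IsWatermarkValid(p));
      // One XOR of the global mark invalidates every page's watermark.
      watermark_invalidated_mark ^= WATERMARK_INVALIDATED;
      assert(!IsWatermarkValid(p));
      return 0;
    }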
bool Page::GetPageFlag(PageFlag flag) {
- return (flags & flag) != 0;
+ return (flags_ & flag) != 0;
}
void Page::SetPageFlag(PageFlag flag, bool value) {
if (value) {
- flags |= flag;
+ flags_ |= flag;
} else {
- flags &= ~flag;
+ flags_ &= ~flag;
}
}
+void Page::ClearPageFlags() {
+ flags_ = 0;
+}
+
+
+void Page::ClearGCFields() {
+ InvalidateWatermark(true);
+ SetAllocationWatermark(ObjectAreaStart());
+ if (Heap::gc_state() == Heap::SCAVENGE) {
+ SetCachedAllocationWatermark(ObjectAreaStart());
+ }
+ SetRegionMarks(kAllRegionsCleanMarks);
+}
+
+
bool Page::WasInUseBeforeMC() {
return GetPageFlag(WAS_IN_USE_BEFORE_MC);
}
@@ -343,14 +427,6 @@ HeapObject* LargeObjectChunk::GetObject() {
// -----------------------------------------------------------------------------
// LargeObjectSpace
-int LargeObjectSpace::ExtraRSetBytesFor(int object_size) {
- int extra_rset_bits =
- RoundUp((object_size - Page::kObjectAreaSize) / kPointerSize,
- kBitsPerInt);
- return extra_rset_bits / kBitsPerByte;
-}
-
-
Object* NewSpace::AllocateRawInternal(int size_in_bytes,
AllocationInfo* alloc_info) {
Address new_top = alloc_info->top + size_in_bytes;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 6b6d926e25..1d868e9ac7 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -41,6 +41,7 @@ namespace internal {
&& (info).top <= (space).high() \
&& (info).limit == (space).high())
+intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
// ----------------------------------------------------------------------------
// HeapObjectIterator
@@ -139,13 +140,6 @@ PageIterator::PageIterator(PagedSpace* space, Mode mode) : space_(space) {
// -----------------------------------------------------------------------------
-// Page
-
-#ifdef DEBUG
-Page::RSetState Page::rset_state_ = Page::IN_USE;
-#endif
-
-// -----------------------------------------------------------------------------
// CodeRange
List<CodeRange::FreeBlock> CodeRange::free_list_(0);
@@ -524,7 +518,10 @@ Page* MemoryAllocator::InitializePagesInChunk(int chunk_id, int pages_in_chunk,
for (int i = 0; i < pages_in_chunk; i++) {
Page* p = Page::FromAddress(page_addr);
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
+ p->InvalidateWatermark(true);
p->SetIsLargeObjectPage(false);
+ p->SetAllocationWatermark(p->ObjectAreaStart());
+ p->SetCachedAllocationWatermark(p->ObjectAreaStart());
page_addr += Page::kPageSize;
}
@@ -681,6 +678,7 @@ Page* MemoryAllocator::RelinkPagesInChunk(int chunk_id,
p->opaque_header = OffsetFrom(page_addr + Page::kPageSize) | chunk_id;
page_addr += Page::kPageSize;
+ p->InvalidateWatermark(true);
if (p->WasInUseBeforeMC()) {
*last_page_in_use = p;
}
@@ -744,10 +742,10 @@ bool PagedSpace::Setup(Address start, size_t size) {
accounting_stats_.ExpandSpace(num_pages * Page::kObjectAreaSize);
ASSERT(Capacity() <= max_capacity_);
- // Sequentially initialize remembered sets in the newly allocated
+ // Sequentially clear region marks in the newly allocated
// pages and cache the current last page in the space.
for (Page* p = first_page_; p->is_valid(); p = p->next_page()) {
- p->ClearRSet();
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
}
@@ -794,10 +792,10 @@ void PagedSpace::Unprotect() {
#endif
-void PagedSpace::ClearRSet() {
+void PagedSpace::MarkAllPagesClean() {
PageIterator it(this, PageIterator::ALL_PAGES);
while (it.has_next()) {
- it.next()->ClearRSet();
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
}
@@ -900,7 +898,8 @@ HeapObject* PagedSpace::SlowMCAllocateRaw(int size_in_bytes) {
// of forwarding addresses is as an offset in terms of live bytes, so we
// need quick access to the allocation top of each page to decode
// forwarding addresses.
- current_page->mc_relocation_top = mc_forwarding_info_.top;
+ current_page->SetAllocationWatermark(mc_forwarding_info_.top);
+ current_page->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&mc_forwarding_info_, current_page->next_page());
return AllocateLinearly(&mc_forwarding_info_, size_in_bytes);
}
@@ -928,10 +927,10 @@ bool PagedSpace::Expand(Page* last_page) {
MemoryAllocator::SetNextPage(last_page, p);
- // Sequentially clear remembered set of new pages and and cache the
+  // Sequentially clear region marks of new pages and cache the
// new last page in the space.
while (p->is_valid()) {
- p->ClearRSet();
+ p->SetRegionMarks(Page::kAllRegionsCleanMarks);
last_page_ = p;
p = p->next_page();
}
@@ -1030,16 +1029,11 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
if (above_allocation_top) {
// We don't care what's above the allocation top.
} else {
- // Unless this is the last page in the space containing allocated
- // objects, the allocation top should be at a constant offset from the
- // object area end.
Address top = current_page->AllocationTop();
if (current_page == top_page) {
ASSERT(top == allocation_info_.top);
// The next page will be above the allocation top.
above_allocation_top = true;
- } else {
- ASSERT(top == PageAllocationLimit(current_page));
}
// It should be packed with objects from the bottom to the top.
@@ -1060,8 +1054,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
object->Verify();
// All the interior pointers should be contained in the heap and
- // have their remembered set bits set if required as determined
- // by the visitor.
+  // have the page regions covering intergenerational references
+  // marked dirty.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
@@ -1120,7 +1114,7 @@ bool NewSpace::Setup(Address start, int size) {
start_ = start;
address_mask_ = ~(size - 1);
- object_mask_ = address_mask_ | kHeapObjectTag;
+ object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
allocation_info_.top = to_space_.low();
@@ -1324,7 +1318,7 @@ bool SemiSpace::Setup(Address start,
start_ = start;
address_mask_ = ~(maximum_capacity - 1);
- object_mask_ = address_mask_ | kHeapObjectTag;
+ object_mask_ = address_mask_ | kHeapObjectTagMask;
object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
age_mark_ = start_;
@@ -1634,7 +1628,7 @@ void FreeListNode::set_size(int size_in_bytes) {
// If the block is too small (eg, one or two words), to hold both a size
// field and a next pointer, we give it a filler map that gives it the
// correct size.
- if (size_in_bytes > ByteArray::kAlignedSize) {
+ if (size_in_bytes > ByteArray::kHeaderSize) {
set_map(Heap::raw_unchecked_byte_array_map());
// Can't use ByteArray::cast because it fails during deserialization.
ByteArray* this_as_byte_array = reinterpret_cast<ByteArray*>(this);
@@ -1831,7 +1825,7 @@ FixedSizeFreeList::FixedSizeFreeList(AllocationSpace owner, int object_size)
void FixedSizeFreeList::Reset() {
available_ = 0;
- head_ = NULL;
+ head_ = tail_ = NULL;
}
@@ -1843,8 +1837,13 @@ void FixedSizeFreeList::Free(Address start) {
ASSERT(!MarkCompactCollector::IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
- node->set_next(head_);
- head_ = node->address();
+ node->set_next(NULL);
+ if (head_ == NULL) {
+ tail_ = head_ = node->address();
+ } else {
+ FreeListNode::FromAddress(tail_)->set_next(node->address());
+ tail_ = node->address();
+ }
available_ += object_size_;
}
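
The Free() change above converts the fixed-size free list from LIFO to FIFO by threading a tail_ pointer, so nodes are reused in the order they were freed. A small sketch of the append-at-tail discipline; the Allocate() side is an assumption, since this hunk does not show it:

    struct FreeListNode {
      FreeListNode* next;
    };

    struct FixedSizeFreeListSketch {
      FreeListNode* head = nullptr;
      FreeListNode* tail = nullptr;
      int available = 0;
      int object_size;

      explicit FixedSizeFreeListSketch(int size) : object_size(size) {}

      void Free(FreeListNode* node) {
        node->next = nullptr;
        if (head == nullptr) {
          head = tail = node;   // first node: both ends point at it
        } else {
          tail->next = node;    // append, preserving the order of frees
          tail = node;
        }
        available += object_size;
      }

      // Assumed shape of the allocation side (not shown in this hunk):
      // pop from the head, i.e. the oldest freed node.
      FreeListNode* Allocate() {
        if (head == nullptr) return nullptr;
        FreeListNode* node = head;
        head = node->next;
        if (head == nullptr) tail = nullptr;
        available -= object_size;
        return node;
      }
    };
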
@@ -1907,15 +1906,14 @@ void OldSpace::MCCommitRelocationInfo() {
Page* p = it.next();
// Space below the relocation pointer is allocated.
computed_size +=
- static_cast<int>(p->mc_relocation_top - p->ObjectAreaStart());
+ static_cast<int>(p->AllocationWatermark() - p->ObjectAreaStart());
if (it.has_next()) {
- // Free the space at the top of the page. We cannot use
- // p->mc_relocation_top after the call to Free (because Free will clear
- // remembered set bits).
+ // Free the space at the top of the page.
int extra_size =
- static_cast<int>(p->ObjectAreaEnd() - p->mc_relocation_top);
+ static_cast<int>(p->ObjectAreaEnd() - p->AllocationWatermark());
if (extra_size > 0) {
- int wasted_bytes = free_list_.Free(p->mc_relocation_top, extra_size);
+ int wasted_bytes = free_list_.Free(p->AllocationWatermark(),
+ extra_size);
// The bytes we have just "freed" to add to the free list were
// already accounted as available.
accounting_stats_.WasteBytes(wasted_bytes);
@@ -1963,7 +1961,10 @@ void PagedSpace::FreePages(Page* prev, Page* last) {
// Clean them up.
do {
- first->ClearRSet();
+ first->InvalidateWatermark(true);
+ first->SetAllocationWatermark(first->ObjectAreaStart());
+ first->SetCachedAllocationWatermark(first->ObjectAreaStart());
+ first->SetRegionMarks(Page::kAllRegionsCleanMarks);
first = first->next_page();
} while (first != NULL);
@@ -2003,6 +2004,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
// Current allocation top points to a page which is now in the middle
// of page list. We should move allocation top forward to the new last
// used page so various object iterators will continue to work properly.
+ last_in_use->SetAllocationWatermark(last_in_use->AllocationTop());
int size_in_bytes = static_cast<int>(PageAllocationLimit(last_in_use) -
last_in_use->AllocationTop());
@@ -2035,6 +2037,7 @@ void PagedSpace::PrepareForMarkCompact(bool will_compact) {
int size_in_bytes = static_cast<int>(PageAllocationLimit(p) -
p->ObjectAreaStart());
+ p->SetAllocationWatermark(p->ObjectAreaStart());
Heap::CreateFillerObjectAt(p->ObjectAreaStart(), size_in_bytes);
}
}
@@ -2066,6 +2069,7 @@ bool PagedSpace::ReserveSpace(int bytes) {
if (!reserved_page->is_valid()) return false;
}
ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+ TopPageOf(allocation_info_)->next_page()->InvalidateWatermark(true);
SetAllocationInfo(&allocation_info_,
TopPageOf(allocation_info_)->next_page());
return true;
@@ -2100,7 +2104,20 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
accounting_stats_.WasteBytes(wasted_bytes);
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
}
}
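
The new watermark bump above is the lazy-sweeping contract in miniature: a free-list hit at or above the watermark must land exactly on it, and allocating there extends the swept, well-formed prefix of the page. The same check as a standalone function, with a stand-in page type:

    #include <cassert>
    #include <cstdint>

    typedef uint8_t* Address;

    struct PageSketch {
      Address watermark;
      Address AllocationWatermark() const { return watermark; }
      void SetAllocationWatermark(Address a) { watermark = a; }
    };

    void BumpWatermarkAfterFreeListHit(PageSketch* p, Address obj, int size) {
      if (obj >= p->AllocationWatermark()) {
        // No hole may exist between the watermark and the new object:
        // memory above the watermark was not swept and may hold garbage
        // pointers to new space, so it must be consumed contiguously.
        assert(obj == p->AllocationWatermark());
        p->SetAllocationWatermark(obj + size);
      }
    }
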
@@ -2123,6 +2140,7 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
@@ -2133,6 +2151,7 @@ void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ current_page->SetAllocationWatermark(allocation_info_.top);
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
// In the fixed space free list all the free list items have the right size.
@@ -2152,8 +2171,10 @@ void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
int size_in_bytes) {
ASSERT(current_page->next_page()->is_valid());
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
PutRestOfCurrentPageOnFreeList(current_page);
- SetAllocationInfo(&allocation_info_, current_page->next_page());
+ SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@@ -2296,160 +2317,12 @@ void OldSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- // Report remembered set statistics.
- int rset_marked_pointers = 0;
- int rset_marked_arrays = 0;
- int rset_marked_array_elements = 0;
- int cross_gen_pointers = 0;
- int cross_gen_array_elements = 0;
-
- PageIterator page_it(this, PageIterator::PAGES_IN_USE);
- while (page_it.has_next()) {
- Page* p = page_it.next();
-
- for (Address rset_addr = p->RSetStart();
- rset_addr < p->RSetEnd();
- rset_addr += kIntSize) {
- int rset = Memory::int_at(rset_addr);
- if (rset != 0) {
- // Bits were set
- int intoff =
- static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
- int bitoff = 0;
- for (; bitoff < kBitsPerInt; ++bitoff) {
- if ((rset & (1 << bitoff)) != 0) {
- int bitpos = intoff*kBitsPerByte + bitoff;
- Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
- Object** obj = reinterpret_cast<Object**>(slot);
- if (*obj == Heap::raw_unchecked_fixed_array_map()) {
- rset_marked_arrays++;
- FixedArray* fa = FixedArray::cast(HeapObject::FromAddress(slot));
-
- rset_marked_array_elements += fa->length();
- // Manually inline FixedArray::IterateBody
- Address elm_start = slot + FixedArray::kHeaderSize;
- Address elm_stop = elm_start + fa->length() * kPointerSize;
- for (Address elm_addr = elm_start;
- elm_addr < elm_stop; elm_addr += kPointerSize) {
- // Filter non-heap-object pointers
- Object** elm_p = reinterpret_cast<Object**>(elm_addr);
- if (Heap::InNewSpace(*elm_p))
- cross_gen_array_elements++;
- }
- } else {
- rset_marked_pointers++;
- if (Heap::InNewSpace(*obj))
- cross_gen_pointers++;
- }
- }
- }
- }
- }
- }
-
- pct = rset_marked_pointers == 0 ?
- 0 : cross_gen_pointers * 100 / rset_marked_pointers;
- PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
- rset_marked_pointers, cross_gen_pointers, pct);
- PrintF(" rset_marked arrays %d, ", rset_marked_arrays);
- PrintF(" elements %d, ", rset_marked_array_elements);
- pct = rset_marked_array_elements == 0 ? 0
- : cross_gen_array_elements * 100 / rset_marked_array_elements;
- PrintF(" pointers to new space %d (%%%d)\n", cross_gen_array_elements, pct);
- PrintF(" total rset-marked bits %d\n",
- (rset_marked_pointers + rset_marked_arrays));
- pct = (rset_marked_pointers + rset_marked_array_elements) == 0 ? 0
- : (cross_gen_pointers + cross_gen_array_elements) * 100 /
- (rset_marked_pointers + rset_marked_array_elements);
- PrintF(" total rset pointers %d, true cross generation ones %d (%%%d)\n",
- (rset_marked_pointers + rset_marked_array_elements),
- (cross_gen_pointers + cross_gen_array_elements),
- pct);
-
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(true);
}
-
-
-// Dump the range of remembered set words between [start, end) corresponding
-// to the pointers starting at object_p. The allocation_top is an object
-// pointer which should not be read past. This is important for large object
-// pages, where some bits in the remembered set range do not correspond to
-// allocated addresses.
-static void PrintRSetRange(Address start, Address end, Object** object_p,
- Address allocation_top) {
- Address rset_address = start;
-
- // If the range starts on on odd numbered word (eg, for large object extra
- // remembered set ranges), print some spaces.
- if ((reinterpret_cast<uintptr_t>(start) / kIntSize) % 2 == 1) {
- PrintF(" ");
- }
-
- // Loop over all the words in the range.
- while (rset_address < end) {
- uint32_t rset_word = Memory::uint32_at(rset_address);
- int bit_position = 0;
-
- // Loop over all the bits in the word.
- while (bit_position < kBitsPerInt) {
- if (object_p == reinterpret_cast<Object**>(allocation_top)) {
- // Print a bar at the allocation pointer.
- PrintF("|");
- } else if (object_p > reinterpret_cast<Object**>(allocation_top)) {
- // Do not dereference object_p past the allocation pointer.
- PrintF("#");
- } else if ((rset_word & (1 << bit_position)) == 0) {
- // Print a dot for zero bits.
- PrintF(".");
- } else if (Heap::InNewSpace(*object_p)) {
- // Print an X for one bits for pointers to new space.
- PrintF("X");
- } else {
- // Print a circle for one bits for pointers to old space.
- PrintF("o");
- }
-
- // Print a space after every 8th bit except the last.
- if (bit_position % 8 == 7 && bit_position != (kBitsPerInt - 1)) {
- PrintF(" ");
- }
-
- // Advance to next bit.
- bit_position++;
- object_p++;
- }
-
- // Print a newline after every odd numbered word, otherwise a space.
- if ((reinterpret_cast<uintptr_t>(rset_address) / kIntSize) % 2 == 1) {
- PrintF("\n");
- } else {
- PrintF(" ");
- }
-
- // Advance to next remembered set word.
- rset_address += kIntSize;
- }
-}
-
-
-void PagedSpace::DoPrintRSet(const char* space_name) {
- PageIterator it(this, PageIterator::PAGES_IN_USE);
- while (it.has_next()) {
- Page* p = it.next();
- PrintF("%s page 0x%x:\n", space_name, p);
- PrintRSetRange(p->RSetStart(), p->RSetEnd(),
- reinterpret_cast<Object**>(p->ObjectAreaStart()),
- p->AllocationTop());
- PrintF("\n");
- }
-}
-
-
-void OldSpace::PrintRSet() { DoPrintRSet("old"); }
#endif
// -----------------------------------------------------------------------------
@@ -2499,6 +2372,7 @@ void FixedSpace::MCCommitRelocationInfo() {
if (it.has_next()) {
accounting_stats_.WasteBytes(
static_cast<int>(page->ObjectAreaEnd() - page_top));
+ page->SetAllocationWatermark(page_top);
}
}
@@ -2528,7 +2402,19 @@ HeapObject* FixedSpace::SlowAllocateRaw(int size_in_bytes) {
Object* result = free_list_.Allocate();
if (!result->IsFailure()) {
accounting_stats_.AllocateBytes(size_in_bytes);
- return HeapObject::cast(result);
+ HeapObject* obj = HeapObject::cast(result);
+ Page* p = Page::FromAddress(obj->address());
+
+ if (obj->address() >= p->AllocationWatermark()) {
+ // There should be no hole between the allocation watermark
+ // and allocated object address.
+ // Memory above the allocation watermark was not swept and
+ // might contain garbage pointers to new space.
+ ASSERT(obj->address() == p->AllocationWatermark());
+ p->SetAllocationWatermark(obj->address() + size_in_bytes);
+ }
+
+ return obj;
}
}
@@ -2558,8 +2444,11 @@ HeapObject* FixedSpace::AllocateInNextPage(Page* current_page,
ASSERT(current_page->next_page()->is_valid());
ASSERT(allocation_info_.top == PageAllocationLimit(current_page));
ASSERT_EQ(object_size_in_bytes_, size_in_bytes);
+ Page* next_page = current_page->next_page();
+ next_page->ClearGCFields();
+ current_page->SetAllocationWatermark(allocation_info_.top);
accounting_stats_.WasteBytes(page_extra_);
- SetAllocationInfo(&allocation_info_, current_page->next_page());
+ SetAllocationInfo(&allocation_info_, next_page);
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
@@ -2570,51 +2459,12 @@ void FixedSpace::ReportStatistics() {
PrintF(" capacity: %d, waste: %d, available: %d, %%%d\n",
Capacity(), Waste(), Available(), pct);
- // Report remembered set statistics.
- int rset_marked_pointers = 0;
- int cross_gen_pointers = 0;
-
- PageIterator page_it(this, PageIterator::PAGES_IN_USE);
- while (page_it.has_next()) {
- Page* p = page_it.next();
-
- for (Address rset_addr = p->RSetStart();
- rset_addr < p->RSetEnd();
- rset_addr += kIntSize) {
- int rset = Memory::int_at(rset_addr);
- if (rset != 0) {
- // Bits were set
- int intoff =
- static_cast<int>(rset_addr - p->address() - Page::kRSetOffset);
- int bitoff = 0;
- for (; bitoff < kBitsPerInt; ++bitoff) {
- if ((rset & (1 << bitoff)) != 0) {
- int bitpos = intoff*kBitsPerByte + bitoff;
- Address slot = p->OffsetToAddress(bitpos << kObjectAlignmentBits);
- Object** obj = reinterpret_cast<Object**>(slot);
- rset_marked_pointers++;
- if (Heap::InNewSpace(*obj))
- cross_gen_pointers++;
- }
- }
- }
- }
- }
-
- pct = rset_marked_pointers == 0 ?
- 0 : cross_gen_pointers * 100 / rset_marked_pointers;
- PrintF(" rset-marked pointers %d, to-new-space %d (%%%d)\n",
- rset_marked_pointers, cross_gen_pointers, pct);
-
ClearHistograms();
HeapObjectIterator obj_it(this);
for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next())
CollectHistogramInfo(obj);
ReportHistogram(false);
}
-
-
-void FixedSpace::PrintRSet() { DoPrintRSet(name_); }
#endif
@@ -2793,8 +2643,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
chunk->set_size(chunk_size);
first_chunk_ = chunk;
- // Set the object address and size in the page header and clear its
- // remembered set.
+ // Initialize page header.
Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
Address object_address = page->ObjectAreaStart();
// Clear the low order bit of the second word in the page to flag it as a
@@ -2802,13 +2651,7 @@ Object* LargeObjectSpace::AllocateRawInternal(int requested_size,
// low order bit should already be clear.
ASSERT((chunk_size & 0x1) == 0);
page->SetIsLargeObjectPage(true);
- page->ClearRSet();
- int extra_bytes = requested_size - object_size;
- if (extra_bytes > 0) {
- // The extra memory for the remembered set should be cleared.
- memset(object_address + object_size, 0, extra_bytes);
- }
-
+ page->SetRegionMarks(Page::kAllRegionsCleanMarks);
return HeapObject::FromAddress(object_address);
}
@@ -2823,8 +2666,7 @@ Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
Object* LargeObjectSpace::AllocateRawFixedArray(int size_in_bytes) {
ASSERT(0 < size_in_bytes);
- int extra_rset_bytes = ExtraRSetBytesFor(size_in_bytes);
- return AllocateRawInternal(size_in_bytes + extra_rset_bytes,
+ return AllocateRawInternal(size_in_bytes,
size_in_bytes,
NOT_EXECUTABLE);
}
@@ -2851,59 +2693,61 @@ Object* LargeObjectSpace::FindObject(Address a) {
return Failure::Exception();
}
-
-void LargeObjectSpace::ClearRSet() {
- ASSERT(Page::is_rset_in_use());
-
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- // We only have code, sequential strings, or fixed arrays in large
- // object space, and only fixed arrays need remembered set support.
- if (object->IsFixedArray()) {
- // Clear the normal remembered set region of the page;
- Page* page = Page::FromAddress(object->address());
- page->ClearRSet();
-
- // Clear the extra remembered set.
- int size = object->Size();
- int extra_rset_bytes = ExtraRSetBytesFor(size);
- memset(object->address() + size, 0, extra_rset_bytes);
- }
- }
-}
-
-
-void LargeObjectSpace::IterateRSet(ObjectSlotCallback copy_object_func) {
- ASSERT(Page::is_rset_in_use());
-
- static void* lo_rset_histogram = StatsTable::CreateHistogram(
- "V8.RSetLO",
- 0,
- // Keeping this histogram's buckets the same as the paged space histogram.
- Page::kObjectAreaSize / kPointerSize,
- 30);
-
+void LargeObjectSpace::IterateDirtyRegions(ObjectSlotCallback copy_object) {
LargeObjectIterator it(this);
for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
// We only have code, sequential strings, or fixed arrays in large
// object space, and only fixed arrays can possibly contain pointers to
// the young generation.
if (object->IsFixedArray()) {
- // Iterate the normal page remembered set range.
Page* page = Page::FromAddress(object->address());
- Address object_end = object->address() + object->Size();
- int count = Heap::IterateRSetRange(page->ObjectAreaStart(),
- Min(page->ObjectAreaEnd(), object_end),
- page->RSetStart(),
- copy_object_func);
-
- // Iterate the extra array elements.
- if (object_end > page->ObjectAreaEnd()) {
- count += Heap::IterateRSetRange(page->ObjectAreaEnd(), object_end,
- object_end, copy_object_func);
- }
- if (lo_rset_histogram != NULL) {
- StatsTable::AddHistogramSample(lo_rset_histogram, count);
+ uint32_t marks = page->GetRegionMarks();
+ uint32_t newmarks = Page::kAllRegionsCleanMarks;
+
+ if (marks != Page::kAllRegionsCleanMarks) {
+ // For a large page a single dirty mark corresponds to several
+ // regions (modulo 32). So we treat a large page as a sequence of
+      // normal pages of size Page::kPageSize that share the same dirty
+      // marks, and subsequently iterate the dirty regions on each of them.
+ Address start = object->address();
+ Address end = page->ObjectAreaEnd();
+ Address object_end = start + object->Size();
+
+ // Iterate regions of the first normal page covering object.
+ uint32_t first_region_number = page->GetRegionNumberForAddress(start);
+ newmarks |=
+ Heap::IterateDirtyRegions(marks >> first_region_number,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object) << first_region_number;
+
+ start = end;
+ end = start + Page::kPageSize;
+ while (end <= object_end) {
+ // Iterate next 32 regions.
+ newmarks |=
+ Heap::IterateDirtyRegions(marks,
+ start,
+ end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ start = end;
+ end = start + Page::kPageSize;
+ }
+
+ if (start != object_end) {
+          // Iterate the last piece of the object, which is shorter than
+ // Page::kPageSize.
+ newmarks |=
+ Heap::IterateDirtyRegions(marks,
+ start,
+ object_end,
+ &Heap::IteratePointersInDirtyRegion,
+ copy_object);
+ }
+
+ page->SetRegionMarks(newmarks);
}
}
}
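
IterateDirtyRegions above resolves the mismatch between 32 dirty bits and a multi-page large object by walking the object in Page::kPageSize strides and reusing the same 32-bit mark word for each stride. Its control flow, reduced to a skeleton with the region-visiting callback stubbed out:

    #include <cstdint>

    typedef uint8_t* Address;
    const int kPageSize = 1 << 13;  // 8K, as in Page::kPageSize
    const uint32_t kAllRegionsCleanMarks = 0x0;

    // Stub for Heap::IterateDirtyRegions: visit the dirty regions of
    // [start, end) under 'marks' and return the marks left dirty.
    uint32_t VisitDirtyRegions(uint32_t marks, Address start, Address end) {
      (void)marks; (void)start; (void)end;
      return kAllRegionsCleanMarks;  // pretend every region came back clean
    }

    uint32_t WalkLargeObject(uint32_t marks, int first_region_number,
                             Address object_start, Address page_area_end,
                             Address object_end) {
      uint32_t newmarks = kAllRegionsCleanMarks;

      // First stride: only the bits from the object's first region onward.
      newmarks |= VisitDirtyRegions(marks >> first_region_number,
                                    object_start, page_area_end)
                  << first_region_number;

      // Middle strides: each full 8K window reuses the whole mark word.
      Address start = page_area_end;
      Address end = start + kPageSize;
      while (end <= object_end) {
        newmarks |= VisitDirtyRegions(marks, start, end);
        start = end;
        end = start + kPageSize;
      }

      // Tail stride: the remainder of the object, shorter than a page.
      if (start != object_end) {
        newmarks |= VisitDirtyRegions(marks, start, object_end);
      }
      return newmarks;
    }
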
@@ -2995,7 +2839,7 @@ void LargeObjectSpace::Verify() {
} else if (object->IsFixedArray()) {
// We loop over fixed arrays ourselves, rather then using the visitor,
// because the visitor doesn't support the start/offset iteration
- // needed for IsRSetSet.
+ // needed for IsRegionDirty.
FixedArray* array = FixedArray::cast(object);
for (int j = 0; j < array->length(); j++) {
Object* element = array->get(j);
@@ -3004,8 +2848,11 @@ void LargeObjectSpace::Verify() {
ASSERT(Heap::Contains(element_object));
ASSERT(element_object->map()->IsMap());
if (Heap::InNewSpace(element_object)) {
- ASSERT(Page::IsRSetSet(object->address(),
- FixedArray::kHeaderSize + j * kPointerSize));
+ Address array_addr = object->address();
+ Address element_addr = array_addr + FixedArray::kHeaderSize +
+ j * kPointerSize;
+
+ ASSERT(Page::FromAddress(array_addr)->IsRegionDirty(element_addr));
}
}
}
@@ -3046,33 +2893,6 @@ void LargeObjectSpace::CollectCodeStatistics() {
}
}
}
-
-
-void LargeObjectSpace::PrintRSet() {
- LargeObjectIterator it(this);
- for (HeapObject* object = it.next(); object != NULL; object = it.next()) {
- if (object->IsFixedArray()) {
- Page* page = Page::FromAddress(object->address());
-
- Address allocation_top = object->address() + object->Size();
- PrintF("large page 0x%x:\n", page);
- PrintRSetRange(page->RSetStart(), page->RSetEnd(),
- reinterpret_cast<Object**>(object->address()),
- allocation_top);
- int extra_array_bytes = object->Size() - Page::kObjectAreaSize;
- int extra_rset_bits = RoundUp(extra_array_bytes / kPointerSize,
- kBitsPerInt);
- PrintF("------------------------------------------------------------"
- "-----------\n");
- PrintRSetRange(allocation_top,
- allocation_top + extra_rset_bits / kBitsPerByte,
- reinterpret_cast<Object**>(object->address()
- + Page::kObjectAreaSize),
- allocation_top);
- PrintF("\n");
- }
- }
-}
#endif // DEBUG
} } // namespace v8::internal
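
The Verify() change earlier in this file states the card-marking invariant directly: a new-space pointer stored at element j of a large FixedArray must sit in a dirty region. The same predicate in isolation, with stub heap queries standing in for Heap::InNewSpace and Page::IsRegionDirty:

    #include <cassert>
    #include <cstdint>

    typedef uint8_t* Address;
    const int kHeaderSize = 2 * sizeof(void*);  // stand-in for FixedArray::kHeaderSize
    const int kPointerSize = sizeof(void*);

    // Stubs standing in for Heap::InNewSpace and Page::IsRegionDirty.
    static bool InNewSpace(void* p) { (void)p; return false; }
    static bool IsRegionDirty(Address addr) { (void)addr; return true; }

    void CheckElementRegion(Address array_addr, int j, void* element) {
      Address element_addr = array_addr + kHeaderSize + j * kPointerSize;
      if (InNewSpace(element)) {
        // A clean region here would let the scavenger miss this pointer.
        assert(IsRegionDirty(element_addr));
      }
    }
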
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index df42d515d4..32a3e6cb0e 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -45,23 +45,46 @@ namespace internal {
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous. The old and map
-// spaces consists of a list of pages. A page has a page header, a remembered
-// set area, and an object area. A page size is deliberately chosen as 8K
-// bytes. The first word of a page is an opaque page header that has the
+// spaces consist of a list of pages. A page has a page header and an object
+// area. A page size is deliberately chosen as 8K bytes.
+// The first word of a page is an opaque page header that has the
// address of the next page and its ownership information. The second word may
-// have the allocation top address of this page. The next 248 bytes are
-// remembered sets. Heap objects are aligned to the pointer size (4 bytes). A
-// remembered set bit corresponds to a pointer in the object area.
+// have the allocation top address of this page. Heap objects are aligned to the
+// pointer size.
//
// There is a separate large object space for objects larger than
// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged and uses the same remembered
-// set implementation. Pages in large object space may be larger than 8K.
+// collection. The large object space is paged. Pages in large object space
+// may be larger than 8K.
+//
+// A card marking write barrier is used to keep track of intergenerational
+// references. Old space pages are divided into regions of size
+// Page::kRegionSize. Each region has a corresponding dirty bit in the page
+// header which is set if the region might contain pointers to new space. For
+// details of the dirty bit encoding see the comments in the
+// Page::GetRegionNumberForAddress() method body.
+//
+// During scavenges and mark-sweep collections we iterate intergenerational
+// pointers without decoding heap object maps, so if the page belongs to old
+// pointer space or large object space it is essential to guarantee that
+// the page does not contain any garbage pointers to new space: every pointer-
+// aligned word which satisfies the Heap::InNewSpace() predicate must be a
+// pointer to a live heap object in new space. Thus objects in old pointer
+// and large object spaces should have a special layout (e.g. no bare integer
+// fields). This requirement does not apply to map space, which is iterated in
+// a special fashion. However, we still require the pointer fields of dead
+// maps to be cleared.
+//
+// To enable lazy cleaning of old space pages we use the notion of an
+// allocation watermark. Every pointer under the watermark is considered to
+// be well formed. The page allocation watermark is not necessarily equal to
+// the page allocation top, but all live objects on the page must reside
+// under the allocation watermark. During a scavenge the watermark might be
+// bumped and invalid pointers might appear below it. To avoid following them
+// we store a valid watermark into a special field in the page header and set
+// the page's WATERMARK_INVALIDATED flag. For details see the comments in the
+// Page::SetAllocationWatermark() method body.
//
-// NOTE: The mark-compact collector rebuilds the remembered set after a
-// collection. It reuses first a few words of the remembered set for
-// bookkeeping relocation information.
-
// Some assertion macros used in the debugging mode.
@@ -91,25 +114,13 @@ class AllocationInfo;
// -----------------------------------------------------------------------------
// A page normally has 8K bytes. Large object pages may be larger. A page
-// address is always aligned to the 8K page size. A page is divided into
-// three areas: the first two words are used for bookkeeping, the next 248
-// bytes are used as remembered set, and the rest of the page is the object
-// area.
-//
-// Pointers are aligned to the pointer size (4), only 1 bit is needed
-// for a pointer in the remembered set. Given an address, its remembered set
-// bit position (offset from the start of the page) is calculated by dividing
-// its page offset by 32. Therefore, the object area in a page starts at the
-// 256th byte (8K/32). Bytes 0 to 255 do not need the remembered set, so that
-// the first two words (64 bits) in a page can be used for other purposes.
+// address is always aligned to the 8K page size.
//
-// On the 64-bit platform, we add an offset to the start of the remembered set,
-// and pointers are aligned to 8-byte pointer size. This means that we need
-// only 128 bytes for the RSet, and only get two bytes free in the RSet's RSet.
-// For this reason we add an offset to get room for the Page data at the start.
+// Each page starts with a header of size Page::kPageHeaderSize which contains
+// bookkeeping data.
//
// The mark-compact collector transforms a map pointer into a page index and a
-// page offset. The excact encoding is described in the comments for
+// page offset. The exact encoding is described in the comments for
// class MapWord in objects.h.
//
// The only way to get a page pointer is by calling factory methods:
@@ -150,18 +161,25 @@ class Page {
// Return the end of allocation in this page. Undefined for unused pages.
inline Address AllocationTop();
+ // Return the allocation watermark for the page.
+ // For old space pages it is guaranteed that the area under the watermark
+ // does not contain any garbage pointers to new space.
+ inline Address AllocationWatermark();
+
+ // Return the allocation watermark offset from the beginning of the page.
+ inline uint32_t AllocationWatermarkOffset();
+
+ inline void SetAllocationWatermark(Address allocation_watermark);
+
+ inline void SetCachedAllocationWatermark(Address allocation_watermark);
+ inline Address CachedAllocationWatermark();
+
// Returns the start address of the object area in this page.
Address ObjectAreaStart() { return address() + kObjectStartOffset; }
// Returns the end address (exclusive) of the object area in this page.
Address ObjectAreaEnd() { return address() + Page::kPageSize; }
- // Returns the start address of the remembered set area.
- Address RSetStart() { return address() + kRSetStartOffset; }
-
- // Returns the end address of the remembered set area (exclusive).
- Address RSetEnd() { return address() + kRSetEndOffset; }
-
// Checks whether an address is page aligned.
static bool IsAlignedToPageSize(Address a) {
return 0 == (OffsetFrom(a) & kPageAlignmentMask);
@@ -193,33 +211,23 @@ class Page {
}
// ---------------------------------------------------------------------
- // Remembered set support
+ // Card marking support
- // Clears remembered set in this page.
- inline void ClearRSet();
+ static const uint32_t kAllRegionsCleanMarks = 0x0;
+ static const uint32_t kAllRegionsDirtyMarks = 0xFFFFFFFF;
- // Return the address of the remembered set word corresponding to an
- // object address/offset pair, and the bit encoded as a single-bit
- // mask in the output parameter 'bitmask'.
- INLINE(static Address ComputeRSetBitPosition(Address address, int offset,
- uint32_t* bitmask));
+ inline uint32_t GetRegionMarks();
+ inline void SetRegionMarks(uint32_t dirty);
- // Sets the corresponding remembered set bit for a given address.
- INLINE(static void SetRSet(Address address, int offset));
+ inline uint32_t GetRegionMaskForAddress(Address addr);
+ inline int GetRegionNumberForAddress(Address addr);
- // Clears the corresponding remembered set bit for a given address.
- static inline void UnsetRSet(Address address, int offset);
+ inline void MarkRegionDirty(Address addr);
+ inline bool IsRegionDirty(Address addr);
- // Checks whether the remembered set bit for a given address is set.
- static inline bool IsRSetSet(Address address, int offset);
-
-#ifdef DEBUG
- // Use a state to mark whether remembered set space can be used for other
- // purposes.
- enum RSetState { IN_USE, NOT_IN_USE };
- static bool is_rset_in_use() { return rset_state_ == IN_USE; }
- static void set_rset_state(RSetState state) { rset_state_ = state; }
-#endif
+ inline void ClearRegionMarks(Address start,
+ Address end,
+ bool reaches_limit);
// Page size in bytes. This must be a multiple of the OS page size.
static const int kPageSize = 1 << kPageSizeBits;
@@ -227,25 +235,11 @@ class Page {
// Page size mask.
static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
- // The offset of the remembered set in a page, in addition to the empty bytes
- // formed as the remembered bits of the remembered set itself.
-#ifdef V8_TARGET_ARCH_X64
- static const int kRSetOffset = 4 * kPointerSize; // Room for four pointers.
-#else
- static const int kRSetOffset = 0;
-#endif
- // The end offset of the remembered set in a page
- // (heaps are aligned to pointer size).
- static const int kRSetEndOffset = kRSetOffset + kPageSize / kBitsPerPointer;
+ static const int kPageHeaderSize = kPointerSize + kPointerSize + kIntSize +
+ kIntSize + kPointerSize;
// The start offset of the object area in a page.
- // This needs to be at least (bits per uint32_t) * kBitsPerPointer,
- // to align start of rset to a uint32_t address.
- static const int kObjectStartOffset = 256;
-
- // The start offset of the used part of the remembered set in a page.
- static const int kRSetStartOffset = kRSetOffset +
- kObjectStartOffset / kBitsPerPointer;
+ static const int kObjectStartOffset = MAP_POINTER_ALIGN(kPageHeaderSize);
// Object area size in bytes.
static const int kObjectAreaSize = kPageSize - kObjectStartOffset;
@@ -253,13 +247,65 @@ class Page {
// Maximum object size that fits in a page.
static const int kMaxHeapObjectSize = kObjectAreaSize;
+ static const int kDirtyFlagOffset = 2 * kPointerSize;
+ static const int kRegionSizeLog2 = 8;
+ static const int kRegionSize = 1 << kRegionSizeLog2;
+ static const intptr_t kRegionAlignmentMask = (kRegionSize - 1);
+
+ STATIC_CHECK(kRegionSize == kPageSize / kBitsPerInt);
+
enum PageFlag {
IS_NORMAL_PAGE = 1 << 0,
- WAS_IN_USE_BEFORE_MC = 1 << 1
+ WAS_IN_USE_BEFORE_MC = 1 << 1,
+
+    // The page allocation watermark was bumped by preallocation during
+    // scavenge. The correct watermark can be retrieved by the
+    // CachedAllocationWatermark() method.
+ WATERMARK_INVALIDATED = 1 << 2
};
+  // To avoid an additional WATERMARK_INVALIDATED flag clearing pass during
+  // scavenge we just invalidate the watermark on each old space page after
+  // processing it. Then, at the beginning of the next scavenge, we flip the
+  // meaning of the WATERMARK_INVALIDATED flag so that every page becomes
+  // marked as having a valid watermark.
+  //
+  // The following invariant must hold for pages in old pointer and map spaces:
+  // if a page is in use then it is marked as having an invalid watermark at
+  // the beginning and at the end of any GC.
+  //
+  // This invariant guarantees that after flipping the flag's meaning at the
+  // beginning of a scavenge all pages in use will be marked as having a
+  // valid watermark.
+ static inline void FlipMeaningOfInvalidatedWatermarkFlag();
+
+ // Returns true if the page allocation watermark was not altered during
+ // scavenge.
+ inline bool IsWatermarkValid();
+
+ inline void InvalidateWatermark(bool value);
+
inline bool GetPageFlag(PageFlag flag);
inline void SetPageFlag(PageFlag flag, bool value);
+ inline void ClearPageFlags();
+
+ inline void ClearGCFields();
+
+ static const int kAllocationWatermarkOffsetShift = 3;
+ static const int kAllocationWatermarkOffsetBits = kPageSizeBits + 1;
+ static const uint32_t kAllocationWatermarkOffsetMask =
+ ((1 << kAllocationWatermarkOffsetBits) - 1) <<
+ kAllocationWatermarkOffsetShift;
+
+ static const uint32_t kFlagsMask =
+ ((1 << kAllocationWatermarkOffsetShift) - 1);
+
+ STATIC_CHECK(kBitsPerInt - kAllocationWatermarkOffsetShift >=
+ kAllocationWatermarkOffsetBits);
+
+ // This field contains the meaning of the WATERMARK_INVALIDATED flag.
+ // Instead of clearing this flag from all pages we just flip
+ // its meaning at the beginning of a scavenge.
+ static intptr_t watermark_invalidated_mark_;
//---------------------------------------------------------------------------
// Page header description.
@@ -279,26 +325,24 @@ class Page {
// second word *may* (if the page start and large object chunk start are
// the same) contain the large object chunk size. In either case, the
// low-order bit for large object pages will be cleared.
- // For normal pages this word is used to store various page flags.
- int flags;
+  // For normal pages this word is used to store the page flags and the
+  // offset of the allocation top.
+ intptr_t flags_;
- // The following fields may overlap with remembered set, they can only
- // be used in the mark-compact collector when remembered set is not
- // used.
+ // This field contains dirty marks for regions covering the page. Only dirty
+ // regions might contain intergenerational references.
+  // Only 32 dirty marks are supported, so on large object pages several
+  // regions might be mapped to a single dirty mark.
+ uint32_t dirty_regions_;
// The index of the page in its owner space.
int mc_page_index;
- // The allocation pointer after relocating objects to this page.
- Address mc_relocation_top;
-
- // The forwarding address of the first live object in this page.
+ // During mark-compact collections this field contains the forwarding address
+ // of the first live object in this page.
+  // During scavenge collections this field is used to store the allocation
+  // watermark if it is altered during the scavenge.
Address mc_first_forwarded;
-
-#ifdef DEBUG
- private:
- static RSetState rset_state_; // state of the remembered set
-#endif
};
@@ -921,8 +965,7 @@ class PagedSpace : public Space {
// Checks whether page is currently in use by this space.
bool IsUsed(Page* page);
- // Clears remembered sets of pages in this space.
- void ClearRSet();
+ void MarkAllPagesClean();
// Prepares for a mark-compact GC.
virtual void PrepareForMarkCompact(bool will_compact);
@@ -936,6 +979,11 @@ class PagedSpace : public Space {
// The limit of allocation for a page in this space.
virtual Address PageAllocationLimit(Page* page) = 0;
+ void FlushTopPageWatermark() {
+ AllocationTopPage()->SetCachedAllocationWatermark(top());
+ AllocationTopPage()->InvalidateWatermark(true);
+ }
+
// Current capacity without growing (Size() + Available() + Waste()).
int Capacity() { return accounting_stats_.Capacity(); }
@@ -990,7 +1038,8 @@ class PagedSpace : public Space {
// Writes relocation info to the top page.
void MCWriteRelocationInfoToPage() {
- TopPageOf(mc_forwarding_info_)->mc_relocation_top = mc_forwarding_info_.top;
+ TopPageOf(mc_forwarding_info_)->
+ SetAllocationWatermark(mc_forwarding_info_.top);
}
// Computes the offset of a given address in this space to the beginning
@@ -1108,8 +1157,6 @@ class PagedSpace : public Space {
#ifdef DEBUG
// Returns the number of total pages in this space.
int CountTotalPages();
-
- void DoPrintRSet(const char* space_name);
#endif
private:
@@ -1702,6 +1749,9 @@ class FixedSizeFreeList BASE_EMBEDDED {
// The head of the free list.
Address head_;
+ // The tail of the free list.
+ Address tail_;
+
// The identity of the owning space, for building allocation Failure
// objects.
AllocationSpace owner_;
@@ -1762,8 +1812,6 @@ class OldSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
protected:
@@ -1828,9 +1876,6 @@ class FixedSpace : public PagedSpace {
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
-
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
protected:
@@ -1899,11 +1944,11 @@ class MapSpace : public FixedSpace {
PageIterator it(this, PageIterator::ALL_PAGES);
while (pages_left-- > 0) {
ASSERT(it.has_next());
- it.next()->ClearRSet();
+ it.next()->SetRegionMarks(Page::kAllRegionsCleanMarks);
}
ASSERT(it.has_next());
Page* top_page = it.next();
- top_page->ClearRSet();
+ top_page->SetRegionMarks(Page::kAllRegionsCleanMarks);
ASSERT(top_page->is_valid());
int offset = live_maps % kMapsPerPage * Map::kSize;
@@ -1994,9 +2039,8 @@ class LargeObjectChunk {
public:
// Allocates a new LargeObjectChunk that contains a large object page
// (Page::kPageSize aligned) that has at least size_in_bytes (for a large
- // object and possibly extra remembered set words) bytes after the object
- // area start of that page. The allocated chunk size is set in the output
- // parameter chunk_size.
+ // object) bytes after the object area start of that page.
+ // The allocated chunk size is set in the output parameter chunk_size.
static LargeObjectChunk* New(int size_in_bytes,
size_t* chunk_size,
Executability executable);
@@ -2019,16 +2063,12 @@ class LargeObjectChunk {
// Returns the object in this chunk.
inline HeapObject* GetObject();
- // Given a requested size (including any extra remembered set words),
- // returns the physical size of a chunk to be allocated.
+ // Given a requested size returns the physical size of a chunk to be
+ // allocated.
static int ChunkSizeFor(int size_in_bytes);
- // Given a chunk size, returns the object size it can accommodate (not
- // including any extra remembered set words). Used by
- // LargeObjectSpace::Available. Note that this can overestimate the size
- // of object that will fit in a chunk---if the object requires extra
- // remembered set words (eg, for large fixed arrays), the actual object
- // size for the chunk will be smaller than reported by this function.
+ // Given a chunk size, returns the object size it can accommodate. Used by
+ // LargeObjectSpace::Available.
static int ObjectSizeFor(int chunk_size) {
if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
@@ -2064,8 +2104,7 @@ class LargeObjectSpace : public Space {
// Allocates a large FixedArray.
Object* AllocateRawFixedArray(int size_in_bytes);
- // Available bytes for objects in this space, not including any extra
- // remembered set words.
+ // Available bytes for objects in this space.
int Available() {
return LargeObjectChunk::ObjectSizeFor(MemoryAllocator::Available());
}
@@ -2083,11 +2122,8 @@ class LargeObjectSpace : public Space {
// space, may be slow.
Object* FindObject(Address a);
- // Clears remembered sets.
- void ClearRSet();
-
- // Iterates objects whose remembered set bits are set.
- void IterateRSet(ObjectSlotCallback func);
+ // Iterates objects covered by dirty regions.
+ void IterateDirtyRegions(ObjectSlotCallback func);
// Frees unmarked objects.
void FreeUnmarkedObjects();
@@ -2114,8 +2150,6 @@ class LargeObjectSpace : public Space {
virtual void Print();
void ReportStatistics();
void CollectCodeStatistics();
- // Dump the remembered sets in the space to stdout.
- void PrintRSet();
#endif
// Checks whether an address is in the object area in this space. It
// iterates all objects in the space. May be slow.
@@ -2134,10 +2168,6 @@ class LargeObjectSpace : public Space {
int object_size,
Executability executable);
- // Returns the number of extra bytes (rounded up to the nearest full word)
- // required for extra_object_bytes of extra pointers (in bytes).
- static inline int ExtraRSetBytesFor(int extra_object_bytes);
-
friend class LargeObjectIterator;
public:
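
The constants above pin down the geometry: 256-byte regions and 8K pages give exactly 32 regions per page, one per bit of dirty_regions_. The bodies of GetRegionNumberForAddress() and GetRegionMaskForAddress() are not shown in this hunk; the following is a plausible reconstruction from those constants, not the actual V8 code:

    #include <cstdint>

    typedef uint8_t* Address;
    const int kPageSizeBits = 13;                            // 8K pages
    const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
    const int kRegionSizeLog2 = 8;                           // 256-byte regions

    int GetRegionNumberForAddress(Address addr) {
      // Offset within the 8K page, divided by the 256-byte region size,
      // gives a region index in [0, 32).
      intptr_t offset = reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask;
      return static_cast<int>(offset >> kRegionSizeLog2);
    }

    uint32_t GetRegionMaskForAddress(Address addr) {
      return 1u << GetRegionNumberForAddress(addr);
    }

    // Marking a region dirty is then one OR into the page's 32-bit
    // dirty_regions_ word; the scavenger later scans only dirty regions.
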
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 59a501f9ee..cc6504fef3 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -62,26 +62,21 @@ function StringValueOf() {
// ECMA-262, section 15.5.4.4
function StringCharAt(pos) {
- var char_code = %_FastCharCodeAt(this, pos);
- if (!%_IsSmi(char_code)) {
- var subject = TO_STRING_INLINE(this);
- var index = TO_INTEGER(pos);
- if (index >= subject.length || index < 0) return "";
- char_code = %StringCharCodeAt(subject, index);
+ var result = %_StringCharAt(this, pos);
+ if (%_IsSmi(result)) {
+ result = %_StringCharAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
- return %_CharFromCode(char_code);
+ return result;
}
// ECMA-262 section 15.5.4.5
function StringCharCodeAt(pos) {
- var fast_answer = %_FastCharCodeAt(this, pos);
- if (%_IsSmi(fast_answer)) {
- return fast_answer;
+ var result = %_StringCharCodeAt(this, pos);
+ if (!%_IsSmi(result)) {
+ result = %_StringCharCodeAt(TO_STRING_INLINE(this), TO_INTEGER(pos));
}
- var subject = TO_STRING_INLINE(this);
- var index = TO_INTEGER(pos);
- return %StringCharCodeAt(subject, index);
+ return result;
}
@@ -214,11 +209,7 @@ function StringMatch(regexp) {
function SubString(string, start, end) {
// Use the one character string cache.
if (start + 1 == end) {
- var char_code = %_FastCharCodeAt(string, start);
- if (!%_IsSmi(char_code)) {
- char_code = %StringCharCodeAt(string, start);
- }
- return %_CharFromCode(char_code);
+ return %_StringCharAt(string, start);
}
return %_SubString(string, start, end);
}
@@ -322,10 +313,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
var expansion = '$';
var position = next + 1;
if (position < length) {
- var peek = %_FastCharCodeAt(string, position);
- if (!%_IsSmi(peek)) {
- peek = %StringCharCodeAt(string, position);
- }
+ var peek = %_StringCharCodeAt(string, position);
if (peek == 36) { // $$
++position;
builder.add('$');
@@ -343,10 +331,7 @@ function ExpandReplacement(string, subject, matchInfo, builder) {
++position;
var n = peek - 48;
if (position < length) {
- peek = %_FastCharCodeAt(string, position);
- if (!%_IsSmi(peek)) {
- peek = %StringCharCodeAt(string, position);
- }
+ peek = %_StringCharCodeAt(string, position);
// $nn, 01 <= nn <= 99
if (n != 0 && peek == 48 || peek >= 49 && peek <= 57) {
var nn = n * 10 + (peek - 48);
@@ -824,7 +809,7 @@ function StringFromCharCode(code) {
var n = %_ArgumentsLength();
if (n == 1) {
if (!%_IsSmi(code)) code = ToNumber(code);
- return %_CharFromCode(code & 0xffff);
+ return %_StringCharFromCode(code & 0xffff);
}
// NOTE: This is not super-efficient, but it is necessary because we
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index 45aaf75c91..7db6eb41d4 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -568,9 +568,11 @@ class KeyedStoreStubCompiler: public StubCompiler {
// a builtin function on its instance prototype (the one the generator
// is set for), and a name of a generator itself (used to build ids
// and generator function names).
-#define CUSTOM_CALL_IC_GENERATORS(V) \
- V(array, push, ArrayPush) \
- V(array, pop, ArrayPop)
+#define CUSTOM_CALL_IC_GENERATORS(V) \
+ V(array, push, ArrayPush) \
+ V(array, pop, ArrayPop) \
+ V(string, charCodeAt, StringCharCodeAt) \
+ V(string, charAt, StringCharAt)
class CallStubCompiler: public StubCompiler {
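
CUSTOM_CALL_IC_GENERATORS is an X-macro: each V(...) row expands once per use site, so adding the two string generators is a one-line-per-entry change. A generic illustration of the mechanism; the expansion macro below is hypothetical and only demonstrates the pattern:

    #include <cstdio>

    #define CUSTOM_CALL_IC_GENERATORS(V)      \
      V(array, push, ArrayPush)               \
      V(array, pop, ArrayPop)                 \
      V(string, charCodeAt, StringCharCodeAt) \
      V(string, charAt, StringCharAt)

    // One hypothetical use site: print the table. Another could declare a
    // compiler method per row, which is how the stub compiler uses it.
    #define PRINT_GENERATOR(proto, fn, name) \
      std::printf("%s.prototype.%s -> %s\n", #proto, #fn, #name);

    int main() {
      CUSTOM_CALL_IC_GENERATORS(PRINT_GENERATOR)
      return 0;
    }
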
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 7219d630ce..65ce2e1bc9 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -149,10 +149,10 @@ void V8::TearDown() {
Top::TearDown();
- Heap::TearDown();
-
CpuProfiler::TearDown();
+ Heap::TearDown();
+
Logger::TearDown();
is_running_ = false;
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index ed392e2ed0..1d47eb75b9 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -492,23 +492,23 @@ PropertyDescriptor.prototype.hasSetter = function() {
function GetOwnProperty(obj, p) {
var desc = new PropertyDescriptor();
- // An array with:
- // obj is a data property [false, value, Writeable, Enumerable, Configurable]
- // obj is an accessor [true, Get, Set, Enumerable, Configurable]
+ // GetOwnProperty returns an array indexed by the constants
+ // defined in macros.py.
+  // If p is not a property on obj, undefined is returned.
var props = %GetOwnProperty(ToObject(obj), ToString(p));
if (IS_UNDEFINED(props)) return void 0;
// This is an accessor
- if (props[0]) {
- desc.setGet(props[1]);
- desc.setSet(props[2]);
+ if (props[IS_ACCESSOR_INDEX]) {
+ desc.setGet(props[GETTER_INDEX]);
+ desc.setSet(props[SETTER_INDEX]);
} else {
- desc.setValue(props[1]);
- desc.setWritable(props[2]);
+ desc.setValue(props[VALUE_INDEX]);
+ desc.setWritable(props[WRITABLE_INDEX]);
}
- desc.setEnumerable(props[3]);
- desc.setConfigurable(props[4]);
+ desc.setEnumerable(props[ENUMERABLE_INDEX]);
+ desc.setConfigurable(props[CONFIGURABLE_INDEX]);
return desc;
}
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index adeee595a3..6d8c2febcd 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,7 +34,7 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 2
-#define BUILD_NUMBER 12
+#define BUILD_NUMBER 13
#define PATCH_LEVEL 0
#define CANDIDATE_VERSION false
diff --git a/deps/v8/src/virtual-frame-light-inl.h b/deps/v8/src/virtual-frame-light-inl.h
index 17b1c50439..09a533ec3a 100644
--- a/deps/v8/src/virtual-frame-light-inl.h
+++ b/deps/v8/src/virtual-frame-light-inl.h
@@ -60,7 +60,7 @@ VirtualFrame::VirtualFrame(VirtualFrame* original)
register_allocation_map_(original->register_allocation_map_) { }
-bool VirtualFrame::Equals(VirtualFrame* other) {
+bool VirtualFrame::Equals(const VirtualFrame* other) {
ASSERT(element_count() == other->element_count());
if (top_of_stack_state_ != other->top_of_stack_state_) return false;
if (register_allocation_map_ != other->register_allocation_map_) return false;
@@ -99,7 +99,9 @@ VirtualFrame::RegisterAllocationScope::~RegisterAllocationScope() {
}
-CodeGenerator* VirtualFrame::cgen() { return CodeGeneratorScope::Current(); }
+CodeGenerator* VirtualFrame::cgen() const {
+ return CodeGeneratorScope::Current();
+}
MacroAssembler* VirtualFrame::masm() { return cgen()->masm(); }
@@ -112,15 +114,17 @@ void VirtualFrame::CallStub(CodeStub* stub, int arg_count) {
}
-int VirtualFrame::parameter_count() {
+int VirtualFrame::parameter_count() const {
return cgen()->scope()->num_parameters();
}
-int VirtualFrame::local_count() { return cgen()->scope()->num_stack_slots(); }
+int VirtualFrame::local_count() const {
+ return cgen()->scope()->num_stack_slots();
+}
-int VirtualFrame::frame_pointer() { return parameter_count() + 3; }
+int VirtualFrame::frame_pointer() const { return parameter_count() + 3; }
int VirtualFrame::context_index() { return frame_pointer() - 1; }
@@ -129,7 +133,7 @@ int VirtualFrame::context_index() { return frame_pointer() - 1; }
int VirtualFrame::function_index() { return frame_pointer() - 2; }
-int VirtualFrame::local0_index() { return frame_pointer() + 2; }
+int VirtualFrame::local0_index() const { return frame_pointer() + 2; }
int VirtualFrame::fp_relative(int index) {
@@ -139,12 +143,12 @@ int VirtualFrame::fp_relative(int index) {
}
-int VirtualFrame::expression_base_index() {
+int VirtualFrame::expression_base_index() const {
return local0_index() + local_count();
}
-int VirtualFrame::height() {
+int VirtualFrame::height() const {
return element_count() - expression_base_index();
}
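
The const-qualification above touches pure index arithmetic, which can be stated on its own. A sketch with the counts passed in directly instead of read from the code generator:

    // The counts are supplied by the caller here; in V8 they come from
    // the code generator's scope.
    struct VirtualFrameLayout {
      int element_count;
      int parameter_count;
      int local_count;

      int frame_pointer() const { return parameter_count + 3; }
      int context_index() const { return frame_pointer() - 1; }
      int function_index() const { return frame_pointer() - 2; }
      int local0_index() const { return frame_pointer() + 2; }
      int expression_base_index() const { return local0_index() + local_count; }
      // Elements currently on the expression stack.
      int height() const { return element_count - expression_base_index(); }
    };
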
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 3db4d084e2..cceaccfaa0 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -307,7 +307,7 @@ class Operand BASE_EMBEDDED {
private:
byte rex_;
- byte buf_[10];
+ byte buf_[6];
// The number of bytes in buf_.
unsigned int len_;
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 8099febb7f..bd8739f152 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -308,7 +308,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// (tail-call) to the code in register edx without checking arguments.
__ movq(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movsxlq(rbx,
- FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+ FieldOperand(rdx,
+ SharedFunctionInfo::kFormalParameterCountOffset));
__ movq(rdx, FieldOperand(rdx, SharedFunctionInfo::kCodeOffset));
__ lea(rdx, FieldOperand(rdx, Code::kHeaderSize));
__ cmpq(rax, rbx);
@@ -525,15 +526,15 @@ static void AllocateEmptyJSArray(MacroAssembler* masm,
__ lea(scratch1, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), scratch1);
- // Initialize the FixedArray and fill it with holes. FixedArray length is not
+ // Initialize the FixedArray and fill it with holes. FixedArray length is
// stored as a smi.
// result: JSObject
// scratch1: elements array
// scratch2: start of next object
- __ Move(FieldOperand(scratch1, JSObject::kMapOffset),
+ __ Move(FieldOperand(scratch1, HeapObject::kMapOffset),
Factory::fixed_array_map());
- __ movq(FieldOperand(scratch1, Array::kLengthOffset),
- Immediate(initial_capacity));
+ __ Move(FieldOperand(scratch1, FixedArray::kLengthOffset),
+ Smi::FromInt(initial_capacity));
// Fill the FixedArray with the hole value. Inline the code if short.
// Reconsider loop unfolding if kPreallocatedArrayElements gets changed.
@@ -587,7 +588,6 @@ static void AllocateJSArray(MacroAssembler* masm,
JSFunction::kPrototypeOrInitialMapOffset));
// Check whether an empty sized array is requested.
- __ SmiToInteger64(array_size, array_size);
__ testq(array_size, array_size);
__ j(not_zero, &not_empty);
@@ -605,10 +605,11 @@ static void AllocateJSArray(MacroAssembler* masm,
// Allocate the JSArray object together with space for a FixedArray with the
// requested elements.
__ bind(&not_empty);
- ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+ SmiIndex index =
+ masm->SmiToIndex(kScratchRegister, array_size, kPointerSizeLog2);
__ AllocateInNewSpace(JSArray::kSize + FixedArray::kHeaderSize,
- times_pointer_size,
- array_size,
+ index.scale,
+ index.reg,
result,
elements_array_end,
scratch,
@@ -620,43 +621,41 @@ static void AllocateJSArray(MacroAssembler* masm,
// result: JSObject
// elements_array: initial map
// elements_array_end: start of next object
- // array_size: size of array
+ // array_size: size of array (smi)
__ bind(&allocated);
__ movq(FieldOperand(result, JSObject::kMapOffset), elements_array);
__ Move(elements_array, Factory::empty_fixed_array());
__ movq(FieldOperand(result, JSArray::kPropertiesOffset), elements_array);
// Field JSArray::kElementsOffset is initialized later.
- __ Integer32ToSmi(scratch, array_size);
- __ movq(FieldOperand(result, JSArray::kLengthOffset), scratch);
+ __ movq(FieldOperand(result, JSArray::kLengthOffset), array_size);
// Calculate the location of the elements array and set elements array member
// of the JSArray.
// result: JSObject
// elements_array_end: start of next object
- // array_size: size of array
+ // array_size: size of array (smi)
__ lea(elements_array, Operand(result, JSArray::kSize));
__ movq(FieldOperand(result, JSArray::kElementsOffset), elements_array);
- // Initialize the fixed array. FixedArray length is not stored as a smi.
+ // Initialize the fixed array. FixedArray length is stored as a smi.
// result: JSObject
// elements_array: elements array
// elements_array_end: start of next object
- // array_size: size of array
- ASSERT(kSmiTag == 0);
+ // array_size: size of array (smi)
__ Move(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
- __ testq(array_size, array_size);
+ __ SmiTest(array_size);
__ j(not_zero, &not_empty_2);
// Length of the FixedArray is the number of pre-allocated elements even
// though the actual JSArray has length 0.
- __ movq(FieldOperand(elements_array, Array::kLengthOffset),
- Immediate(kPreallocatedArrayElements));
+ __ Move(FieldOperand(elements_array, FixedArray::kLengthOffset),
+ Smi::FromInt(kPreallocatedArrayElements));
__ jmp(&fill_array);
__ bind(&not_empty_2);
// For non-empty JSArrays the length of the FixedArray and the JSArray is the
// same.
- __ movq(FieldOperand(elements_array, Array::kLengthOffset), array_size);
+ __ movq(FieldOperand(elements_array, FixedArray::kLengthOffset), array_size);
// Fill the allocated FixedArray with the hole value if requested.
// result: JSObject
@@ -1039,8 +1038,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// rdx: number of elements
// rax: start of next object
__ LoadRoot(rcx, Heap::kFixedArrayMapRootIndex);
- __ movq(Operand(rdi, JSObject::kMapOffset), rcx); // setup the map
- __ movl(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
+ __ movq(Operand(rdi, HeapObject::kMapOffset), rcx); // setup the map
+ __ Integer32ToSmi(rdx, rdx);
+ __ movq(Operand(rdi, FixedArray::kLengthOffset), rdx); // and length
// Initialize the fields to undefined.
// rbx: JSObject
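
Several hunks above replace raw int32 length stores with smi stores, which is why Integer32ToSmi and SmiToIndex now appear on the allocation paths. A hedged sketch of 64-bit smi tagging; kSmiShift == 32 is an assumption about this V8 vintage, not something these hunks state:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;  // assumed: payload lives in the upper 32 bits

    intptr_t Integer32ToSmi(int32_t value) {
      return static_cast<intptr_t>(value) << kSmiShift;
    }

    int32_t SmiToInteger32(intptr_t smi) {
      return static_cast<int32_t>(smi >> kSmiShift);
    }

    int main() {
      intptr_t len = Integer32ToSmi(100);  // e.g. a FixedArray length
      assert((len & 1) == 0);              // smi tag bit (bit 0) is clear
      assert(SmiToInteger32(len) == 100);
      return 0;
    }
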
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 767c33fe6d..182e94b2e2 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -43,12 +43,12 @@
namespace v8 {
namespace internal {
-#define __ ACCESS_MASM(masm_)
+#define __ ACCESS_MASM(masm)
// -------------------------------------------------------------------------
-// Platform-specific DeferredCode functions.
+// Platform-specific FrameRegisterState functions.
-void DeferredCode::SaveRegisters() {
+void FrameRegisterState::Save(MacroAssembler* masm) const {
for (int i = 0; i < RegisterAllocator::kNumRegisters; i++) {
int action = registers_[i];
if (action == kPush) {
@@ -60,7 +60,7 @@ void DeferredCode::SaveRegisters() {
}
-void DeferredCode::RestoreRegisters() {
+void FrameRegisterState::Restore(MacroAssembler* masm) const {
// Restore registers in reverse order due to the stack.
for (int i = RegisterAllocator::kNumRegisters - 1; i >= 0; i--) {
int action = registers_[i];
@@ -74,6 +74,45 @@ void DeferredCode::RestoreRegisters() {
}
+#undef __
+#define __ ACCESS_MASM(masm_)
+
+// -------------------------------------------------------------------------
+// Platform-specific DeferredCode functions.
+
+void DeferredCode::SaveRegisters() {
+ frame_state_.Save(masm_);
+}
+
+
+void DeferredCode::RestoreRegisters() {
+ frame_state_.Restore(masm_);
+}
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void VirtualFrameRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ frame_state_->Save(masm);
+}
+
+
+void VirtualFrameRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ frame_state_->Restore(masm);
+}
+
+
+void ICRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+ masm->EnterInternalFrame();
+}
+
+
+void ICRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+ masm->LeaveInternalFrame();
+}
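// [Illustrative sketch with invented names, not part of the patch.] The
// RuntimeCallHelper hooks above let one piece of shared slow-case code be
// reused under different calling conventions: the classic codegen saves and
// restores the virtual frame's live registers, while ICs enter and leave an
// internal frame. A minimal C++ mock of the pattern:

#include <iostream>

class CallHelper {  // plays the role of RuntimeCallHelper
 public:
  virtual ~CallHelper() {}
  virtual void BeforeCall() const = 0;
  virtual void AfterCall() const = 0;
};

class SaveRegistersHelper : public CallHelper {
 public:
  virtual void BeforeCall() const { std::cout << "push live registers\n"; }
  virtual void AfterCall() const { std::cout << "pop live registers\n"; }
};

// Shared slow-case code stays agnostic of its caller's conventions.
void GenerateSlowCase(const CallHelper& helper) {
  helper.BeforeCall();
  std::cout << "call runtime\n";
  helper.AfterCall();
}

int main() {
  SaveRegistersHelper helper;
  GenerateSlowCase(helper);
  return 0;
}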
+
+
// -------------------------------------------------------------------------
// CodeGenState implementation.
@@ -852,10 +891,11 @@ void CodeGenerator::CallApplyLazy(Expression* applicand,
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ SmiToInteger32(rax, rax);
- __ movq(rcx, rax);
- __ cmpq(rax, Immediate(kArgumentsLimit));
+ __ SmiToInteger32(rax,
+ Operand(rdx,
+ ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ movl(rcx, rax);
+ __ cmpl(rax, Immediate(kArgumentsLimit));
__ j(above, &build_args);
// Loop through the arguments pushing them onto the execution
@@ -1890,8 +1930,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 3
frame_->EmitPush(rdx); // <- slot 2
- __ movl(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
entry.Jump();
@@ -1902,8 +1941,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(rax); // <- slot 2
// Push the length of the array and the initial index onto the stack.
- __ movl(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
+ __ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
frame_->EmitPush(rax); // <- slot 1
frame_->EmitPush(Smi::FromInt(0)); // <- slot 0
@@ -3970,23 +4008,67 @@ void CodeGenerator::GenerateArgumentsLength(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateFastCharCodeAt");
+class DeferredStringCharCodeAt : public DeferredCode {
+ public:
+ DeferredStringCharCodeAt(Register object,
+ Register index,
+ Register scratch,
+ Register result)
+ : result_(result),
+ char_code_at_generator_(object,
+ index,
+ scratch,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharCodeAtGenerator* fast_case_generator() {
+ return &char_code_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_code_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result_, Heap::kNanValueRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharCodeAtGenerator char_code_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charCodeAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharCodeAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharCodeAt");
ASSERT(args->length() == 2);
Load(args->at(0));
Load(args->at(1));
Result index = frame_->Pop();
Result object = frame_->Pop();
-
- // We will mutate the index register and possibly the object register.
- // The case where they are somehow the same register is handled
- // because we only mutate them in the case where the receiver is a
- // heap object and the index is not.
object.ToRegister();
index.ToRegister();
+ // We might mutate the object register.
frame_->Spill(object.reg());
- frame_->Spill(index.reg());
// We need two extra registers.
Result result = allocator()->Allocate();
@@ -3994,33 +4076,40 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
Result scratch = allocator()->Allocate();
ASSERT(scratch.is_valid());
- // There is no virtual frame effect from here up to the final result
- // push.
- Label slow_case;
- Label exit;
- StringHelper::GenerateFastCharCodeAt(masm_,
- object.reg(),
- index.reg(),
- scratch.reg(),
- result.reg(),
- &slow_case,
- &slow_case,
- &slow_case,
- &slow_case);
- __ jmp(&exit);
-
- __ bind(&slow_case);
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(result.reg(), Heap::kUndefinedValueRootIndex);
-
- __ bind(&exit);
+ DeferredStringCharCodeAt* deferred =
+ new DeferredStringCharCodeAt(object.reg(),
+ index.reg(),
+ scratch.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
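// [Annotation, not part of the patch.] The deferred-code classes introduced
// here all follow one shape: emit the generator's fast path inline, and park
// the slow path out of line so the hot instruction stream stays straight.
// Schematically, using the code just above:
//
//   deferred->fast_case_generator()->GenerateFast(masm_);  // inline fast path
//   deferred->BindExit();  // out-of-line slow path rejoins here
//
// Generate() later emits GenerateSlow() with a VirtualFrameRuntimeCallHelper,
// so the frame's registers are saved and restored around the runtime call.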
-void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
- Comment(masm_, "[ GenerateCharFromCode");
+class DeferredStringCharFromCode : public DeferredCode {
+ public:
+ DeferredStringCharFromCode(Register code,
+ Register result)
+ : char_from_code_generator_(code, result) {}
+
+ StringCharFromCodeGenerator* fast_case_generator() {
+ return &char_from_code_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_from_code_generator_.GenerateSlow(masm(), call_helper);
+ }
+
+ private:
+ StringCharFromCodeGenerator char_from_code_generator_;
+};
+
+
+// Generates code for creating a one-char string from a char code.
+void CodeGenerator::GenerateStringCharFromCode(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharFromCode");
ASSERT(args->length() == 1);
Load(args->at(0));
@@ -4029,19 +4118,97 @@ void CodeGenerator::GenerateCharFromCode(ZoneList<Expression*>* args) {
code.ToRegister();
ASSERT(code.is_valid());
- // StringHelper::GenerateCharFromCode may do a runtime call.
- frame_->SpillAll();
-
Result result = allocator()->Allocate();
ASSERT(result.is_valid());
- Result scratch = allocator()->Allocate();
- ASSERT(scratch.is_valid());
- StringHelper::GenerateCharFromCode(masm_,
- code.reg(),
- result.reg(),
- scratch.reg(),
- CALL_FUNCTION);
+ DeferredStringCharFromCode* deferred = new DeferredStringCharFromCode(
+ code.reg(), result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
+ frame_->Push(&result);
+}
+
+
+class DeferredStringCharAt : public DeferredCode {
+ public:
+ DeferredStringCharAt(Register object,
+ Register index,
+ Register scratch1,
+ Register scratch2,
+ Register result)
+ : result_(result),
+ char_at_generator_(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion_,
+ &need_conversion_,
+ &index_out_of_range_,
+ STRING_INDEX_IS_NUMBER) {}
+
+ StringCharAtGenerator* fast_case_generator() {
+ return &char_at_generator_;
+ }
+
+ virtual void Generate() {
+ VirtualFrameRuntimeCallHelper call_helper(frame_state());
+ char_at_generator_.GenerateSlow(masm(), call_helper);
+
+ __ bind(&need_conversion_);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result_, Smi::FromInt(0));
+ __ jmp(exit_label());
+
+ __ bind(&index_out_of_range_);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result_, Heap::kEmptyStringRootIndex);
+ __ jmp(exit_label());
+ }
+
+ private:
+ Register result_;
+
+ Label need_conversion_;
+ Label index_out_of_range_;
+
+ StringCharAtGenerator char_at_generator_;
+};
+
+
+// This generates code that performs a String.prototype.charAt() call
+// or returns a smi in order to trigger conversion.
+void CodeGenerator::GenerateStringCharAt(ZoneList<Expression*>* args) {
+ Comment(masm_, "[ GenerateStringCharAt");
+ ASSERT(args->length() == 2);
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Result index = frame_->Pop();
+ Result object = frame_->Pop();
+ object.ToRegister();
+ index.ToRegister();
+ // We might mutate the object register.
+ frame_->Spill(object.reg());
+
+ // We need three extra registers.
+ Result result = allocator()->Allocate();
+ ASSERT(result.is_valid());
+ Result scratch1 = allocator()->Allocate();
+ ASSERT(scratch1.is_valid());
+ Result scratch2 = allocator()->Allocate();
+ ASSERT(scratch2.is_valid());
+
+ DeferredStringCharAt* deferred =
+ new DeferredStringCharAt(object.reg(),
+ index.reg(),
+ scratch1.reg(),
+ scratch2.reg(),
+ result.reg());
+ deferred->fast_case_generator()->GenerateFast(masm_);
+ deferred->BindExit();
frame_->Push(&result);
}
@@ -4467,7 +4634,8 @@ void CodeGenerator::GenerateRegExpConstructResult(ZoneList<Expression*>* args) {
__ Move(FieldOperand(rcx, HeapObject::kMapOffset),
Factory::fixed_array_map());
// Set length.
- __ movl(FieldOperand(rcx, FixedArray::kLengthOffset), rbx);
+ __ Integer32ToSmi(rdx, rbx);
+ __ movq(FieldOperand(rcx, FixedArray::kLengthOffset), rdx);
// Fill contents of fixed-array with the-hole.
__ Move(rdx, Factory::the_hole_value());
__ lea(rcx, FieldOperand(rcx, FixedArray::kHeaderSize));
@@ -4507,7 +4675,7 @@ class DeferredSearchCache: public DeferredCode {
virtual void Generate();
private:
- Register dst_; // on invocation index of finger (as Smi), on exit
+ Register dst_; // on invocation index of finger (as int32), on exit
// holds value being looked up.
Register cache_; // instance of JSFunctionResultCache.
Register key_; // key being looked up.
@@ -4531,11 +4699,10 @@ void DeferredSearchCache::Generate() {
Immediate kEntriesIndexImm = Immediate(JSFunctionResultCache::kEntriesIndex);
Immediate kEntrySizeImm = Immediate(JSFunctionResultCache::kEntrySize);
- __ SmiToInteger32(dst_, dst_);
// Check the cache from finger to start of the cache.
__ bind(&first_loop);
- __ subq(dst_, kEntrySizeImm);
- __ cmpq(dst_, kEntriesIndexImm);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, kEntriesIndexImm);
__ j(less, &search_further);
__ cmpq(ArrayElement(cache_, dst_), key_);
@@ -4549,14 +4716,15 @@ void DeferredSearchCache::Generate() {
__ bind(&search_further);
// Check the cache from end of cache up to finger.
- __ movq(dst_, FieldOperand(cache_, JSFunctionResultCache::kCacheSizeOffset));
- __ movq(scratch_, FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
- __ SmiToInteger32(dst_, dst_);
- __ SmiToInteger32(scratch_, scratch_);
+ __ SmiToInteger32(dst_,
+ FieldOperand(cache_,
+ JSFunctionResultCache::kCacheSizeOffset));
+ __ SmiToInteger32(scratch_,
+ FieldOperand(cache_, JSFunctionResultCache::kFingerOffset));
__ bind(&second_loop);
- __ subq(dst_, kEntrySizeImm);
- __ cmpq(dst_, scratch_);
+ __ subl(dst_, kEntrySizeImm);
+ __ cmpl(dst_, scratch_);
__ j(less_equal, &cache_miss);
__ cmpq(ArrayElement(cache_, dst_), key_);
@@ -4586,29 +4754,28 @@ void DeferredSearchCache::Generate() {
// cache miss this optimization would hardly matter much.
// Check if we could add new entry to cache.
- __ movl(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ movq(rbx, FieldOperand(rcx, FixedArray::kLengthOffset));
__ movq(r9, FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset));
- __ SmiToInteger32(r9, r9);
- __ cmpq(rbx, r9);
+ __ SmiCompare(rbx, r9);
__ j(greater, &add_new_entry);
// Check if we could evict entry after finger.
__ movq(rdx, FieldOperand(rcx, JSFunctionResultCache::kFingerOffset));
__ SmiToInteger32(rdx, rdx);
+ __ SmiToInteger32(rbx, rbx);
__ addq(rdx, kEntrySizeImm);
Label forward;
__ cmpq(rbx, rdx);
__ j(greater, &forward);
// Need to wrap over the cache.
- __ movq(rdx, kEntriesIndexImm);
+ __ movl(rdx, kEntriesIndexImm);
__ bind(&forward);
__ Integer32ToSmi(r9, rdx);
__ jmp(&update_cache);
__ bind(&add_new_entry);
- // r9 holds cache size as int.
- __ movq(rdx, r9);
- __ Integer32ToSmi(r9, r9);
+ // r9 holds cache size as smi.
+ __ SmiToInteger32(rdx, r9);
__ SmiAddConstant(rbx, r9, Smi::FromInt(JSFunctionResultCache::kEntrySize));
__ movq(FieldOperand(rcx, JSFunctionResultCache::kCacheSizeOffset), rbx);
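// [Illustrative sketch; header constants are placeholders.] The two loops
// above scan the cache backwards in two arcs: from the finger down to the
// first entry, then from the end of the cache down to the finger, stepping
// by the entry size (key/value pairs). Roughly, in plain C++:

const int kEntriesIndex = 2;  // assumed header slots, for illustration
const int kEntrySize = 2;     // key + value

// Returns the index of the matching key slot, or -1 on a cache miss.
int FindEntry(const int* cache, int size, int finger, int key) {
  for (int i = finger - kEntrySize; i >= kEntriesIndex; i -= kEntrySize) {
    if (cache[i] == key) return i;
  }
  for (int i = size - kEntrySize; i > finger; i -= kEntrySize) {
    if (cache[i] == key) return i;
  }
  return -1;
}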
@@ -4680,16 +4847,13 @@ void CodeGenerator::GenerateGetFromCache(ZoneList<Expression*>* args) {
const int kFingerOffset =
FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
// tmp.reg() now holds finger offset as a smi.
- __ movq(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
- SmiIndex index =
- masm()->SmiToIndex(kScratchRegister, tmp.reg(), kPointerSizeLog2);
+ __ SmiToInteger32(tmp.reg(), FieldOperand(cache.reg(), kFingerOffset));
__ cmpq(key.reg(), FieldOperand(cache.reg(),
- index.reg, index.scale,
+ tmp.reg(), times_pointer_size,
FixedArray::kHeaderSize));
- // Do NOT alter index.reg or tmp.reg() before cmpq below.
deferred->Branch(not_equal);
__ movq(tmp.reg(), FieldOperand(cache.reg(),
- index.reg, index.scale,
+ tmp.reg(), times_pointer_size,
FixedArray::kHeaderSize + kPointerSize));
deferred->BindExit();
@@ -7279,16 +7443,11 @@ Result CodeGenerator::EmitKeyedLoad() {
Result elements = allocator()->Allocate();
ASSERT(elements.is_valid());
-
Result key = frame_->Pop();
Result receiver = frame_->Pop();
key.ToRegister();
receiver.ToRegister();
- // Use a fresh temporary for the index
- Result index = allocator()->Allocate();
- ASSERT(index.is_valid());
-
DeferredReferenceGetKeyedValue* deferred =
new DeferredReferenceGetKeyedValue(elements.reg(),
receiver.reg(),
@@ -7321,15 +7480,11 @@ Result CodeGenerator::EmitKeyedLoad() {
Factory::fixed_array_map());
deferred->Branch(not_equal);
- // Shift the key to get the actual index value and check that
- // it is within bounds.
- __ SmiToInteger32(index.reg(), key.reg());
- __ cmpl(index.reg(),
- FieldOperand(elements.reg(), FixedArray::kLengthOffset));
+ // Check that key is within bounds.
+ __ SmiCompare(key.reg(),
+ FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
- // The index register holds the un-smi-tagged key. It has been
- // zero-extended to 64-bits, so it can be used directly as index in the
- // operand below.
+
// Load and check that the result is not the hole. We could
// reuse the index or elements register for the value.
//
@@ -7337,14 +7492,14 @@ Result CodeGenerator::EmitKeyedLoad() {
// heuristic about which register to reuse. For example, if
// one is rax, the we can reuse that one because the value
// coming from the deferred code will be in rax.
+ SmiIndex index =
+ masm_->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
__ movq(elements.reg(),
- Operand(elements.reg(),
- index.reg(),
- times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ FieldOperand(elements.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
result = elements;
- elements.Unuse();
- index.Unuse();
__ CompareRoot(result.reg(), Heap::kTheHoleValueRootIndex);
deferred->Branch(equal);
__ IncrementCounter(&Counters::keyed_load_inline, 1);
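// [Illustrative check, not part of the patch.] The SmiCompare bounds check
// above works on the tagged values directly because this smi encoding is
// order-preserving: payload << 32 is monotonic as a signed 64-bit map, and
// the sign bit survives tagging, so the unsigned above_equal branch still
// rejects negative keys. Runnable C++ demonstration:

#include <cassert>
#include <cstdint>

int64_t Smi(int32_t v) { return static_cast<int64_t>(v) * (int64_t{1} << 32); }

int main() {
  // Tagging preserves signed order, so comparing tagged values is the same
  // as comparing the payloads.
  assert((Smi(3) < Smi(7)) == (3 < 7));
  // A negative index, once tagged, compares huge against any valid tagged
  // length in an unsigned comparison, so above_equal rejects it.
  assert(static_cast<uint64_t>(Smi(-1)) >= static_cast<uint64_t>(Smi(100)));
  return 0;
}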
@@ -7566,7 +7721,7 @@ void Reference::SetValue(InitState init_state) {
// Check whether it is possible to omit the write barrier. If the
// elements array is in new space or the value written is a smi we can
- // safely update the elements array without updating the remembered set.
+ // safely update the elements array without a write barrier.
Label in_new_space;
__ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
if (!value_is_constant) {
@@ -7591,10 +7746,10 @@ void Reference::SetValue(InitState init_state) {
// Store the value.
SmiIndex index =
masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
- __ movq(Operand(tmp.reg(),
- index.reg,
- index.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(tmp.reg(),
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize),
value.reg());
__ IncrementCounter(&Counters::keyed_store_inline, 1);
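// [Annotation, not part of the patch.] The InNewSpace test above leans on
// the generational invariant: the write barrier only has to remember stores
// that create an old-space -> new-space pointer. Hence the two cheap
// exemptions in this fast path, sketched as a predicate:
//
//   bool NeedsWriteBarrier(bool host_in_new_space, bool value_is_smi) {
//     // A smi is not a heap pointer, and a new-space host is scanned anyway.
//     return !host_in_new_space && !value_is_smi;
//   }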
@@ -7674,7 +7829,7 @@ void FastNewContextStub::Generate(MacroAssembler* masm) {
// Setup the object header.
__ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
__ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
- __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+ __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length));
// Setup the fixed slots.
__ xor_(rbx, rbx); // Set to NULL.
@@ -8349,7 +8504,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check that the last match info has space for the capture registers and the
// additional information. Ensure no overflow in add.
ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
- __ movl(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ movq(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiToInteger32(rax, rax);
__ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
__ cmpl(rdx, rax);
__ j(greater, &runtime);
@@ -8485,14 +8641,13 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 3: Start of string data
Label setup_two_byte, setup_rest;
__ testb(rdi, rdi);
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
__ j(zero, &setup_two_byte);
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ lea(arg4, FieldOperand(rax, rdi, times_1, SeqAsciiString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest);
__ bind(&setup_two_byte);
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ lea(arg4, FieldOperand(rax, rdi, times_2, SeqTwoByteString::kHeaderSize));
__ lea(arg3, FieldOperand(rax, rbx, times_2, SeqTwoByteString::kHeaderSize));
@@ -8512,12 +8667,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Check the result.
Label success;
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::SUCCESS));
__ j(equal, &success);
Label failure;
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::FAILURE));
__ j(equal, &failure);
- __ cmpq(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
+ __ cmpl(rax, Immediate(NativeRegExpMacroAssembler::EXCEPTION));
// If the result is not an exception, it can only be a retry; handle that in
// the runtime system.
__ j(not_equal, &runtime);
// Result must now be exception. If there is no pending exception already a
@@ -8627,9 +8782,10 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
// Make the hash mask from the length of the number string cache. It
// contains two elements (number and string) for each cache entry.
- __ movl(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
- __ shrl(mask, Immediate(1)); // Divide length by two (length is not a smi).
- __ subl(mask, Immediate(1)); // Make mask.
+ __ movq(mask, FieldOperand(number_string_cache, FixedArray::kLengthOffset));
+ // Divide smi tagged length by two.
+ __ PositiveSmiDivPowerOfTwoToInteger32(mask, mask, 1);
+ __ subq(mask, Immediate(1)); // Make mask.
// Calculate the entry in the number string cache. The hash value in the
// number string cache for smis is just the smi value, and the hash for
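// [Worked example, not part of the patch.] Suppose the number string cache's
// FixedArray has length 128. Each cache entry spans two elements (number,
// string), so PositiveSmiDivPowerOfTwoToInteger32 untags the smi length and
// halves it in one step:
//
//   entries = 128 / 2 = 64
//   mask    = 64 - 1  = 63   // power-of-two table, so & replaces %
//   index   = hash & mask    // for a smi, the hash is just its value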
@@ -9149,7 +9305,6 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Get the parameters pointer from the stack and untag the length.
__ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ SmiToInteger32(rcx, rcx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -9157,7 +9312,8 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
__ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi);
__ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex);
__ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister);
- __ movl(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx);
+ __ SmiToInteger32(rcx, rcx); // Untag length for the loop below.
// Copy the fixed array slots.
Label loop;
@@ -10671,143 +10827,190 @@ const char* CompareStub::GetName() {
}
-void StringHelper::GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case) {
- Label not_a_flat_string;
- Label try_again_with_new_string;
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+ Label flat_string;
Label ascii_string;
Label got_char_code;
// If the receiver is a smi trigger the non-string case.
- __ JumpIfSmi(object, receiver_not_string);
+ __ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
- __ testb(result, Immediate(kIsNotStringMask));
- __ j(not_zero, receiver_not_string);
+ __ testb(result_, Immediate(kIsNotStringMask));
+ __ j(not_zero, receiver_not_string_);
// If the index is non-smi trigger the non-smi case.
- __ JumpIfNotSmi(index, index_not_smi);
+ __ JumpIfNotSmi(index_, &index_not_smi_);
- // Check for index out of range.
- __ SmiCompare(index, FieldOperand(object, String::kLengthOffset));
- __ j(above_equal, index_out_of_range);
+ // Put smi-tagged index into scratch register.
+ __ movq(scratch_, index_);
+ __ bind(&got_smi_index_);
- __ bind(&try_again_with_new_string);
- // ----------- S t a t e -------------
- // -- object : string to access
- // -- result : instance type of the string
- // -- scratch : non-negative index < length
- // -----------------------------------
+ // Check for index out of range.
+ __ SmiCompare(scratch_, FieldOperand(object_, String::kLengthOffset));
+ __ j(above_equal, index_out_of_range_);
// We need special handling for non-flat strings.
- ASSERT_EQ(0, kSeqStringTag);
- __ testb(result, Immediate(kStringRepresentationMask));
- __ j(not_zero, &not_a_flat_string);
+ ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(zero, &flat_string);
- // Put untagged index into scratch register.
- __ SmiToInteger32(scratch, index);
+ // Handle non-flat strings.
+ __ testb(result_, Immediate(kIsConsStringMask));
+ __ j(zero, &call_runtime_);
+
+ // ConsString.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ CompareRoot(FieldOperand(object_, ConsString::kSecondOffset),
+ Heap::kEmptyStringRootIndex);
+ __ j(not_equal, &call_runtime_);
+ // Get the first of the two strings and load its instance type.
+ __ movq(object_, FieldOperand(object_, ConsString::kFirstOffset));
+ __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
+ __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+ // If the first cons component is also non-flat, then go to runtime.
+ ASSERT(kSeqStringTag == 0);
+ __ testb(result_, Immediate(kStringRepresentationMask));
+ __ j(not_zero, &call_runtime_);
// Check for 1-byte or 2-byte string.
- ASSERT_EQ(0, kTwoByteStringTag);
- __ testb(result, Immediate(kStringEncodingMask));
+ __ bind(&flat_string);
+ ASSERT(kAsciiStringTag != 0);
+ __ testb(result_, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii_string);
// 2-byte string.
// Load the 2-byte character code into the result register.
- __ movzxwl(result, FieldOperand(object,
- scratch,
- times_2,
- SeqTwoByteString::kHeaderSize));
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxwl(result_, FieldOperand(object_,
+ scratch_, times_2,
+ SeqTwoByteString::kHeaderSize));
__ jmp(&got_char_code);
- // Handle non-flat strings.
- __ bind(&not_a_flat_string);
- __ and_(result, Immediate(kStringRepresentationMask));
- __ cmpb(result, Immediate(kConsStringTag));
- __ j(not_equal, slow_case);
-
- // ConsString.
- // Check that the right hand side is the empty string (ie if this is really a
- // flat string in a cons string). If that is not the case we would rather go
- // to the runtime system now, to flatten the string.
- __ movq(result, FieldOperand(object, ConsString::kSecondOffset));
- __ CompareRoot(result, Heap::kEmptyStringRootIndex);
- __ j(not_equal, slow_case);
- // Get the first of the two strings and load its instance type.
- __ movq(object, FieldOperand(object, ConsString::kFirstOffset));
- __ movq(result, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
- __ jmp(&try_again_with_new_string);
-
// ASCII string.
- __ bind(&ascii_string);
// Load the byte into the result register.
- __ movzxbl(result, FieldOperand(object,
- scratch,
- times_1,
- SeqAsciiString::kHeaderSize));
+ __ bind(&ascii_string);
+ __ SmiToInteger32(scratch_, scratch_);
+ __ movzxbl(result_, FieldOperand(object_,
+ scratch_, times_1,
+ SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- __ Integer32ToSmi(result, result);
+ __ Integer32ToSmi(result_, result_);
+ __ bind(&exit_);
}
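// [Illustrative sketch, not part of the patch.] The cons-string shape the
// fast path above special-cases, in simplified form:
//
//   struct ConsString { String* first; String* second; };
//
// When second is the empty string the contents live entirely in first, so
// indexing can redirect to first (charCodeAt(cons, i) behaves like
// charCodeAt(cons->first, i)). A non-empty second, or a first that is itself
// non-sequential, bails out to the runtime, which flattens the rope first.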
-void StringHelper::GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- Register scratch,
- InvokeFlag flag) {
- ASSERT(!code.is(result));
+void StringCharCodeAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharCodeAt slow case");
- Label slow_case;
- Label exit;
+ // Index is not a smi.
+ __ bind(&index_not_smi_);
+ // If index is a heap number, try converting it to an integer.
+ __ CheckMap(index_, Factory::heap_number_map(), index_not_number_, true);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ push(result_);
+ __ push(index_); // Consumed by runtime conversion function.
+ if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+ __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+ } else {
+ ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+ // NumberToSmi discards numbers that are not exact integers.
+ __ CallRuntime(Runtime::kNumberToSmi, 1);
+ }
+ if (!scratch_.is(rax)) {
+ // Save the conversion result before the pop instructions below
+ // have a chance to overwrite it.
+ __ movq(scratch_, rax);
+ }
+ __ pop(result_);
+ __ pop(index_);
+ __ pop(object_);
+ call_helper.AfterCall(masm);
+ // If index is still not a smi, it must be out of range.
+ __ JumpIfNotSmi(scratch_, index_out_of_range_);
+ // Otherwise, return to the fast path.
+ __ jmp(&got_smi_index_);
+
+ // Call runtime. We get here when the receiver is a string and the
+ // index is a number, but the code of getting the actual character
+ // is too complex (e.g., when the string needs to be flattened).
+ __ bind(&call_runtime_);
+ call_helper.BeforeCall(masm);
+ __ push(object_);
+ __ push(index_);
+ __ CallRuntime(Runtime::kStringCharCodeAt, 2);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
+ }
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharCodeAt slow case");
+}
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
// Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(code, &slow_case);
- __ SmiToInteger32(scratch, code);
- __ cmpl(scratch, Immediate(String::kMaxAsciiCharCode));
- __ j(above, &slow_case);
-
- __ Move(result, Factory::single_character_string_cache());
- __ movq(result, FieldOperand(result,
- scratch,
- times_pointer_size,
- FixedArray::kHeaderSize));
-
- __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
- __ jmp(&exit);
+ __ JumpIfNotSmi(code_, &slow_case_);
+ __ SmiCompare(code_, Smi::FromInt(String::kMaxAsciiCharCode));
+ __ j(above, &slow_case_);
- __ bind(&slow_case);
- if (flag == CALL_FUNCTION) {
- __ push(code);
- __ CallRuntime(Runtime::kCharFromCode, 1);
- if (!result.is(rax)) {
- __ movq(result, rax);
- }
- } else {
- ASSERT(flag == JUMP_FUNCTION);
- ASSERT(result.is(rax));
- __ pop(rax); // Save return address.
- __ push(code);
- __ push(rax); // Restore return address.
- __ TailCallRuntime(Runtime::kCharFromCode, 1, 1);
- }
+ __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+ SmiIndex index = masm->SmiToIndex(kScratchRegister, code_, kPointerSizeLog2);
+ __ movq(result_, FieldOperand(result_, index.reg, index.scale,
+ FixedArray::kHeaderSize));
+ __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+ __ j(equal, &slow_case_);
+ __ bind(&exit_);
+}
- __ bind(&exit);
- if (flag == JUMP_FUNCTION) {
- ASSERT(result.is(rax));
- __ ret(0);
+
+void StringCharFromCodeGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ __ Abort("Unexpected fallthrough to CharFromCode slow case");
+
+ __ bind(&slow_case_);
+ call_helper.BeforeCall(masm);
+ __ push(code_);
+ __ CallRuntime(Runtime::kCharFromCode, 1);
+ if (!result_.is(rax)) {
+ __ movq(result_, rax);
}
+ call_helper.AfterCall(masm);
+ __ jmp(&exit_);
+
+ __ Abort("Unexpected fallthrough from CharFromCode slow case");
+}
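// [Illustrative model, not part of the patch.] The fast path above reads the
// single character string cache, a FixedArray indexed by character code where
// the undefined sentinel marks codes that must fall back to
// Runtime::kCharFromCode. Roughly:
//
//   Object* CharFromCode(int code) {
//     if (code > String::kMaxAsciiCharCode) return SlowPath(code);
//     Object* result = single_character_string_cache[code];
//     return (result == undefined_sentinel) ? SlowPath(code) : result;
//   }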
+
+
+// -------------------------------------------------------------------------
+// StringCharAtGenerator
+
+void StringCharAtGenerator::GenerateFast(MacroAssembler* masm) {
+ char_code_at_generator_.GenerateFast(masm);
+ char_from_code_generator_.GenerateFast(masm);
+}
+
+
+void StringCharAtGenerator::GenerateSlow(
+ MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+ char_code_at_generator_.GenerateSlow(masm, call_helper);
+ char_from_code_generator_.GenerateSlow(masm, call_helper);
}
@@ -10928,7 +11131,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ bind(&allocated);
// Fill the fields of the cons string.
__ movq(FieldOperand(rcx, ConsString::kLengthOffset), rbx);
- __ movl(FieldOperand(rcx, ConsString::kHashFieldOffset),
+ __ movq(FieldOperand(rcx, ConsString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
__ movq(FieldOperand(rcx, ConsString::kFirstOffset), rax);
__ movq(FieldOperand(rcx, ConsString::kSecondOffset), rdx);
@@ -10978,8 +11181,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
@@ -10988,8 +11190,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of first argument
StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, true);
// Locate first character of second argument.
- __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
@@ -11017,8 +11218,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// Locate first character of result.
__ addq(rcx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Locate first character of first argument.
- __ movq(rdi, FieldOperand(rax, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rax, String::kLengthOffset));
__ addq(rax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rax: first char of first argument
// rbx: result string
@@ -11027,8 +11227,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rdi: length of first argument
StringHelper::GenerateCopyCharacters(masm, rcx, rax, rdi, false);
// Locate first character of second argument.
- __ movq(rdi, FieldOperand(rdx, String::kLengthOffset));
- __ SmiToInteger32(rdi, rdi);
+ __ SmiToInteger32(rdi, FieldOperand(rdx, String::kLengthOffset));
__ addq(rdx, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// rbx: result string
// rcx: next character of result
@@ -11057,15 +11256,15 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
if (ascii) {
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
+ __ incq(src);
+ __ incq(dest);
} else {
__ movzxwl(kScratchRegister, Operand(src, 0));
__ movw(Operand(dest, 0), kScratchRegister);
__ addq(src, Immediate(2));
__ addq(dest, Immediate(2));
}
- __ subl(count, Immediate(1));
+ __ decl(count);
__ j(not_zero, &loop);
}
@@ -11078,38 +11277,39 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
// Copy characters using rep movs of quadwords. Align destination on a 4-byte
// boundary before starting rep movs. Copy remaining characters after running
// rep movs.
+ // Count is positive int32, dest and src are character pointers.
ASSERT(dest.is(rdi)); // rep movs destination
ASSERT(src.is(rsi)); // rep movs source
ASSERT(count.is(rcx)); // rep movs count
// Nothing to do for zero characters.
Label done;
- __ testq(count, count);
+ __ testl(count, count);
__ j(zero, &done);
// Make count the number of bytes to copy.
if (!ascii) {
ASSERT_EQ(2, sizeof(uc16)); // NOLINT
- __ addq(count, count);
+ __ addl(count, count);
}
// Don't enter the rep movs if there are fewer than 8 bytes to copy.
Label last_bytes;
- __ testq(count, Immediate(~7));
+ __ testl(count, Immediate(~7));
__ j(zero, &last_bytes);
// Copy from rsi to rdi using the rep movs instruction.
- __ movq(kScratchRegister, count);
- __ sar(count, Immediate(3)); // Number of doublewords to copy.
+ __ movl(kScratchRegister, count);
+ __ shr(count, Immediate(3)); // Number of quadwords to copy.
__ repmovsq();
// Find number of bytes left.
- __ movq(count, kScratchRegister);
+ __ movl(count, kScratchRegister);
__ and_(count, Immediate(7));
// Check if there are more bytes to copy.
__ bind(&last_bytes);
- __ testq(count, count);
+ __ testl(count, count);
__ j(zero, &done);
// Copy remaining characters.
@@ -11117,9 +11317,9 @@ void StringHelper::GenerateCopyCharactersREP(MacroAssembler* masm,
__ bind(&loop);
__ movb(kScratchRegister, Operand(src, 0));
__ movb(Operand(dest, 0), kScratchRegister);
- __ addq(src, Immediate(1));
- __ addq(dest, Immediate(1));
- __ subq(count, Immediate(1));
+ __ incq(src);
+ __ incq(dest);
+ __ decl(count);
__ j(not_zero, &loop);
__ bind(&done);
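// [Worked example, not part of the patch.] The count bookkeeping above, for
// a copy of 27 ASCII characters (27 bytes):
//
//   27 & ~7 != 0             -> enter the rep movs path
//   quadwords = 27 >> 3 = 3  -> repmovsq copies 24 bytes
//   remainder = 27 & 7  = 3  -> the byte loop copies the final 3 bytes
//
// Switching sar to shr is safe because count is a positive int32, and shr
// also guarantees a zero-extended quadword count.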
@@ -11139,13 +11339,11 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Make sure that both characters are not digits, as such strings have a
// different hash algorithm. Don't try to look for these in the symbol table.
Label not_array_index;
- __ movq(scratch, c1);
- __ subq(scratch, Immediate(static_cast<int>('0')));
- __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ leal(scratch, Operand(c1, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(above, &not_array_index);
- __ movq(scratch, c2);
- __ subq(scratch, Immediate(static_cast<int>('0')));
- __ cmpq(scratch, Immediate(static_cast<int>('9' - '0')));
+ __ leal(scratch, Operand(c2, -'0'));
+ __ cmpl(scratch, Immediate(static_cast<int>('9' - '0')));
__ j(below_equal, not_found);
__ bind(&not_array_index);
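// [Illustrative sketch, not part of the patch.] The leal/cmpl pairs above
// use the classic unsigned range-check idiom: compute c - '0' and let one
// unsigned comparison reject everything outside '0'..'9', since characters
// below '0' wrap around to large unsigned values. Runnable C++ equivalent:

#include <cassert>

bool IsAsciiDigit(unsigned c) {
  return (c - '0') <= ('9' - '0');  // one unsigned compare, like j(above)
}

int main() {
  assert(IsAsciiDigit('7'));
  assert(!IsAsciiDigit('a'));  // 'a' - '0' == 49 > 9
  assert(!IsAsciiDigit('/'));  // '/' - '0' wraps to a huge unsigned value
  return 0;
}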
@@ -11169,8 +11367,8 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Calculate capacity mask from the symbol table capacity.
Register mask = scratch2;
- __ movq(mask, FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
- __ SmiToInteger32(mask, mask);
+ __ SmiToInteger32(mask,
+ FieldOperand(symbol_table, SymbolTable::kCapacityOffset));
__ decl(mask);
Register undefined = scratch4;
@@ -11200,10 +11398,10 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
Register candidate = scratch; // Scratch register contains candidate.
ASSERT_EQ(1, SymbolTable::kEntrySize);
__ movq(candidate,
- FieldOperand(symbol_table,
- scratch,
- times_pointer_size,
- SymbolTable::kElementsStartOffset));
+ FieldOperand(symbol_table,
+ scratch,
+ times_pointer_size,
+ SymbolTable::kElementsStartOffset));
// If entry is undefined no string with this hash can be found.
__ cmpq(candidate, undefined);
@@ -11280,9 +11478,7 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
Register hash,
Register scratch) {
// hash += hash << 3;
- __ movl(scratch, hash);
- __ shll(scratch, Immediate(3));
- __ addl(hash, scratch);
+ __ leal(hash, Operand(hash, hash, times_8, 0));
// hash ^= hash >> 11;
__ movl(scratch, hash);
__ sarl(scratch, Immediate(11));
@@ -11294,7 +11490,6 @@ void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
// if (hash == 0) hash = 27;
Label hash_not_zero;
- __ testl(hash, hash);
__ j(not_zero, &hash_not_zero);
__ movl(hash, Immediate(27));
__ bind(&hash_not_zero);
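// [Annotation, not part of the patch.] Two micro-optimizations here: the lea
// computes hash + hash*8 == hash*9 in one instruction, i.e. hash += hash << 3
// without a scratch register; and the testl before j(not_zero) can be dropped
// presumably because the last addl of the hash mix already set the zero flag
// on the final hash value. The lea identity, as runnable C++:

#include <cassert>
#include <cstdint>

int main() {
  uint32_t hash = 0xDEADBEEF;  // any value; uint32_t wraps harmlessly
  uint32_t via_lea = hash + hash * 8;  // lea hash, [hash + hash*8]
  uint32_t via_shift = hash + (hash << 3);
  assert(via_lea == via_shift);
  return 0;
}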
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index 9d465839c0..242667eded 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -571,10 +571,13 @@ class CodeGenerator: public AstVisitor {
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
- void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
+ void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
- void GenerateCharFromCode(ZoneList<Expression*>* args);
+ void GenerateStringCharFromCode(ZoneList<Expression*>* args);
+
+ // Fast support for string.charAt(n) and string[n].
+ void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
@@ -846,38 +849,6 @@ class GenericBinaryOpStub: public CodeStub {
class StringHelper : public AllStatic {
public:
- // Generates fast code for getting a char code out of a string
- // object at the given index. May bail out for four reasons (in the
- // listed order):
- // * Receiver is not a string (receiver_not_string label).
- // * Index is not a smi (index_not_smi label).
- // * Index is out of range (index_out_of_range).
- // * Some other reason (slow_case label). In this case it's
- // guaranteed that the above conditions are not violated,
- // e.g. it's safe to assume the receiver is a string and the
- // index is a non-negative smi < length.
- // When successful, object, index, and scratch are clobbered.
- // Otherwise, scratch and result are clobbered.
- static void GenerateFastCharCodeAt(MacroAssembler* masm,
- Register object,
- Register index,
- Register scratch,
- Register result,
- Label* receiver_not_string,
- Label* index_not_smi,
- Label* index_out_of_range,
- Label* slow_case);
-
- // Generates code for creating a one-char string from the given char
- // code. May do a runtime call, so any register can be clobbered
- // and, if the given invoke flag specifies a call, an internal frame
- // is required. In tail call mode the result must be rax register.
- static void GenerateCharFromCode(MacroAssembler* masm,
- Register code,
- Register result,
- Register scratch,
- InvokeFlag flag);
-
// Generate code for copying characters using a simple loop. This should only
// be used in places where the number of characters is small and the
// additional setup and checking in GenerateCopyCharactersREP adds too much
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 5bd09c2147..e4e6a0b109 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -1010,7 +1010,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ push(rax); // Map.
__ push(rdx); // Enumeration cache.
__ movq(rax, FieldOperand(rdx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
__ push(rax); // Enumeration cache length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
__ jmp(&loop);
@@ -1020,7 +1019,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Push(Smi::FromInt(0)); // Map (0) - force slow check.
__ push(rax);
__ movq(rax, FieldOperand(rax, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rax, rax);
__ push(rax); // Fixed array length (as smi).
__ Push(Smi::FromInt(0)); // Initial index.
@@ -1906,76 +1904,6 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
}
-void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
- Handle<String> name = expr->name();
- if (strcmp("_IsSmi", *name->ToCString()) == 0) {
- EmitIsSmi(expr->arguments());
- } else if (strcmp("_IsNonNegativeSmi", *name->ToCString()) == 0) {
- EmitIsNonNegativeSmi(expr->arguments());
- } else if (strcmp("_IsObject", *name->ToCString()) == 0) {
- EmitIsObject(expr->arguments());
- } else if (strcmp("_IsUndetectableObject", *name->ToCString()) == 0) {
- EmitIsUndetectableObject(expr->arguments());
- } else if (strcmp("_IsFunction", *name->ToCString()) == 0) {
- EmitIsFunction(expr->arguments());
- } else if (strcmp("_IsArray", *name->ToCString()) == 0) {
- EmitIsArray(expr->arguments());
- } else if (strcmp("_IsRegExp", *name->ToCString()) == 0) {
- EmitIsRegExp(expr->arguments());
- } else if (strcmp("_IsConstructCall", *name->ToCString()) == 0) {
- EmitIsConstructCall(expr->arguments());
- } else if (strcmp("_ObjectEquals", *name->ToCString()) == 0) {
- EmitObjectEquals(expr->arguments());
- } else if (strcmp("_Arguments", *name->ToCString()) == 0) {
- EmitArguments(expr->arguments());
- } else if (strcmp("_ArgumentsLength", *name->ToCString()) == 0) {
- EmitArgumentsLength(expr->arguments());
- } else if (strcmp("_ClassOf", *name->ToCString()) == 0) {
- EmitClassOf(expr->arguments());
- } else if (strcmp("_Log", *name->ToCString()) == 0) {
- EmitLog(expr->arguments());
- } else if (strcmp("_RandomHeapNumber", *name->ToCString()) == 0) {
- EmitRandomHeapNumber(expr->arguments());
- } else if (strcmp("_SubString", *name->ToCString()) == 0) {
- EmitSubString(expr->arguments());
- } else if (strcmp("_RegExpExec", *name->ToCString()) == 0) {
- EmitRegExpExec(expr->arguments());
- } else if (strcmp("_ValueOf", *name->ToCString()) == 0) {
- EmitValueOf(expr->arguments());
- } else if (strcmp("_SetValueOf", *name->ToCString()) == 0) {
- EmitSetValueOf(expr->arguments());
- } else if (strcmp("_NumberToString", *name->ToCString()) == 0) {
- EmitNumberToString(expr->arguments());
- } else if (strcmp("_CharFromCode", *name->ToCString()) == 0) {
- EmitCharFromCode(expr->arguments());
- } else if (strcmp("_FastCharCodeAt", *name->ToCString()) == 0) {
- EmitFastCharCodeAt(expr->arguments());
- } else if (strcmp("_StringAdd", *name->ToCString()) == 0) {
- EmitStringAdd(expr->arguments());
- } else if (strcmp("_StringCompare", *name->ToCString()) == 0) {
- EmitStringCompare(expr->arguments());
- } else if (strcmp("_MathPow", *name->ToCString()) == 0) {
- EmitMathPow(expr->arguments());
- } else if (strcmp("_MathSin", *name->ToCString()) == 0) {
- EmitMathSin(expr->arguments());
- } else if (strcmp("_MathCos", *name->ToCString()) == 0) {
- EmitMathCos(expr->arguments());
- } else if (strcmp("_MathSqrt", *name->ToCString()) == 0) {
- EmitMathSqrt(expr->arguments());
- } else if (strcmp("_CallFunction", *name->ToCString()) == 0) {
- EmitCallFunction(expr->arguments());
- } else if (strcmp("_RegExpConstructResult", *name->ToCString()) == 0) {
- EmitRegExpConstructResult(expr->arguments());
- } else if (strcmp("_SwapElements", *name->ToCString()) == 0) {
- EmitSwapElements(expr->arguments());
- } else if (strcmp("_GetFromCache", *name->ToCString()) == 0) {
- EmitGetFromCache(expr->arguments());
- } else {
- UNREACHABLE();
- }
-}
-
-
void FullCodeGenerator::EmitIsSmi(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
@@ -2414,46 +2342,120 @@ void FullCodeGenerator::EmitNumberToString(ZoneList<Expression*>* args) {
}
-void FullCodeGenerator::EmitCharFromCode(ZoneList<Expression*>* args) {
+void FullCodeGenerator::EmitStringCharFromCode(ZoneList<Expression*>* args) {
ASSERT(args->length() == 1);
VisitForValue(args->at(0), kAccumulator);
- Label slow_case, done;
- // Fast case of Heap::LookupSingleCharacterStringFromCode.
- __ JumpIfNotSmi(rax, &slow_case);
- __ SmiToInteger32(rcx, rax);
- __ cmpl(rcx, Immediate(String::kMaxAsciiCharCode));
- __ j(above, &slow_case);
+ Label done;
+ StringCharFromCodeGenerator generator(rax, rbx);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
- __ Move(rbx, Factory::single_character_string_cache());
- __ movq(rbx, FieldOperand(rbx,
- rcx,
- times_pointer_size,
- FixedArray::kHeaderSize));
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
- __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
- __ j(equal, &slow_case);
- __ movq(rax, rbx);
+ __ bind(&done);
+ Apply(context_, rbx);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = rbx;
+ Register index = rax;
+ Register scratch = rcx;
+ Register result = rdx;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharCodeAtGenerator generator(object,
+ index,
+ scratch,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
__ jmp(&done);
- __ bind(&slow_case);
- __ push(rax);
- __ CallRuntime(Runtime::kCharFromCode, 1);
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // NaN.
+ __ LoadRoot(result, Heap::kNanValueRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move the undefined value into the result register, which will
+ // trigger conversion.
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- Apply(context_, rax);
+ Apply(context_, result);
}
-void FullCodeGenerator::EmitFastCharCodeAt(ZoneList<Expression*>* args) {
- // TODO(fsc): Port the complete implementation from the classic back-end.
- // Move the undefined value into the result register, which will
- // trigger the slow case.
- __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
- Apply(context_, rax);
+void FullCodeGenerator::EmitStringCharAt(ZoneList<Expression*>* args) {
+ ASSERT(args->length() == 2);
+
+ VisitForValue(args->at(0), kStack);
+ VisitForValue(args->at(1), kAccumulator);
+
+ Register object = rbx;
+ Register index = rax;
+ Register scratch1 = rcx;
+ Register scratch2 = rdx;
+ Register result = rax;
+
+ __ pop(object);
+
+ Label need_conversion;
+ Label index_out_of_range;
+ Label done;
+ StringCharAtGenerator generator(object,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &need_conversion,
+ &need_conversion,
+ &index_out_of_range,
+ STRING_INDEX_IS_NUMBER);
+ generator.GenerateFast(masm_);
+ __ jmp(&done);
+
+ __ bind(&index_out_of_range);
+ // When the index is out of range, the spec requires us to return
+ // the empty string.
+ __ LoadRoot(result, Heap::kEmptyStringRootIndex);
+ __ jmp(&done);
+
+ __ bind(&need_conversion);
+ // Move smi zero into the result register, which will trigger
+ // conversion.
+ __ Move(result, Smi::FromInt(0));
+ __ jmp(&done);
+
+ NopRuntimeCallHelper call_helper;
+ generator.GenerateSlow(masm_, call_helper);
+
+ __ bind(&done);
+ Apply(context_, result);
}
+
void FullCodeGenerator::EmitStringAdd(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 8766ebb145..773d2626a1 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -104,8 +104,7 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
- __ movq(r2, FieldOperand(r0, kCapacityOffset));
- __ SmiToInteger32(r2, r2);
+ __ SmiToInteger32(r2, FieldOperand(r0, kCapacityOffset));
__ decl(r2);
// Generate an unrolled loop that performs a few probes before
@@ -165,11 +164,11 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
//
// key - holds the smi key on entry and is unchanged if a branch is
// performed to the miss label.
+ // Holds the result on exit if the load succeeded.
//
// Scratch registers:
//
// r0 - holds the untagged key on entry and holds the hash once computed.
- // Holds the result on exit if the load succeeded.
//
// r1 - used to hold the capacity mask of the dictionary
//
@@ -202,8 +201,8 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
__ xorl(r0, r1);
// Compute capacity mask.
- __ movq(r1, FieldOperand(elements, NumberDictionary::kCapacityOffset));
- __ SmiToInteger32(r1, r1);
+ __ SmiToInteger32(r1,
+ FieldOperand(elements, NumberDictionary::kCapacityOffset));
__ decl(r1);
// Generate an unrolled loop that performs a few probes before giving up.
@@ -245,7 +244,7 @@ static void GenerateNumberDictionaryLoad(MacroAssembler* masm,
// Get the value at the masked, scaled index.
const int kValueOffset =
NumberDictionary::kElementsStartOffset + kPointerSize;
- __ movq(r0, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
+ __ movq(key, FieldOperand(elements, r2, times_pointer_size, kValueOffset));
}
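// [Illustrative sketch with hypothetical layout, not part of the patch.]
// These dictionary loads probe an open-addressed table whose capacity is a
// power of two, so "& (capacity - 1)" replaces a modulo and a few probes are
// unrolled inline before giving up. V8's real probe offsets are elided here;
// a linear probe stands in for them:

#include <cstdint>

const int kNotFound = -1;

int Probe(const int32_t* keys, int capacity, uint32_t hash, int32_t key,
          int max_probes) {
  uint32_t mask = capacity - 1;          // capacity is a power of two
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + i) & mask;  // linear probe, for illustration
    if (keys[index] == key) return static_cast<int>(index);
  }
  return kNotFound;
}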
@@ -351,7 +350,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// -- rsp[8] : name
// -- rsp[16] : receiver
// -----------------------------------
- Label slow, check_string, index_int, index_string;
+ Label slow, check_string, index_smi, index_string;
Label check_pixel_array, probe_dictionary;
Label check_number_dictionary;
@@ -377,23 +376,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Check that the key is a smi.
__ JumpIfNotSmi(rax, &check_string);
- // Save key in rbx in case we want it for the number dictionary
- // case.
- __ movq(rbx, rax);
- __ SmiToInteger32(rax, rax);
+
// Get the elements array of the object.
- __ bind(&index_int);
+ __ bind(&index_smi);
__ movq(rcx, FieldOperand(rcx, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
// Check that the key (index) is within bounds.
- __ cmpl(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
+ __ SmiCompare(rax, FieldOperand(rcx, FixedArray::kLengthOffset));
__ j(above_equal, &slow); // Unsigned comparison rejects negative indices.
// Fast case: Do the load.
- __ movq(rax, Operand(rcx, rax, times_pointer_size,
- FixedArray::kHeaderSize - kHeapObjectTag));
+ SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
+ __ movq(rax, FieldOperand(rcx,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
@@ -402,12 +401,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ ret(0);
// Check whether the elements is a pixel array.
- // rax: untagged index
+ // rax: key
// rcx: elements array
__ bind(&check_pixel_array);
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kPixelArrayMapRootIndex);
__ j(not_equal, &check_number_dictionary);
+ __ SmiToInteger32(rax, rax);
__ cmpl(rax, FieldOperand(rcx, PixelArray::kLengthOffset));
__ j(above_equal, &slow);
__ movq(rcx, FieldOperand(rcx, PixelArray::kExternalPointerOffset));
@@ -417,13 +417,13 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_number_dictionary);
// Check whether the elements is a number dictionary.
- // rax: untagged index
- // rbx: key
+ // rax: key
// rcx: elements
__ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
Heap::kHashTableMapRootIndex);
__ j(not_equal, &slow);
- GenerateNumberDictionaryLoad(masm, &slow, rcx, rbx, rax, rdx, rdi);
+ __ SmiToInteger32(rbx, rax);
+ GenerateNumberDictionaryLoad(masm, &slow, rcx, rax, rbx, rdx, rdi);
__ ret(0);
// Slow case: Load name and receiver from stack and jump to runtime.
@@ -512,78 +512,46 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
(1 << String::kArrayIndexValueBits));
__ bind(&index_string);
- __ movl(rax, rbx);
- __ and_(rax, Immediate(String::kArrayIndexHashMask));
- __ shrl(rax, Immediate(String::kHashShift));
- __ jmp(&index_int);
+ // We want the smi-tagged index in rax.
+ __ and_(rbx, Immediate(String::kArrayIndexValueMask));
+ __ shr(rbx, Immediate(String::kHashShift));
+ __ Integer32ToSmi(rax, rbx);
+ __ jmp(&index_smi);
}
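// [Annotation, not part of the patch; the exact shift/mask constants live on
// String.] Strings that parse as array indices cache the parsed index inside
// their hash field, so the keyed load recovers it with a mask and a shift
// instead of reparsing the characters:
//
//   uint32_t hash_field = ...;  // loaded from the string's hash field
//   uint32_t index = (hash_field & String::kArrayIndexValueMask)
//                    >> String::kHashShift;
//   // Integer32ToSmi then re-tags it so the index_smi fast path applies.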
void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rsp[0] : return address
- // -- rsp[8] : name
+ // -- rsp[8] : name (index)
// -- rsp[16] : receiver
// -----------------------------------
Label miss;
- Label index_not_smi;
Label index_out_of_range;
- Label slow_char_code;
- Label got_char_code;
Register receiver = rdx;
Register index = rax;
- Register code = rbx;
- Register scratch = rcx;
+ Register scratch1 = rbx;
+ Register scratch2 = rcx;
+ Register result = rax;
__ movq(index, Operand(rsp, 1 * kPointerSize));
__ movq(receiver, Operand(rsp, 2 * kPointerSize));
- StringHelper::GenerateFastCharCodeAt(masm,
- receiver,
- index,
- scratch,
- code,
- &miss, // When not a string.
- &index_not_smi,
- &index_out_of_range,
- &slow_char_code);
- // If we didn't bail out, code register contains smi tagged char
- // code.
- __ bind(&got_char_code);
- StringHelper::GenerateCharFromCode(masm, code, rax, scratch, JUMP_FUNCTION);
-#ifdef DEBUG
- __ Abort("Unexpected fall-through from char from code tail call");
-#endif
-
- // Check if key is a heap number.
- __ bind(&index_not_smi);
- __ CompareRoot(FieldOperand(index, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &miss);
-
- // Push receiver and key on the stack (now that we know they are a
- // string and a number), and call runtime.
- __ bind(&slow_char_code);
- __ EnterInternalFrame();
- __ push(receiver);
- __ push(index);
- __ CallRuntime(Runtime::kStringCharCodeAt, 2);
- ASSERT(!code.is(rax));
- __ movq(code, rax);
- __ LeaveInternalFrame();
+ StringCharAtGenerator char_at_generator(receiver,
+ index,
+ scratch1,
+ scratch2,
+ result,
+ &miss, // When not a string.
+ &miss, // When not a number.
+ &index_out_of_range,
+ STRING_INDEX_IS_ARRAY_INDEX);
+ char_at_generator.GenerateFast(masm);
+ __ ret(0);
- // Check if the runtime call returned NaN char code. If yes, return
- // undefined. Otherwise, we can continue.
- if (FLAG_debug_code) {
- ASSERT(kSmiTag == 0);
- __ JumpIfSmi(code, &got_char_code);
- __ CompareRoot(FieldOperand(code, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ Assert(equal, "StringCharCodeAt must return smi or heap number");
- }
- __ CompareRoot(code, Heap::kNanValueRootIndex);
- __ j(not_equal, &got_char_code);
+ ICRuntimeCallHelper call_helper;
+ char_at_generator.GenerateSlow(masm, call_helper);
__ bind(&index_out_of_range);
__ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
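
The hand-rolled char-code path is replaced by the shared StringCharAtGenerator: GenerateFast emits the inline happy path, GenerateSlow emits the bailouts that reach the runtime through an ICRuntimeCallHelper, and an out-of-range index still yields undefined. Behaviourally the stub implements something like the sketch below, which only models the observable semantics, not the generator's internals:

    #include <string>

    // Model of the stub's behaviour: char-at with an out-of-range index
    // producing "no value" (undefined in JS).
    bool StringCharAt(const std::string& s, size_t index, char* out) {
      if (index >= s.size()) return false;  // the &index_out_of_range path
      *out = s[index];                      // fast path
      return true;
    }
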
@@ -852,9 +820,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
__ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
Heap::kFixedArrayMapRootIndex);
__ j(not_equal, &check_pixel_array);
- // Untag the key (for checking against untagged length in the fixed array).
- __ SmiToInteger32(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, Array::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// rax: value
// rbx: FixedArray
// rcx: index (as a smi)
@@ -903,11 +869,10 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
// rcx: index (as a smi)
// flags: smicompare (rdx.length(), rbx)
__ j(not_equal, &slow); // do not leave holes in the array
- __ SmiToInteger64(rdi, rcx);
- __ cmpl(rdi, FieldOperand(rbx, FixedArray::kLengthOffset));
+ __ SmiCompare(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
__ j(above_equal, &slow);
- // Increment and restore smi-tag.
- __ Integer64PlusConstantToSmi(rdi, rdi, 1);
+ // Increment index to get new length.
+ __ SmiAddConstant(rdi, rcx, Smi::FromInt(1));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
__ jmp(&fast);
@@ -936,16 +901,14 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) {
Label non_smi_value;
__ JumpIfNotSmi(rax, &non_smi_value);
SmiIndex index = masm->SmiToIndex(rcx, rcx, kPointerSizeLog2);
- __ movq(Operand(rbx, index.reg, index.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(rbx, index.reg, index.scale, FixedArray::kHeaderSize),
rax);
__ ret(0);
__ bind(&non_smi_value);
// Slow case that needs to retain rcx for use by RecordWrite.
// Update write barrier for the elements array address.
SmiIndex index2 = masm->SmiToIndex(kScratchRegister, rcx, kPointerSizeLog2);
- __ movq(Operand(rbx, index2.reg, index2.scale,
- FixedArray::kHeaderSize - kHeapObjectTag),
+ __ movq(FieldOperand(rbx, index2.reg, index2.scale, FixedArray::kHeaderSize),
rax);
__ movq(rdx, rax);
__ RecordWriteNonSmi(rbx, 0, rdx, rcx);
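
Both stores now use FieldOperand, which folds in the -kHeapObjectTag adjustment that the old Operand(...) form spelled out by hand. The SmiIndex returned by SmiToIndex pairs a register with a scale so the smi key addresses the element directly; in plain arithmetic, assuming 64-bit smis with the payload in the high 32 bits:

    #include <cstdint>

    static const int kSmiShift = 32;        // assumed x64 smi layout
    static const int kPointerSizeLog2 = 3;  // 8-byte elements

    // What SmiToIndex(..., kPointerSizeLog2) achieves: a tagged smi key
    // becomes a byte offset into the FixedArray body.
    int64_t SmiKeyToByteOffset(int64_t smi_key) {
      int64_t untagged = smi_key >> kSmiShift;  // SmiToInteger64
      return untagged << kPointerSizeLog2;      // scale to bytes
    }
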
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index b7a6aaf9ef..3823cadb54 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -90,58 +90,21 @@ void MacroAssembler::RecordWriteHelper(Register object,
bind(&not_in_new_space);
}
- Label fast;
-
// Compute the page start address from the heap object pointer, and reuse
// the 'object' register for it.
- ASSERT(is_int32(~Page::kPageAlignmentMask));
- and_(object,
- Immediate(static_cast<int32_t>(~Page::kPageAlignmentMask)));
- Register page_start = object;
-
- // Compute the bit addr in the remembered set/index of the pointer in the
- // page. Reuse 'addr' as pointer_offset.
- subq(addr, page_start);
- shr(addr, Immediate(kPointerSizeLog2));
- Register pointer_offset = addr;
-
- // If the bit offset lies beyond the normal remembered set range, it is in
- // the extra remembered set area of a large object.
- cmpq(pointer_offset, Immediate(Page::kPageSize / kPointerSize));
- j(below, &fast);
-
- // We have a large object containing pointers. It must be a FixedArray.
-
- // Adjust 'page_start' so that addressing using 'pointer_offset' hits the
- // extra remembered set after the large object.
-
- // Load the array length into 'scratch'.
- movl(scratch,
- Operand(page_start,
- Page::kObjectStartOffset + FixedArray::kLengthOffset));
- Register array_length = scratch;
-
- // Extra remembered set starts right after the large object (a FixedArray), at
- // page_start + kObjectStartOffset + objectSize
- // where objectSize is FixedArray::kHeaderSize + kPointerSize * array_length.
- // Add the delta between the end of the normal RSet and the start of the
- // extra RSet to 'page_start', so that addressing the bit using
- // 'pointer_offset' hits the extra RSet words.
- lea(page_start,
- Operand(page_start, array_length, times_pointer_size,
- Page::kObjectStartOffset + FixedArray::kHeaderSize
- - Page::kRSetEndOffset));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bind(&fast);
- bts(Operand(page_start, Page::kRSetOffset), pointer_offset);
-}
-
-
-// Set the remembered set bit for [object+offset].
+ and_(object, Immediate(~Page::kPageAlignmentMask));
+
+  // Compute the number of the region covering addr; see the
+  // Page::GetRegionNumberForAddress method for details.
+ and_(addr, Immediate(Page::kPageAlignmentMask));
+ shrl(addr, Immediate(Page::kRegionSizeLog2));
+
+ // Set dirty mark for region.
+ bts(Operand(object, Page::kDirtyFlagOffset), addr);
+}
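
The rewritten helper drops the remembered-set bookkeeping (including the extra RSet for large objects) in favour of per-page region dirty marks: mask the address down to its page, take the offset within the page, and divide by the region size to pick the dirty bit. The same arithmetic as ordinary code, with placeholder constants (the real kPageAlignmentMask and kRegionSizeLog2 come from spaces.h and are assumed here):

    #include <cstdint>

    static const uintptr_t kPageAlignmentMask =
        (static_cast<uintptr_t>(1) << 13) - 1;  // assumed page size
    static const int kRegionSizeLog2 = 5;        // assumed region size

    uintptr_t PageStart(uintptr_t addr) {
      return addr & ~kPageAlignmentMask;  // the and_(object, ...) above
    }

    int RegionNumber(uintptr_t addr) {
      // Offset within the page, divided by the region size, selects the
      // dirty bit that bts sets in the page's dirty-flags word.
      return static_cast<int>((addr & kPageAlignmentMask) >> kRegionSizeLog2);
    }
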
+
+
+// For the page containing |object|, mark the region covering [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the smi_index register contains the array index into
// the elements array represented as a smi. Otherwise it can be used as a
@@ -156,9 +119,8 @@ void MacroAssembler::RecordWrite(Register object,
// registers are rsi.
ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi));
- // First, check if a remembered set write is even needed. The tests below
- // catch stores of Smis and stores into young gen (which does not have space
- // for the remembered set bits).
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis and stores into young gen.
Label done;
JumpIfSmi(value, &done);
@@ -191,8 +153,8 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
bind(&okay);
}
- // Test that the object address is not in the new space. We cannot
- // set remembered set bits in the new space.
+ // Test that the object address is not in the new space. We cannot
+ // update page dirty marks for new space pages.
InNewSpace(object, scratch, equal, &done);
// The offset is relative to a tagged or untagged HeapObject pointer,
@@ -201,48 +163,19 @@ void MacroAssembler::RecordWriteNonSmi(Register object,
ASSERT(IsAligned(offset, kPointerSize) ||
IsAligned(offset + kHeapObjectTag, kPointerSize));
- // We use optimized write barrier code if the word being written to is not in
- // a large object page, or is in the first "page" of a large object page.
- // We make sure that an offset is inside the right limits whether it is
- // tagged or untagged.
- if ((offset > 0) && (offset < Page::kMaxHeapObjectSize - kHeapObjectTag)) {
- // Compute the bit offset in the remembered set, leave it in 'scratch'.
- lea(scratch, Operand(object, offset));
- ASSERT(is_int32(Page::kPageAlignmentMask));
- and_(scratch, Immediate(static_cast<int32_t>(Page::kPageAlignmentMask)));
- shr(scratch, Immediate(kPointerSizeLog2));
-
- // Compute the page address from the heap object pointer, leave it in
- // 'object' (immediate value is sign extended).
- and_(object, Immediate(~Page::kPageAlignmentMask));
-
- // NOTE: For now, we use the bit-test-and-set (bts) x86 instruction
- // to limit code size. We should probably evaluate this decision by
- // measuring the performance of an equivalent implementation using
- // "simpler" instructions
- bts(Operand(object, Page::kRSetOffset), scratch);
+ Register dst = smi_index;
+ if (offset != 0) {
+ lea(dst, Operand(object, offset));
} else {
- Register dst = smi_index;
- if (offset != 0) {
- lea(dst, Operand(object, offset));
- } else {
- // array access: calculate the destination address in the same manner as
- // KeyedStoreIC::GenerateGeneric.
- SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
- lea(dst, FieldOperand(object,
- index.reg,
- index.scale,
- FixedArray::kHeaderSize));
- }
- // If we are already generating a shared stub, not inlining the
- // record write code isn't going to save us any memory.
- if (generating_stub()) {
- RecordWriteHelper(object, dst, scratch);
- } else {
- RecordWriteStub stub(object, dst, scratch);
- CallStub(&stub);
- }
+ // array access: calculate the destination address in the same manner as
+ // KeyedStoreIC::GenerateGeneric.
+ SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2);
+ lea(dst, FieldOperand(object,
+ index.reg,
+ index.scale,
+ FixedArray::kHeaderSize));
}
+ RecordWriteHelper(object, dst, scratch);
bind(&done);
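
With the large-object special case gone, the barrier reduces to one shape: compute the destination slot (either object+offset or the smi-indexed element) and hand it to RecordWriteHelper. The guards that remain are the classic generational ones, summarized below (new-space objects need no dirty marks because the scavenger visits them wholesale; this sketches the emitted checks, not the emitted code):

    // Summary of the write barrier's skip conditions.
    void RecordWriteSketch(bool value_is_smi, bool object_in_new_space,
                           void (*mark_region_dirty)()) {
      if (value_is_smi) return;         // smis are not heap pointers
      if (object_in_new_space) return;  // new space is scanned in full
      mark_region_dirty();              // RecordWriteHelper
    }
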
@@ -573,6 +506,11 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
}
+void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
+ movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+}
+
+
void MacroAssembler::SmiToInteger64(Register dst, Register src) {
ASSERT_EQ(0, kSmiTag);
if (!dst.is(src)) {
@@ -614,7 +552,7 @@ void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
- cmpl(Operand(dst, kIntSize), Immediate(src->value()));
+ cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
}
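
Both the new SmiToInteger32 overload and this SmiCompare change rely on the same little-endian trick: kSmiShift / kBitsPerByte is 32 / 8 = 4, so the 32-bit word four bytes into a memory-resident smi is exactly its untagged payload, and a plain movl/cmpl on that word operates on the integer value directly. (kIntSize happens to be 4 as well, so the old offset was numerically identical; deriving it from the smi layout makes the intent explicit.) In portable terms:

    #include <cstdint>
    #include <cstring>

    // Little-endian x64 assumed: a smi stores value << 32, so the 32-bit
    // word at byte offset kSmiShift / kBitsPerByte (= 4) is the payload.
    int32_t LoadSmiPayload(const int64_t* smi_slot) {
      int32_t payload;
      std::memcpy(&payload, reinterpret_cast<const char*>(smi_slot) + 4,
                  sizeof(payload));
      return payload;  // equals static_cast<int32_t>(*smi_slot >> 32)
    }
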
@@ -638,6 +576,18 @@ void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
}
+void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
+ Register src,
+ int power) {
+ ASSERT((0 <= power) && (power < 32));
+ if (dst.is(src)) {
+ shr(dst, Immediate(power + kSmiShift));
+ } else {
+ UNIMPLEMENTED(); // Not used.
+ }
+}
+
+
Condition MacroAssembler::CheckSmi(Register src) {
ASSERT_EQ(0, kSmiTag);
testb(src, Immediate(kSmiTagMask));
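
The new division helper is a single shift because a positive smi already is its payload times 2^32: shifting right by power + kSmiShift divides by 2^power and untags in one instruction. For example, the smi for 40 is 40 << 32; shifted right by 3 + 32 it yields 5. As a one-liner:

    #include <cstdint>

    static const int kSmiShift = 32;

    // Valid for non-negative smi payloads and 0 <= power < 32, matching
    // the ASSERT in the macro assembler.
    uint32_t PositiveSmiDivPow2(uint64_t smi, int power) {
      return static_cast<uint32_t>(smi >> (power + kSmiShift));
    }
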
@@ -916,7 +866,7 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
if (constant->value() != 0) {
- addl(Operand(dst, kIntSize), Immediate(constant->value()));
+ addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
}
}
@@ -2594,7 +2544,7 @@ void MacroAssembler::AllocateTwoByteString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movl(FieldOperand(result, String::kHashFieldOffset),
+ movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -2632,7 +2582,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
movq(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
Integer32ToSmi(scratch1, length);
movq(FieldOperand(result, String::kLengthOffset), scratch1);
- movl(FieldOperand(result, String::kHashFieldOffset),
+ movq(FieldOperand(result, String::kHashFieldOffset),
Immediate(String::kEmptyHashField));
}
@@ -2691,20 +2641,27 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
}
}
+
int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) {
- // On Windows stack slots are reserved by the caller for all arguments
- // including the ones passed in registers. On Linux 6 arguments are passed in
- // registers and the caller does not reserve stack slots for them.
+  // On Windows 64, stack slots are reserved by the caller for all arguments
+  // including the ones passed in registers, and space is always allocated
+  // for the four register arguments even if the function takes fewer than
+  // four arguments.
+  // Under the AMD64 ABI (Linux/Mac), the first six arguments are passed in
+  // registers and the caller does not reserve stack slots for them.
ASSERT(num_arguments >= 0);
#ifdef _WIN64
- static const int kArgumentsWithoutStackSlot = 0;
+ static const int kMinimumStackSlots = 4;
+ if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
+ return num_arguments;
#else
- static const int kArgumentsWithoutStackSlot = 6;
+ static const int kRegisterPassedArguments = 6;
+ if (num_arguments < kRegisterPassedArguments) return 0;
+ return num_arguments - kRegisterPassedArguments;
#endif
- return num_arguments > kArgumentsWithoutStackSlot ?
- num_arguments - kArgumentsWithoutStackSlot : 0;
}
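
The rewritten helper makes the two calling conventions explicit: Win64 always reserves at least four "home" slots, even for arguments passed in registers, while the System V AMD64 ABI passes the first six integer arguments in registers and reserves no slots for them. The same logic with the #ifdef turned into a parameter, as a reference sketch:

    // Mirrors ArgumentStackSlotsForCFunctionCall above.
    int ArgumentStackSlots(int num_arguments, bool win64) {
      if (win64) {
        const int kMinimumStackSlots = 4;  // Win64 register "home" area
        return num_arguments < kMinimumStackSlots ? kMinimumStackSlots
                                                  : num_arguments;
      }
      const int kRegisterPassedArguments = 6;  // rdi, rsi, rdx, rcx, r8, r9
      return num_arguments < kRegisterPassedArguments
                 ? 0
                 : num_arguments - kRegisterPassedArguments;
    }
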
+
void MacroAssembler::PrepareCallCFunction(int num_arguments) {
int frame_alignment = OS::ActivationFrameAlignment();
ASSERT(frame_alignment != 0);
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index b4f3240ec8..0acce0543c 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -78,8 +78,8 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// GC Support
- // Set the remebered set bit for an address which points into an
- // object. RecordWriteHelper only works if the object is not in new
+  // For the page containing |object|, mark the region covering |addr| dirty.
+ // RecordWriteHelper only works if the object is not in new
// space.
void RecordWriteHelper(Register object,
Register addr,
@@ -93,7 +93,7 @@ class MacroAssembler: public Assembler {
Condition cc,
Label* branch);
- // Set the remembered set bit for [object+offset].
+  // For the page containing |object|, mark the region covering
+  // [object+offset] dirty.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
@@ -103,7 +103,7 @@ class MacroAssembler: public Assembler {
Register value,
Register scratch);
- // Set the remembered set bit for [object+offset].
+  // For the page containing |object|, mark the region covering
+  // [object+offset] dirty.
// The value is known to not be a smi.
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
@@ -210,6 +210,7 @@ class MacroAssembler: public Assembler {
// Convert smi to 32-bit integer. I.e., not sign extended into
// high 32 bits of destination.
void SmiToInteger32(Register dst, Register src);
+ void SmiToInteger32(Register dst, const Operand& src);
// Convert smi to 64-bit integer (sign extended if necessary).
void SmiToInteger64(Register dst, Register src);
@@ -220,6 +221,13 @@ class MacroAssembler: public Assembler {
Register src,
int power);
+ // Divide a positive smi's integer value by a power of two.
+  // Provides the result as a 32-bit integer value.
+ void PositiveSmiDivPowerOfTwoToInteger32(Register dst,
+ Register src,
+ int power);
+
+
// Simple comparison of smis.
void SmiCompare(Register dst, Register src);
void SmiCompare(Register dst, Smi* src);
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 8b095cbbe6..fbd95d9ee8 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -375,206 +375,6 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-template <class Compiler>
-static void CompileLoadInterceptor(Compiler* compiler,
- StubCompiler* stub_compiler,
- MacroAssembler* masm,
- JSObject* object,
- JSObject* holder,
- String* name,
- LookupResult* lookup,
- Register receiver,
- Register scratch1,
- Register scratch2,
- Label* miss) {
- ASSERT(holder->HasNamedInterceptor());
- ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
-
- // Check that the receiver isn't a smi.
- __ JumpIfSmi(receiver, miss);
-
- // Check that the maps haven't changed.
- Register reg =
- stub_compiler->CheckPrototypes(object, receiver, holder,
- scratch1, scratch2, name, miss);
-
- if (lookup->IsProperty() && lookup->IsCacheable()) {
- compiler->CompileCacheable(masm,
- stub_compiler,
- receiver,
- reg,
- scratch1,
- scratch2,
- holder,
- lookup,
- name,
- miss);
- } else {
- compiler->CompileRegular(masm,
- receiver,
- reg,
- scratch2,
- holder,
- miss);
- }
-}
-
-
-class LoadInterceptorCompiler BASE_EMBEDDED {
- public:
- explicit LoadInterceptorCompiler(Register name) : name_(name) {}
-
- void CompileCacheable(MacroAssembler* masm,
- StubCompiler* stub_compiler,
- Register receiver,
- Register holder,
- Register scratch1,
- Register scratch2,
- JSObject* interceptor_holder,
- LookupResult* lookup,
- String* name,
- Label* miss_label) {
- AccessorInfo* callback = NULL;
- bool optimize = false;
- // So far the most popular follow ups for interceptor loads are FIELD
- // and CALLBACKS, so inline only them, other cases may be added
- // later.
- if (lookup->type() == FIELD) {
- optimize = true;
- } else if (lookup->type() == CALLBACKS) {
- Object* callback_object = lookup->GetCallbackObject();
- if (callback_object->IsAccessorInfo()) {
- callback = AccessorInfo::cast(callback_object);
- optimize = callback->getter() != NULL;
- }
- }
-
- if (!optimize) {
- CompileRegular(masm, receiver, holder, scratch2, interceptor_holder,
- miss_label);
- return;
- }
-
- // Note: starting a frame here makes GC aware of pointers pushed below.
- __ EnterInternalFrame();
-
- if (lookup->type() == CALLBACKS) {
- __ push(receiver);
- }
- __ push(holder);
- __ push(name_);
-
- // Invoke an interceptor. Note: map checks from receiver to
- // interceptor's holder has been compiled before (see a caller
- // of this method.)
- CompileCallLoadPropertyWithInterceptor(masm,
- receiver,
- holder,
- name_,
- interceptor_holder);
-
- // Check if interceptor provided a value for property. If it's
- // the case, return immediately.
- Label interceptor_failed;
- __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
- __ j(equal, &interceptor_failed);
- __ LeaveInternalFrame();
- __ ret(0);
-
- __ bind(&interceptor_failed);
- __ pop(name_);
- __ pop(holder);
- if (lookup->type() == CALLBACKS) {
- __ pop(receiver);
- }
-
- __ LeaveInternalFrame();
-
- if (lookup->type() == FIELD) {
- // We found FIELD property in prototype chain of interceptor's holder.
- // Check that the maps from interceptor's holder to field's holder
- // haven't changed...
- holder = stub_compiler->CheckPrototypes(interceptor_holder,
- holder,
- lookup->holder(),
- scratch1,
- scratch2,
- name,
- miss_label);
- // ... and retrieve a field from field's holder.
- stub_compiler->GenerateFastPropertyLoad(masm,
- rax,
- holder,
- lookup->holder(),
- lookup->GetFieldIndex());
- __ ret(0);
- } else {
- // We found CALLBACKS property in prototype chain of interceptor's
- // holder.
- ASSERT(lookup->type() == CALLBACKS);
- ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
- ASSERT(callback != NULL);
- ASSERT(callback->getter() != NULL);
-
- // Prepare for tail call. Push receiver to stack after return address.
- Label cleanup;
- __ pop(scratch2); // return address
- __ push(receiver);
- __ push(scratch2);
-
- // Check that the maps from interceptor's holder to callback's holder
- // haven't changed.
- holder = stub_compiler->CheckPrototypes(interceptor_holder, holder,
- lookup->holder(), scratch1,
- scratch2,
- name,
- &cleanup);
-
- // Continue tail call preparation: push remaining parameters after
- // return address.
- __ pop(scratch2); // return address
- __ push(holder);
- __ Move(holder, Handle<AccessorInfo>(callback));
- __ push(holder);
- __ push(FieldOperand(holder, AccessorInfo::kDataOffset));
- __ push(name_);
- __ push(scratch2); // restore return address
-
- // Tail call to runtime.
- ExternalReference ref =
- ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
- __ TailCallExternalReference(ref, 5, 1);
-
- // Clean up code: we pushed receiver after return address and
- // need to remove it from there.
- __ bind(&cleanup);
- __ pop(scratch1); // return address
- __ pop(scratch2); // receiver
- __ push(scratch1);
- }
- }
-
-
- void CompileRegular(MacroAssembler* masm,
- Register receiver,
- Register holder,
- Register scratch,
- JSObject* interceptor_holder,
- Label* miss_label) {
- __ pop(scratch); // save old return address
- PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
- __ push(scratch); // restore old return address
-
- ExternalReference ref = ExternalReference(
- IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
- __ TailCallExternalReference(ref, 5, 1);
- }
-
- private:
- Register name_;
-};
-
-
// Reserves space for the extra arguments to FastHandleApiCall in the
// caller's frame.
//
@@ -761,9 +561,9 @@ class CallInterceptorCompiler BASE_EMBEDDED {
Label miss_cleanup;
Label* miss = can_do_fast_api_call ? &miss_cleanup : miss_label;
Register holder =
- stub_compiler_->CheckPrototypes(object, receiver, interceptor_holder,
- scratch1, scratch2, name,
- depth1, miss);
+ stub_compiler_->CheckPrototypes(object, receiver,
+ interceptor_holder, scratch1,
+ scratch2, name, depth1, miss);
// Invoke an interceptor and if it provides a value,
// branch to |regular_invoke|.
@@ -776,10 +576,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
// Check that the maps from interceptor's holder to constant function's
// holder haven't changed and thus we can use cached constant function.
- stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
- lookup->holder(),
- scratch1, scratch2, name,
- depth2, miss);
+ if (interceptor_holder != lookup->holder()) {
+ stub_compiler_->CheckPrototypes(interceptor_holder, receiver,
+ lookup->holder(), scratch1,
+ scratch2, name, depth2, miss);
+ } else {
+    // CheckPrototypes has a side effect of fetching a 'holder' for the
+    // API (an object which is instanceof for the signature). It's safe
+    // to omit it here: if present, it has already been fetched by the
+    // previous CheckPrototypes call.
+ ASSERT(depth2 == kInvalidProtoDepth);
+ }
// Invoke function.
if (can_do_fast_api_call) {
@@ -1148,7 +955,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ j(not_equal, &miss);
if (argc == 1) { // Otherwise fall through to call builtin.
- Label call_builtin, exit, with_rset_update, attempt_to_grow_elements;
+ Label call_builtin, exit, with_write_barrier, attempt_to_grow_elements;
// Get the array's length into rax and calculate new length.
__ movq(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1156,8 +963,7 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ SmiAddConstant(rax, rax, Smi::FromInt(argc));
// Get the element's length into rcx.
- __ movl(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
- __ Integer32ToSmi(rcx, rcx);
+ __ movq(rcx, FieldOperand(rbx, FixedArray::kLengthOffset));
// Check if we could survive without allocation.
__ SmiCompare(rax, rcx);
@@ -1176,12 +982,12 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(Operand(rdx, 0), rcx);
// Check if value is a smi.
- __ JumpIfNotSmi(rcx, &with_rset_update);
+ __ JumpIfNotSmi(rcx, &with_write_barrier);
__ bind(&exit);
__ ret((argc + 1) * kPointerSize);
- __ bind(&with_rset_update);
+ __ bind(&with_write_barrier);
__ InNewSpace(rbx, rcx, equal, &exit);
@@ -1229,11 +1035,11 @@ Object* CallStubCompiler::CompileArrayPushCall(Object* object,
__ movq(rdx, Operand(rsp, (argc + 1) * kPointerSize));
// Increment element's and array's sizes.
- __ addl(FieldOperand(rbx, FixedArray::kLengthOffset),
- Immediate(kAllocationDelta));
+ __ SmiAddConstant(FieldOperand(rbx, FixedArray::kLengthOffset),
+ Smi::FromInt(kAllocationDelta));
__ movq(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- // Elements are in new space, so no remembered set updates are necessary.
+  // Elements are in new space, so the write barrier is not required.
__ ret((argc + 1) * kPointerSize);
__ bind(&call_builtin);
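
The push fast path stores the new element, bumps both lengths, and only detours through the write barrier when the value is a heap object (smis carry no pointers); when it has to grow, it extends the elements array in place at the new-space allocation top, where no barrier is needed at all. A behavioural sketch of the capacity check, with the growth and builtin fallback elided:

    #include <cstddef>

    // Fast in-place append; returns false where the stub would fall
    // through to &attempt_to_grow_elements / &call_builtin.
    bool FastPush(void** elements, size_t* length, size_t capacity,
                  void* value) {
      if (*length >= capacity) return false;
      elements[(*length)++] = value;  // store, then bump JSArray length
      return true;  // barrier handled separately for heap values
    }
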
@@ -1339,6 +1145,25 @@ Object* CallStubCompiler::CompileArrayPopCall(Object* object,
}
+Object* CallStubCompiler::CompileStringCharAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
+
+Object* CallStubCompiler::CompileStringCharCodeAtCall(Object* object,
+ JSObject* holder,
+ JSFunction* function,
+ String* name,
+ CheckType check) {
+ // TODO(722): implement this.
+ return Heap::undefined_value();
+}
+
Object* CallStubCompiler::CompileCallInterceptor(JSObject* object,
@@ -2117,9 +1942,8 @@ Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
}
-
void StubCompiler::GenerateLoadInterceptor(JSObject* object,
- JSObject* holder,
+ JSObject* interceptor_holder,
LookupResult* lookup,
Register receiver,
Register name_reg,
@@ -2127,18 +1951,128 @@ void StubCompiler::GenerateLoadInterceptor(JSObject* object,
Register scratch2,
String* name,
Label* miss) {
- LoadInterceptorCompiler compiler(name_reg);
- CompileLoadInterceptor(&compiler,
- this,
- masm(),
- object,
- holder,
- name,
- lookup,
- receiver,
- scratch1,
- scratch2,
- miss);
+ ASSERT(interceptor_holder->HasNamedInterceptor());
+ ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
+
+ // Check that the receiver isn't a smi.
+ __ JumpIfSmi(receiver, miss);
+
+  // So far the most popular follow-ups for interceptor loads are FIELD
+  // and CALLBACKS, so inline only those; other cases may be added
+  // later.
+ bool compile_followup_inline = false;
+ if (lookup->IsProperty() && lookup->IsCacheable()) {
+ if (lookup->type() == FIELD) {
+ compile_followup_inline = true;
+ } else if (lookup->type() == CALLBACKS &&
+ lookup->GetCallbackObject()->IsAccessorInfo() &&
+ AccessorInfo::cast(lookup->GetCallbackObject())->getter() != NULL) {
+ compile_followup_inline = true;
+ }
+ }
+
+ if (compile_followup_inline) {
+ // Compile the interceptor call, followed by inline code to load the
+ // property from further up the prototype chain if the call fails.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ ASSERT(holder_reg.is(receiver) || holder_reg.is(scratch1));
+
+ // Save necessary data before invoking an interceptor.
+ // Requires a frame to make GC aware of pushed pointers.
+ __ EnterInternalFrame();
+
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+      // The CALLBACKS case needs the receiver to be passed to the C++
+      // callback.
+ __ push(receiver);
+ }
+ __ push(holder_reg);
+ __ push(name_reg);
+
+    // Invoke the interceptor. Note: map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the callers of
+    // this method).
+ CompileCallLoadPropertyWithInterceptor(masm(),
+ receiver,
+ holder_reg,
+ name_reg,
+ interceptor_holder);
+
+    // Check whether the interceptor provided a value for the property.
+    // If so, return immediately.
+ Label interceptor_failed;
+ __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+ __ j(equal, &interceptor_failed);
+ __ LeaveInternalFrame();
+ __ ret(0);
+
+ __ bind(&interceptor_failed);
+ __ pop(name_reg);
+ __ pop(holder_reg);
+ if (lookup->type() == CALLBACKS && !receiver.is(holder_reg)) {
+ __ pop(receiver);
+ }
+
+ __ LeaveInternalFrame();
+
+    // Check that the maps from the interceptor's holder to the lookup's
+    // holder haven't changed, and load the lookup's holder into |holder_reg|.
+ if (interceptor_holder != lookup->holder()) {
+ holder_reg = CheckPrototypes(interceptor_holder,
+ holder_reg,
+ lookup->holder(),
+ scratch1,
+ scratch2,
+ name,
+ miss);
+ }
+
+ if (lookup->type() == FIELD) {
+      // We found a FIELD property in the prototype chain of the
+      // interceptor's holder. Retrieve the field from the field's holder.
+ GenerateFastPropertyLoad(masm(), rax, holder_reg,
+ lookup->holder(), lookup->GetFieldIndex());
+ __ ret(0);
+ } else {
+      // We found a CALLBACKS property in the prototype chain of the
+      // interceptor's holder.
+ ASSERT(lookup->type() == CALLBACKS);
+ ASSERT(lookup->GetCallbackObject()->IsAccessorInfo());
+ AccessorInfo* callback = AccessorInfo::cast(lookup->GetCallbackObject());
+ ASSERT(callback != NULL);
+ ASSERT(callback->getter() != NULL);
+
+ // Tail call to runtime.
+      // Important invariant in the CALLBACKS case: the code above must
+      // never clobber the |receiver| register.
+ __ pop(scratch2); // return address
+ __ push(receiver);
+ __ push(holder_reg);
+ __ Move(holder_reg, Handle<AccessorInfo>(callback));
+ __ push(holder_reg);
+ __ push(FieldOperand(holder_reg, AccessorInfo::kDataOffset));
+ __ push(name_reg);
+ __ push(scratch2); // restore return address
+
+ ExternalReference ref =
+ ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
+ } else { // !compile_followup_inline
+ // Call the runtime system to load the interceptor.
+ // Check that the maps haven't changed.
+ Register holder_reg = CheckPrototypes(object, receiver, interceptor_holder,
+ scratch1, scratch2, name, miss);
+ __ pop(scratch2); // save old return address
+ PushInterceptorArguments(masm(), receiver, holder_reg,
+ name_reg, interceptor_holder);
+ __ push(scratch2); // restore old return address
+
+ ExternalReference ref = ExternalReference(
+ IC_Utility(IC::kLoadPropertyWithInterceptorForLoad));
+ __ TailCallExternalReference(ref, 5, 1);
+ }
}
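
Flattening the old Compiler-template pair into GenerateLoadInterceptor leaves one readable decision tree: if the lookup behind the interceptor is a cacheable FIELD or usable CALLBACKS property, call the interceptor and fall back to an inline follow-up load; otherwise go straight to the runtime. As ordinary control flow, a model of the emitted paths rather than the assembly itself:

    enum FollowupType { kField, kCallbacks, kNone };

    struct LookupSketch {
      bool is_property_and_cacheable;
      FollowupType type;
      bool callback_has_getter;
    };

    const char* LoadWithInterceptor(const LookupSketch& lookup) {
      bool inline_followup =
          lookup.is_property_and_cacheable &&
          (lookup.type == kField ||
           (lookup.type == kCallbacks && lookup.callback_has_getter));
      if (!inline_followup) {
        return "runtime: kLoadPropertyWithInterceptorForLoad";
      }
      // Fast route: invoke the interceptor inside an internal frame; if it
      // produced a value, return it, else load from further up the chain.
      return lookup.type == kField ? "inline field load"
                                   : "tail call: kLoadCallbackProperty";
    }
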
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index 1c9751bb12..affe18ffa3 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -590,7 +590,7 @@ class VirtualFrame : public ZoneObject {
inline bool Equals(VirtualFrame* other);
// Classes that need raw access to the elements_ array.
- friend class DeferredCode;
+ friend class FrameRegisterState;
friend class JumpTarget;
};