| author | Ryan Dahl <ry@tinyclouds.org> | 2011-01-28 01:56:31 -0800 |
| committer | Ryan Dahl <ry@tinyclouds.org> | 2011-01-28 01:57:00 -0800 |
| commit | 7eaa956baee9070f8a073aa5c1bff01b1b8f2f5d (patch) | |
| tree | f23f0a06b77cfd983640ff0fcf9e5d4714e4d108 /deps/v8/src/arm/macro-assembler-arm.cc | |
| parent | 97375c475e17562a016aa4d13f94030bd0f3ae37 (diff) | |
| download | node-new-7eaa956baee9070f8a073aa5c1bff01b1b8f2f5d.tar.gz | |
Upgrade V8 to 3.0.12
Diffstat (limited to 'deps/v8/src/arm/macro-assembler-arm.cc')
-rw-r--r-- | deps/v8/src/arm/macro-assembler-arm.cc | 144 |
1 file changed, 74 insertions, 70 deletions
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index a78de986e7..66cfdca670 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -1,4 +1,4 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -318,7 +318,7 @@ void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
   CheckConstPool(true, true);
   add(pc, pc, Operand(index, LSL,
-                      assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize));
+                      Instruction::kInstrSizeLog2 - kSmiTagSize));
   BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
   nop();  // Jump table alignment.
   for (int i = 0; i < targets.length(); i++) {
@@ -369,12 +369,12 @@ void MacroAssembler::RecordWriteHelper(Register object,
 void MacroAssembler::InNewSpace(Register object,
                                 Register scratch,
-                                Condition cc,
+                                Condition cond,
                                 Label* branch) {
-  ASSERT(cc == eq || cc == ne);
+  ASSERT(cond == eq || cond == ne);
   and_(scratch, object, Operand(ExternalReference::new_space_mask()));
   cmp(scratch, Operand(ExternalReference::new_space_start()));
-  b(cc, branch);
+  b(cond, branch);
 }
@@ -615,37 +615,24 @@ void MacroAssembler::LeaveFrame(StackFrame::Type type) {
 void MacroAssembler::EnterExitFrame(bool save_doubles) {
-  // r0 is argc.
-  // Compute callee's stack pointer before making changes and save it as
-  // ip register so that it is restored as sp register on exit, thereby
-  // popping the args.
-
-  // ip = sp + kPointerSize * #args;
-  add(ip, sp, Operand(r0, LSL, kPointerSizeLog2));
-
-  // Compute the argv pointer and keep it in a callee-saved register.
-  sub(r6, ip, Operand(kPointerSize));
-
-  // Prepare the stack to be aligned when calling into C. After this point there
-  // are 5 pushes before the call into C, so the stack needs to be aligned after
-  // 5 pushes.
-  int frame_alignment = ActivationFrameAlignment();
-  int frame_alignment_mask = frame_alignment - 1;
-  if (frame_alignment != kPointerSize) {
-    // The following code needs to be more general if this assert does not hold.
-    ASSERT(frame_alignment == 2 * kPointerSize);
-    // With 5 pushes left the frame must be unaligned at this point.
-    mov(r7, Operand(Smi::FromInt(0)));
-    tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
-    push(r7, eq);  // Push if aligned to make it unaligned.
-  }
-
-  // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
-  stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
+  // Compute the argv pointer in a callee-saved register.
+  add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
+  sub(r6, r6, Operand(kPointerSize));
+
+  // Setup the frame structure on the stack.
+  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+  Push(lr, fp);
   mov(fp, Operand(sp));  // Setup new frame pointer.
-
+  // Reserve room for saved entry sp and code object.
+  sub(sp, sp, Operand(2 * kPointerSize));
+  if (FLAG_debug_code) {
+    mov(ip, Operand(0));
+    str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
   mov(ip, Operand(CodeObject()));
-  push(ip);  // Accessed from ExitFrame::code_slot.
+  str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
 
   // Save the frame pointer and the context in top.
   mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
@@ -659,25 +646,30 @@ void MacroAssembler::EnterExitFrame(bool save_doubles) {
   // Optionally save all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vstrm instruction.
-    // The stack alignment code above made sp unaligned, so add space for one
-    // more double register and use aligned addresses.
-    ASSERT(kDoubleSize == frame_alignment);
-    // Mark the frame as containing doubles by pushing a non-valid return
-    // address, i.e. 0.
-    ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
-    mov(ip, Operand(0));  // Marker and alignment word.
-    push(ip);
-    int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
-    sub(sp, sp, Operand(space));
+    sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
+    const int offset = -2 * kPointerSize;
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
-      vstr(reg, sp, i * kDoubleSize + kPointerSize);
+      vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
     }
-    // Note that d0 will be accessible at fp - 2*kPointerSize -
-    // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the
-    // alignment word were pushed after the fp.
+    // Note that d0 will be accessible at
+    // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
+    // since the sp slot and code slot were pushed after the fp.
+  }
+
+  // Reserve place for the return address and align the frame preparing for
+  // calling the runtime function.
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  sub(sp, sp, Operand(kPointerSize));
+  if (frame_alignment > 0) {
+    ASSERT(IsPowerOf2(frame_alignment));
+    and_(sp, sp, Operand(-frame_alignment));
   }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  add(ip, sp, Operand(kPointerSize));
+  str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
 }
@@ -715,12 +707,10 @@ int MacroAssembler::ActivationFrameAlignment() {
 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   // Optionally restore all double registers.
   if (save_doubles) {
-    // TODO(regis): Use vldrm instruction.
     for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
       DwVfpRegister reg = DwVfpRegister::from_code(i);
-      // Register d15 is just below the marker.
-      const int offset = ExitFrameConstants::kMarkerOffset;
-      vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
+      const int offset = -2 * kPointerSize;
+      vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
     }
   }
@@ -736,9 +726,12 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles) {
   str(r3, MemOperand(ip));
 #endif
 
-  // Pop the arguments, restore registers, and return.
-  mov(sp, Operand(fp));  // respect ABI stack constraint
-  ldm(ia, sp, fp.bit() | sp.bit() | pc.bit());
+  // Tear down the exit frame, pop the arguments, and return. Callee-saved
+  // register r4 still holds argc.
+  mov(sp, Operand(fp));
+  ldm(ia_w, sp, fp.bit() | lr.bit());
+  add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
+  mov(pc, lr);
 }
@@ -933,7 +926,7 @@ void MacroAssembler::IsObjectJSStringType(Register object,
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   tst(scratch, Operand(kIsNotStringMask));
-  b(nz, fail);
+  b(ne, fail);
 }
@@ -1392,7 +1385,7 @@ void MacroAssembler::CheckMap(Register obj,
                               Label* fail,
                               bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   mov(ip, Operand(map));
@@ -1407,7 +1400,7 @@ void MacroAssembler::CheckMap(Register obj,
                              Label* fail,
                              bool is_heap_object) {
   if (!is_heap_object) {
-    BranchOnSmi(obj, fail);
+    JumpIfSmi(obj, fail);
   }
   ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   LoadRoot(ip, index);
@@ -1421,7 +1414,7 @@ void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register scratch,
                                              Label* miss) {
   // Check that the receiver isn't a smi.
-  BranchOnSmi(function, miss);
+  JumpIfSmi(function, miss);
 
   // Check that the function really is a function. Load map into result reg.
   CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
@@ -1520,7 +1513,7 @@ void MacroAssembler::ObjectToDoubleVFPRegister(Register object,
   Label done;
   if ((flags & OBJECT_NOT_SMI) == 0) {
     Label not_smi;
-    BranchOnNotSmi(object, &not_smi);
+    JumpIfNotSmi(object, &not_smi);
     // Remove smi tag and convert to double.
     mov(scratch1, Operand(object, ASR, kSmiTagSize));
     vmov(scratch3, scratch1);
@@ -1813,9 +1806,9 @@ void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
 }
 
 
-void MacroAssembler::Assert(Condition cc, const char* msg) {
+void MacroAssembler::Assert(Condition cond, const char* msg) {
   if (FLAG_debug_code)
-    Check(cc, msg);
+    Check(cond, msg);
 }
@@ -1848,9 +1841,9 @@ void MacroAssembler::AssertFastElements(Register elements) {
 }
 
 
-void MacroAssembler::Check(Condition cc, const char* msg) {
+void MacroAssembler::Check(Condition cond, const char* msg) {
   Label L;
-  b(cc, &L);
+  b(cond, &L);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -1946,7 +1939,7 @@ void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), eq);
   b(ne, on_not_both_smi);
@@ -1956,7 +1949,7 @@ void MacroAssembler::JumpIfNotBothSmi(Register reg1,
 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), ne);
   b(eq, on_either_smi);
@@ -1964,19 +1957,30 @@ void MacroAssembler::JumpIfEitherSmi(Register reg1,
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(ne, "Operand is a smi");
 }
 
 
 void MacroAssembler::AbortIfNotSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(eq, "Operand is not smi");
 }
 
 
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  cmp(scratch, heap_number_map);
+  b(ne, on_not_heap_number);
+}
+
+
 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
@@ -2003,7 +2007,7 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that neither is a smi.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
   tst(scratch1, Operand(kSmiTagMask));
   b(eq, failure);
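The rewritten `EnterExitFrame` drops the old push-based alignment trick: double registers are now spilled at fixed offsets below `fp` (`-2 * kPointerSize - (i + 1) * kDoubleSize`), and `sp` is rounded down with `and_(sp, sp, Operand(-frame_alignment))` after the return-address slot is reserved. The standalone C++ sketch below mirrors that address arithmetic on plain integers to show where the slots land; the constants, the starting `fp` value, and the `AlignDown` helper are illustrative assumptions for 32-bit ARM, not code from this patch.

```cpp
#include <cstdint>
#include <cstdio>

// Illustrative constants for 32-bit ARM (assumptions, not taken from V8 headers).
const uint32_t kPointerSize = 4;
const uint32_t kDoubleSize = 8;
const int kNumDoubleRegisters = 16;   // d0..d15
const uint32_t kFrameAlignment = 8;   // typical EABI activation frame alignment

// Round an address down to a power-of-two boundary, mirroring
// and_(sp, sp, Operand(-frame_alignment)) in the generated code.
uint32_t AlignDown(uint32_t addr, uint32_t alignment) {
  return addr & ~(alignment - 1);
}

int main() {
  uint32_t fp = 0x7fff0000;  // hypothetical frame pointer right after Push(lr, fp)

  // Room below fp for the saved entry sp and the code object slot.
  uint32_t sp = fp - 2 * kPointerSize;

  // Spill slots for d0..d15, addressed exactly as the new vstr loop does:
  // fp + (offset - (i + 1) * kDoubleSize) with offset == -2 * kPointerSize.
  for (int i = 0; i < kNumDoubleRegisters; i++) {
    uint32_t slot = fp - 2 * kPointerSize - (i + 1) * kDoubleSize;
    std::printf("d%-2d -> fp - %3u (0x%08x)\n", i, fp - slot, slot);
  }
  sp -= kNumDoubleRegisters * kDoubleSize;

  // Reserve the return-address slot, then align sp downwards.
  sp -= kPointerSize;
  sp = AlignDown(sp, kFrameAlignment);

  // The exit frame's saved sp points just above the return-address slot.
  uint32_t saved_sp = sp + kPointerSize;
  std::printf("sp = 0x%08x, saved exit sp = 0x%08x\n", sp, saved_sp);
  return 0;
}
```

ANDing an address with the negation of a power-of-two alignment clears its low bits, which is what the single generated `and_` instruction does to `sp` in place of the old conditional-push alignment sequence.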