author    Michaël Zasso <targos@protonmail.com>  2017-09-12 11:34:59 +0200
committer Anna Henningsen <anna@addaleax.net>  2017-09-13 16:15:18 +0200
commit    d82e1075dbc2cec2d6598ade10c1f43805f690fd (patch)
tree      ccd242b9b491dfc341d1099fe11b0ef528839877 /deps/v8/src/mips64/assembler-mips64.cc
parent    b4b7ac6ae811b2b5a3082468115dfb5a5246fe3f (diff)
download  node-new-d82e1075dbc2cec2d6598ade10c1f43805f690fd.tar.gz
deps: update V8 to 6.1.534.36
PR-URL: https://github.com/nodejs/node/pull/14730
Reviewed-By: Ben Noordhuis <info@bnoordhuis.nl>
Reviewed-By: Ali Ijaz Sheikh <ofrobots@google.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Reviewed-By: Matteo Collina <matteo.collina@gmail.com>
Diffstat (limited to 'deps/v8/src/mips64/assembler-mips64.cc')
-rw-r--r--  deps/v8/src/mips64/assembler-mips64.cc | 313
1 file changed, 232 insertions(+), 81 deletions(-)
diff --git a/deps/v8/src/mips64/assembler-mips64.cc b/deps/v8/src/mips64/assembler-mips64.cc
index 084d5db036..37c839ce95 100644
--- a/deps/v8/src/mips64/assembler-mips64.cc
+++ b/deps/v8/src/mips64/assembler-mips64.cc
@@ -37,6 +37,7 @@
#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
+#include "src/code-stubs.h"
#include "src/mips64/assembler-mips64-inl.h"
namespace v8 {
@@ -205,21 +206,27 @@ void RelocInfo::unchecked_update_wasm_size(Isolate* isolate, uint32_t size,
// Implementation of Operand and MemOperand.
// See assembler-mips-inl.h for inlined constructors.
-Operand::Operand(Handle<Object> handle) {
- AllowDeferredHandleDereference using_raw_address;
+Operand::Operand(Handle<HeapObject> handle) {
rm_ = no_reg;
- // Verify all Objects referred by code are NOT in new space.
- Object* obj = *handle;
- if (obj->IsHeapObject()) {
- imm64_ = reinterpret_cast<intptr_t>(handle.location());
- rmode_ = RelocInfo::EMBEDDED_OBJECT;
- } else {
- // No relocation needed.
- imm64_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE64;
- }
+ value_.immediate = reinterpret_cast<intptr_t>(handle.address());
+ rmode_ = RelocInfo::EMBEDDED_OBJECT;
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
}
+Operand Operand::EmbeddedCode(CodeStub* stub) {
+ Operand result(0, RelocInfo::CODE_TARGET);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(stub);
+ return result;
+}
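
[Editorial aside, not part of the commit] Operand::EmbeddedNumber above takes two paths: a double that can be represented as a Smi becomes a plain immediate with no heap allocation, while anything else records a HeapObjectRequest to be allocated and patched in later (see AllocateAndInstallRequestedHeapObjects below). A minimal standalone sketch of that decision, approximating what V8's DoubleToSmiInteger checks; the exact Smi range and the -0.0 handling are assumptions here, not taken from this diff:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Returns true if 'value' is a whole number in 32-bit range (and not -0.0),
// i.e. it could be encoded directly as a Smi immediate.
static bool FitsSmi(double value, int32_t* out) {
  if (value != std::floor(value)) return false;           // not integral
  if (value == 0.0 && std::signbit(value)) return false;  // -0.0 needs a HeapNumber
  if (value < INT32_MIN || value > INT32_MAX) return false;
  *out = static_cast<int32_t>(value);
  return true;
}

int main() {
  int32_t smi;
  std::printf("42.0 -> %s\n", FitsSmi(42.0, &smi) ? "Smi immediate" : "HeapObjectRequest");
  std::printf("0.5  -> %s\n", FitsSmi(0.5, &smi) ? "Smi immediate" : "HeapObjectRequest");
}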
MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
offset_ = offset;
@@ -232,6 +239,24 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
offset_ = unit * multiplier + offset_addend;
}
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber(request.heap_number(),
+ IMMUTABLE, TENURED);
+ break;
+ case HeapObjectRequest::kCodeStub:
+ request.code_stub()->set_isolate(isolate);
+ object = request.code_stub()->GetCode();
+ break;
+ }
+ Address pc = buffer_ + request.offset();
+ set_target_value_at(isolate, pc,
+ reinterpret_cast<uint64_t>(object.location()));
+ }
+}
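
[Editorial aside, not part of the commit] AllocateAndInstallRequestedHeapObjects is the second half of a record-now, patch-later scheme: while assembling, only a placeholder value is emitted and its buffer offset remembered; once an Isolate is available in GetCode, the heap number or code stub is actually allocated and the placeholder is overwritten. A self-contained sketch of the pattern — the names and byte-level patching are illustrative assumptions, not V8's encoding:

#include <cstdint>
#include <cstring>
#include <vector>

struct Request {
  size_t offset;  // where in the buffer the placeholder lives
  double value;   // what to allocate once allocation is possible
};

struct MiniAssembler {
  std::vector<uint8_t> buffer;
  std::vector<Request> requests;

  // Assembly time: emit 8 zero bytes and remember where they are.
  void EmitPlaceholderLoad(double v) {
    requests.push_back({buffer.size(), v});
    buffer.insert(buffer.end(), 8, 0);
  }

  // GetCode time: allocate for real and patch each placeholder
  // (the analogue of set_target_value_at above).
  void Finalize(uint64_t (*allocate)(double)) {
    for (const Request& r : requests) {
      uint64_t addr = allocate(r.value);
      std::memcpy(&buffer[r.offset], &addr, sizeof addr);
    }
  }
};

int main() {
  MiniAssembler masm;
  masm.EmitPlaceholderLoad(0.5);
  masm.Finalize([](double) -> uint64_t { return 0xdeadbeef; });  // fake allocator
}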
// -----------------------------------------------------------------------------
// Specific instructions, constants, and masks.
@@ -270,8 +295,7 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
- : AssemblerBase(isolate_data, buffer, buffer_size),
- recorded_ast_id_(TypeFeedbackId::None()) {
+ : AssemblerBase(isolate_data, buffer, buffer_size) {
reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
last_trampoline_pool_end_ = 0;
@@ -287,14 +311,14 @@ Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size)
trampoline_emitted_ = FLAG_force_long_branches;
unbound_labels_count_ = 0;
block_buffer_growth_ = false;
-
- ClearRecordedAstId();
}
-
-void Assembler::GetCode(CodeDesc* desc) {
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) {
EmitForbiddenSlotInstruction();
DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
// Set up code descriptor.
desc->buffer = buffer_;
desc->buffer_size = buffer_size_;
@@ -309,7 +333,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
- DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+ DCHECK(m >= 4 && base::bits::IsPowerOfTwo(m));
EmitForbiddenSlotInstruction();
while ((pc_offset() & (m - 1)) != 0) {
nop();
@@ -461,6 +485,29 @@ const int kEndOfChain = -4;
// Determines the end of the Jump chain (a subset of the label link chain).
const int kEndOfJumpChain = 0;
+bool Assembler::IsMsaBranch(Instr instr) {
+ uint32_t opcode = GetOpcodeField(instr);
+ uint32_t rs_field = GetRsField(instr);
+ if (opcode == COP1) {
+ switch (rs_field) {
+ case BZ_V:
+ case BZ_B:
+ case BZ_H:
+ case BZ_W:
+ case BZ_D:
+ case BNZ_V:
+ case BNZ_B:
+ case BNZ_H:
+ case BNZ_W:
+ case BNZ_D:
+ return true;
+ default:
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
bool Assembler::IsBranch(Instr instr) {
uint32_t opcode = GetOpcodeField(instr);
@@ -474,7 +521,7 @@ bool Assembler::IsBranch(Instr instr) {
rt_field == BLTZAL || rt_field == BGEZAL)) ||
(opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
(opcode == COP1 && rs_field == BC1EQZ) ||
- (opcode == COP1 && rs_field == BC1NEZ);
+ (opcode == COP1 && rs_field == BC1NEZ) || IsMsaBranch(instr);
if (!isBranch && kArchVariant == kMips64r6) {
// All the 3 variants of POP10 (BOVC, BEQC, BEQZALC) and
// POP30 (BNVC, BNEC, BNEZALC) are branch ops.
@@ -1434,6 +1481,7 @@ void Assembler::bgec(Register rs, Register rt, int16_t offset) {
void Assembler::bgezal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1504,6 +1552,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1540,6 +1589,7 @@ void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
void Assembler::blezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1548,6 +1598,7 @@ void Assembler::blezalc(Register rt, int16_t offset) {
void Assembler::bgezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BLEZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1555,6 +1606,7 @@ void Assembler::bgezalc(Register rt, int16_t offset) {
void Assembler::bgezall(Register rs, int16_t offset) {
DCHECK(kArchVariant != kMips64r6);
DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rs.is(ra)));
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
BlockTrampolinePoolFor(1); // For associated delay slot.
@@ -1564,6 +1616,7 @@ void Assembler::bgezall(Register rs, int16_t offset) {
void Assembler::bltzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, rt, rt, offset, CompactBranchType::COMPACT_BRANCH);
}
@@ -1571,6 +1624,7 @@ void Assembler::bltzalc(Register rt, int16_t offset) {
void Assembler::bgtzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(BGTZ, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1579,6 +1633,7 @@ void Assembler::bgtzalc(Register rt, int16_t offset) {
void Assembler::beqzalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(ADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
@@ -1587,6 +1642,7 @@ void Assembler::beqzalc(Register rt, int16_t offset) {
void Assembler::bnezalc(Register rt, int16_t offset) {
DCHECK(kArchVariant == kMips64r6);
DCHECK(!(rt.is(zero_reg)));
+ DCHECK(!(rt.is(ra)));
GenInstrImmediate(DADDI, zero_reg, rt, offset,
CompactBranchType::COMPACT_BRANCH);
}
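
[Editorial note, not part of the commit] The DCHECK(!(rt.is(ra))) lines added to every branch-and-link ('al'/'alc') emitter above guard the same hazard: these instructions write the return address into ra, so ra cannot also be the register being tested. A hypothetical usage sketch, assuming the usual masm shorthand (not compilable on its own):

  __ bltzal(a0, offset);  // fine: tests a0, clobbers ra with the return address
  __ bltzal(ra, offset);  // now fails the DCHECK: ra would be read and clobbered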
@@ -2062,31 +2118,130 @@ void Assembler::dlsa(Register rd, Register rt, Register rs, uint8_t sa) {
// ------------Memory-instructions-------------
-// Helper for base-reg + offset, when offset is larger than int16.
-void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
- DCHECK(!src.rm().is(at));
- DCHECK(is_int32(src.offset_));
+void Assembler::AdjustBaseAndOffset(MemOperand& src,
+ OffsetAccessType access_type,
+ int second_access_add_to_offset) {
+ // This method is used to adjust the base register and offset pair
+ // for a load/store when the offset doesn't fit into int16_t.
+ // It is assumed that 'base + offset' is sufficiently aligned for memory
+ // operands that are machine word in size or smaller. For doubleword-sized
+ // operands it's assumed that 'base' is a multiple of 8, while 'offset'
+ // may be a multiple of 4 (e.g. 4-byte-aligned long and double arguments
+ // and spilled variables on the stack accessed relative to the stack
+ // pointer register).
+ // We preserve the "alignment" of 'offset' by adjusting it by a multiple of 8.
+
+ bool doubleword_aligned = (src.offset() & (kDoubleSize - 1)) == 0;
+ bool two_accesses = static_cast<bool>(access_type) || !doubleword_aligned;
+ DCHECK(second_access_add_to_offset <= 7); // Must be <= 7.
+
+ // is_int16 must be passed a signed value, hence the static cast below.
+ if (is_int16(src.offset()) &&
+ (!two_accesses || is_int16(static_cast<int32_t>(
+ src.offset() + second_access_add_to_offset)))) {
+ // Nothing to do: 'offset' (and, if needed, 'offset + 4', or other specified
+ // value) fits into int16_t.
+ return;
+ }
+
+ DCHECK(!src.rm().is(
+ at)); // Must not overwrite the register 'base' while loading 'offset'.
- if (kArchVariant == kMips64r6) {
- int32_t hi = (src.offset_ >> kLuiShift) & kImm16Mask;
- if (src.offset_ & kNegOffset) {
- if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) {
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
- return;
- }
+#ifdef DEBUG
+ // Remember the "(mis)alignment" of 'offset', it will be checked at the end.
+ uint32_t misalignment = src.offset() & (kDoubleSize - 1);
+#endif
+
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of two 16-bit signed offsets. This can save an instruction or two.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -64KB to about +64KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForSimpleAdjustment =
+ 0x7ff8; // Max int16_t that's a multiple of 8.
+ constexpr int32_t kMaxOffsetForSimpleAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForSimpleAdjustment) {
+ daddiu(at, src.rm(), kMinOffsetForSimpleAdjustment);
+ src.offset_ -= kMinOffsetForSimpleAdjustment;
+ } else if (-kMaxOffsetForSimpleAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ daddiu(at, src.rm(), -kMinOffsetForSimpleAdjustment);
+ src.offset_ += kMinOffsetForSimpleAdjustment;
+ } else if (kArchVariant == kMips64r6) {
+ // On r6 take advantage of the daui instruction, e.g.:
+ // daui at, base, offset_high
+ // [dahi at, 1] // When `offset` is close to +2GB.
+ // lw reg_lo, offset_low(at)
+ // [lw reg_hi, (offset_low+4)(at)] // If misaligned 64-bit load.
+ // or when offset_low+4 overflows int16_t:
+ // daui at, base, offset_high
+ // daddiu at, at, 8
+ // lw reg_lo, (offset_low-8)(at)
+ // lw reg_hi, (offset_low-4)(at)
+ int16_t offset_low = static_cast<uint16_t>(src.offset());
+ int32_t offset_low32 = offset_low;
+ int16_t offset_high = static_cast<uint16_t>(src.offset() >> 16);
+ bool increment_hi16 = offset_low < 0;
+ bool overflow_hi16 = false;
+
+ if (increment_hi16) {
+ offset_high++;
+ overflow_hi16 = (offset_high == -32768);
+ }
+ daui(at, src.rm(), static_cast<uint16_t>(offset_high));
+
+ if (overflow_hi16) {
+ dahi(at, 1);
+ }
- hi += 1;
+ if (two_accesses && !is_int16(static_cast<int32_t>(
+ offset_low32 + second_access_add_to_offset))) {
+ // Avoid overflow in the 16-bit offset of the load/store instruction when
+ // adding 4.
+ daddiu(at, at, kDoubleSize);
+ offset_low32 -= kDoubleSize;
}
- daui(at, src.rm(), hi);
- daddiu(at, at, src.offset_ & kImm16Mask);
+ src.offset_ = offset_low32;
} else {
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
- ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- daddu(at, at, src.rm()); // Add base register.
+ // Do not load the whole 32-bit 'offset' if it can be represented as
+ // a sum of three 16-bit signed offsets. This can save an instruction.
+ // To simplify matters, only do this for a symmetric range of offsets from
+ // about -96KB to about +96KB, allowing further addition of 4 when accessing
+ // 64-bit variables with two 32-bit accesses.
+ constexpr int32_t kMinOffsetForMediumAdjustment =
+ 2 * kMinOffsetForSimpleAdjustment;
+ constexpr int32_t kMaxOffsetForMediumAdjustment =
+ 3 * kMinOffsetForSimpleAdjustment;
+ if (0 <= src.offset() && src.offset() <= kMaxOffsetForMediumAdjustment) {
+ daddiu(at, src.rm(), kMinOffsetForMediumAdjustment / 2);
+ daddiu(at, at, kMinOffsetForMediumAdjustment / 2);
+ src.offset_ -= kMinOffsetForMediumAdjustment;
+ } else if (-kMaxOffsetForMediumAdjustment <= src.offset() &&
+ src.offset() < 0) {
+ daddiu(at, src.rm(), -kMinOffsetForMediumAdjustment / 2);
+ daddiu(at, at, -kMinOffsetForMediumAdjustment / 2);
+ src.offset_ += kMinOffsetForMediumAdjustment;
+ } else {
+ // Now that all shorter options have been exhausted, load the full 32-bit
+ // offset.
+ int32_t loaded_offset = RoundDown(src.offset(), kDoubleSize);
+ lui(at, (loaded_offset >> kLuiShift) & kImm16Mask);
+ ori(at, at, loaded_offset & kImm16Mask); // Load 32-bit offset.
+ daddu(at, at, src.rm());
+ src.offset_ -= loaded_offset;
+ }
}
+ src.rm_ = at;
+
+ DCHECK(is_int16(src.offset()));
+ if (two_accesses) {
+ DCHECK(is_int16(
+ static_cast<int32_t>(src.offset() + second_access_add_to_offset)));
+ }
+ DCHECK(misalignment == (src.offset() & (kDoubleSize - 1)));
}
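
[Editorial worked example, not part of the commit] Two concrete cases for AdjustBaseAndOffset above. With base a0 and offset 0x9000 (too large for int16_t, but within the ~64KB "simple adjustment" range), one daddiu replaces the old three-instruction lui/ori/daddu sequence:

  daddiu at, a0, 0x7ff8   // kMinOffsetForSimpleAdjustment: largest int16_t multiple of 8
  lw     v0, 0x1008(at)   // 0x9000 - 0x7ff8 = 0x1008 fits int16_t; the adjustment is a
                          // multiple of 8, so alignment (and offset + 4) still works

On r6 with offset 0x18000, offset_low sign-extends to -0x8000, so offset_high is bumped from 1 to 2 before daui:

  daui at, a0, 2          // adds 2 << 16 = 0x20000 to the base
  lw   v0, -0x8000(at)    // 0x20000 - 0x8000 = 0x18000, the requested offset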
void Assembler::lb(Register rd, const MemOperand& rs) {
@@ -2849,26 +3004,30 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::madd_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_S);
+ // On Loongson 3A (MIPS64R2), the MADD.S instruction is actually a fused
+ // MADD.S, and this causes failures in some of the tests. Since this
+ // optimization is rarely used, and not used at all on MIPS64R6, this
+ // instruction is removed.
+ UNREACHABLE();
}
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+ // On Loongson 3A (MIPS64R2), the MADD.D instruction is actually a fused
+ // MADD.D, and this causes failures in some of the tests. Since this
+ // optimization is rarely used, and not used at all on MIPS64R6, this
+ // instruction is removed.
+ UNREACHABLE();
}
void Assembler::msub_s(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_S);
+ // See explanation for instruction madd_s.
+ UNREACHABLE();
}
void Assembler::msub_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
- DCHECK(kArchVariant == kMips64r2);
- GenInstrRegister(COP1X, fr, ft, fs, fd, MSUB_D);
+ // See explanation for instruction madd_d.
+ UNREACHABLE();
}
void Assembler::maddf_s(FPURegister fd, FPURegister fs, FPURegister ft) {
@@ -3253,14 +3412,17 @@ MSA_BRANCH_LIST(MSA_BRANCH)
V(st_w, ST_W) \
V(st_d, ST_D)
-#define MSA_LD_ST(name, opcode) \
- void Assembler::name(MSARegister wd, const MemOperand& rs) { \
- if (is_int10(rs.offset())) { \
- GenInstrMsaMI10(opcode, rs.offset(), rs.rm(), wd); \
- } else { \
- LoadRegPlusOffsetToAt(rs); \
- GenInstrMsaMI10(opcode, 0, at, wd); \
- } \
+#define MSA_LD_ST(name, opcode) \
+ void Assembler::name(MSARegister wd, const MemOperand& rs) { \
+ MemOperand source = rs; \
+ AdjustBaseAndOffset(source); \
+ if (is_int10(source.offset())) { \
+ GenInstrMsaMI10(opcode, source.offset(), source.rm(), wd); \
+ } else { \
+ DCHECK(!rs.rm().is(at)); \
+ daddiu(at, source.rm(), source.offset()); \
+ GenInstrMsaMI10(opcode, 0, at, wd); \
+ } \
}
MSA_LD_ST_LIST(MSA_LD_ST)
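
[Editorial sketch, not part of the commit] Written out, MSA_LD_ST(st_w, ST_W) expands to the following method. Note that AdjustBaseAndOffset only guarantees an int16_t offset, while the MSA MI10 format has just 10 offset bits, hence the extra daddiu fallback:

void Assembler::st_w(MSARegister wd, const MemOperand& rs) {
  MemOperand source = rs;
  AdjustBaseAndOffset(source);      // offset now fits int16_t
  if (is_int10(source.offset())) {  // MI10 encoding: 10-bit signed offset
    GenInstrMsaMI10(ST_W, source.offset(), source.rm(), wd);
  } else {
    DCHECK(!rs.rm().is(at));
    daddiu(at, source.rm(), source.offset());  // fold the remainder into at
    GenInstrMsaMI10(ST_W, 0, at, wd);
  }
}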
@@ -3815,9 +3977,7 @@ void Assembler::GrowBuffer() {
// Some internal data structures overflow for very large buffers,
// they must ensure that kMaximalBufferSize is not too large.
- if (desc.buffer_size > kMaximalBufferSize ||
- static_cast<size_t>(desc.buffer_size) >
- isolate_data().max_old_generation_size_) {
+ if (desc.buffer_size > kMaximalBufferSize) {
V8::FatalProcessOutOfMemory("Assembler::GrowBuffer");
}
@@ -3906,14 +4066,7 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
return;
}
DCHECK(buffer_space() >= kMaxRelocSize); // Too late to grow buffer here.
- if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
- RelocInfo reloc_info_with_ast_id(pc_, rmode, RecordedAstId().ToInt(),
- NULL);
- ClearRecordedAstId();
- reloc_info_writer.Write(&reloc_info_with_ast_id);
- } else {
- reloc_info_writer.Write(&rinfo);
- }
+ reloc_info_writer.Write(&rinfo);
}
}
@@ -4005,7 +4158,6 @@ Address Assembler::target_address_at(Address pc) {
}
// We should never get here, force a bad address if we do.
UNREACHABLE();
- return (Address)0x0;
}
@@ -4030,18 +4182,17 @@ void Assembler::QuietNaN(HeapObject* object) {
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
// and possibly removed.
-void Assembler::set_target_address_at(Isolate* isolate, Address pc,
- Address target,
- ICacheFlushMode icache_flush_mode) {
-// There is an optimization where only 4 instructions are used to load address
-// in code on MIP64 because only 48-bits of address is effectively used.
-// It relies on fact the upper [63:48] bits are not used for virtual address
-// translation and they have to be set according to value of bit 47 in order
-// get canonical address.
+void Assembler::set_target_value_at(Isolate* isolate, Address pc,
+ uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+ // There is an optimization where only 4 instructions are used to load an
+ // address in code on MIPS64, because only 48 bits of the address are
+ // effectively used. It relies on the fact that the upper bits [63:48] are
+ // not used for virtual address translation and have to be set according to
+ // the value of bit 47 in order to get a canonical address.
Instr instr1 = instr_at(pc + kInstrSize);
uint32_t rt_code = GetRt(instr1);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint64_t itarget = reinterpret_cast<uint64_t>(target);
#ifdef DEBUG
// Check we have the result from a li macro-instruction.
@@ -4056,11 +4207,11 @@ void Assembler::set_target_address_at(Isolate* isolate, Address pc,
// ori rt, rt, lower-16.
// dsll rt, rt, 16.
// ori rt rt, lower-16.
- *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
- *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
- | ((itarget >> 16) & kImm16Mask);
- *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
- | (itarget & kImm16Mask);
+ *p = LUI | (rt_code << kRtShift) | ((target >> 32) & kImm16Mask);
+ *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift) |
+ ((target >> 16) & kImm16Mask);
+ *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift) |
+ (target & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
Assembler::FlushICache(isolate, pc, 4 * Assembler::kInstrSize);
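
[Editorial sketch, not part of the commit] The patching above splits the target across the 4-instruction li sequence: lui takes bits [47:32], the first ori bits [31:16], and the final ori bits [15:0]. Bits [63:48] are never encoded, because lui's sign extension followed by the dsll reproduces them from bit 47 — the canonical-address trick the comment describes. A standalone check of the split:

#include <cstdint>
#include <cstdio>

int main() {
  uint64_t target = 0x0000123456789abcULL;
  uint16_t hi  = (target >> 32) & 0xffff;  // lui  rt, hi       -> bits [47:32]
  uint16_t mid = (target >> 16) & 0xffff;  // ori  rt, rt, mid  -> bits [31:16]
                                           // dsll rt, rt, 16   (p + 2, untouched)
  uint16_t lo  = target & 0xffff;          // ori  rt, rt, lo   -> bits [15:0]
  std::printf("lui 0x%04x / ori 0x%04x / dsll 16 / ori 0x%04x\n", hi, mid, lo);
}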