Diffstat (limited to 'deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc')
-rw-r--r--  deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 304
1 file changed, 143 insertions(+), 161 deletions(-)
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index 767247b2fd..ee1ef6d939 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -877,7 +877,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ CallCodeObject(reg);
} else {
@@ -925,7 +925,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasRegisterInput(instr, 0)) {
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ JumpCodeObject(reg);
} else {
@@ -962,7 +962,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
CHECK(!instr->InputAt(0)->IsImmediate());
Register reg = i.InputRegister(0);
DCHECK_IMPLIES(
- HasCallDescriptorFlag(instr, CallDescriptor::kFixedTargetRegister),
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ Jump(reg);
frame_access_state()->ClearSPDelta();
@@ -1131,8 +1131,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchDeoptimize: {
DeoptimizationExit* exit =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
- CodeGenResult result = AssembleDeoptimizerCall(exit);
- if (result != kSuccess) return result;
+ __ b(exit->label());
break;
}
case kArchRet:
@@ -1929,21 +1928,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
i.OutputRegister(0), kScratchDoubleReg);
#if V8_TARGET_ARCH_PPC64
- if (check_conversion) {
- // Set 2nd output to zero if conversion fails.
CRegister cr = cr7;
int crbit = v8::internal::Assembler::encode_crbit(
cr, static_cast<CRBit>(VXCVI % CRWIDTH));
__ mcrfs(cr, VXCVI); // extract FPSCR field containing VXCVI into cr7
+ // Handle conversion failures (such as overflow).
if (CpuFeatures::IsSupported(ISELECT)) {
- __ li(i.OutputRegister(1), Operand(1));
- __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand(1));
+ __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
+ } else {
+ __ isel(i.OutputRegister(0), r0, i.OutputRegister(0), crbit);
+ }
} else {
- __ li(i.OutputRegister(1), Operand::Zero());
- __ bc(v8::internal::kInstrSize * 2, BT, crbit);
- __ li(i.OutputRegister(1), Operand(1));
+ if (check_conversion) {
+ __ li(i.OutputRegister(1), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ li(i.OutputRegister(1), Operand(1));
+ } else {
+ __ mr(ip, i.OutputRegister(0));
+ __ li(i.OutputRegister(0), Operand::Zero());
+ __ bc(v8::internal::kInstrSize * 2, BT, crbit);
+ __ mr(i.OutputRegister(0), ip);
+ }
}
- }
#endif
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
@@ -2270,189 +2278,118 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vspltb(dst, dst, Operand(7));
break;
}
-#define SHIFT_TO_CORRECT_LANE(starting_lane_nummber, lane_input, \
- lane_width_in_bytes, input_register) \
- int shift_bits = abs(lane_input - starting_lane_nummber) * \
- lane_width_in_bytes * kBitsPerByte; \
- if (shift_bits > 0) { \
- __ li(ip, Operand(shift_bits)); \
- __ mtvsrd(kScratchDoubleReg, ip); \
- __ vspltb(kScratchDoubleReg, kScratchDoubleReg, Operand(7)); \
- if (lane_input < starting_lane_nummber) { \
- __ vsro(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } else { \
- DCHECK(lane_input > starting_lane_nummber); \
- __ vslo(kScratchDoubleReg, input_register, kScratchDoubleReg); \
- } \
- input_register = kScratchDoubleReg; \
- }
case kPPC_F64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src);
- __ mfvsrd(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovInt64ToDouble(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_F32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(kScratchReg, src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
__ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
break;
}
case kPPC_I64x2ExtractLane: {
- int32_t lane = 1 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(0, lane, 8, src)
- __ mfvsrd(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 8;
+ __ vextractd(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I32x4ExtractLane: {
- int32_t lane = 3 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(1, lane, 4, src)
- __ mfvsrwz(i.OutputRegister(), src);
+ constexpr int lane_width_in_bytes = 4;
+ __ vextractuw(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneU: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(16));
- __ srd(i.OutputRegister(), r0, ip);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I16x8ExtractLaneS: {
- int32_t lane = 7 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(2, lane, 2, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 16);
+ constexpr int lane_width_in_bytes = 2;
+ __ vextractuh(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsh(i.OutputRegister(), kScratchReg);
break;
}
case kPPC_I8x16ExtractLaneU: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(r0, src);
- __ li(ip, Operand(24));
- __ srd(i.OutputRegister(), r0, ip);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
break;
}
case kPPC_I8x16ExtractLaneS: {
- int32_t lane = 15 - i.InputInt8(1);
- Simd128Register src = i.InputSimd128Register(0);
- SHIFT_TO_CORRECT_LANE(4, lane, 1, src)
- __ mfvsrwz(kScratchReg, src);
- __ sradi(i.OutputRegister(), kScratchReg, 24);
- break;
- }
-#undef SHIFT_TO_CORRECT_LANE
-#define GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane, \
- lane_width_in_bytes) \
- uint64_t mask = 0; \
- for (int i = 0, j = 0; i <= kSimd128Size - 1; i++) { \
- mask <<= kBitsPerByte; \
- if (i >= lane * lane_width_in_bytes && \
- i < lane * lane_width_in_bytes + lane_width_in_bytes) { \
- mask |= replacement_value_byte_lane + j; \
- j++; \
- } else { \
- mask |= i; \
- } \
- if (i == (kSimd128Size / 2) - 1) { \
- __ mov(r0, Operand(mask)); \
- mask = 0; \
- } else if (i >= kSimd128Size - 1) { \
- __ mov(ip, Operand(mask)); \
- } \
- } \
- /* Need to maintain 16 byte alignment for lvx */ \
- __ mr(kScratchReg, sp); \
- __ ClearRightImm(sp, sp, Operand(base::bits::WhichPowerOfTwo(16))); \
- __ addi(sp, sp, Operand(-16)); \
- __ StoreP(ip, MemOperand(sp, 0)); \
- __ StoreP(r0, MemOperand(sp, 8)); \
- __ lvx(kScratchDoubleReg, MemOperand(r0, sp)); \
- __ mr(sp, kScratchReg);
+ __ vextractub(kScratchDoubleReg, i.InputSimd128Register(0),
+ Operand(15 - i.InputInt8(1)));
+ __ mfvsrd(kScratchReg, kScratchDoubleReg);
+ __ extsb(i.OutputRegister(), kScratchReg);
+ break;
+ }
case kPPC_F64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
+ Simd128Register dst = i.OutputSimd128Register();
__ MovDoubleToInt64(r0, i.InputDoubleRegister(2));
- __ mtvsrd(dst, r0);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_F32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(2));
- __ mtvsrd(dst, kScratchReg);
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ mtvsrd(kScratchDoubleReg, r0);
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I64x2ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 1 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 16;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 8;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertd(dst, kScratchDoubleReg,
+ Operand((1 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I32x4ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 3 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 20;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertw(dst, kScratchDoubleReg,
+ Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I16x8ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
- Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 7 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 22;
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 2;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinserth(dst, kScratchDoubleReg,
+ Operand((7 - i.InputInt8(1)) * lane_width_in_bytes));
break;
}
case kPPC_I8x16ReplaceLane: {
- Simd128Register src = i.InputSimd128Register(0);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
Simd128Register dst = i.OutputSimd128Register();
- int32_t lane = 15 - i.InputInt8(1);
- constexpr int replacement_value_byte_lane = 23;
- constexpr int lane_width_in_bytes = 1;
- GENERATE_REPLACE_LANE_MASK(lane, replacement_value_byte_lane,
- lane_width_in_bytes)
- __ mtvsrd(dst, i.InputRegister(2));
- __ vperm(dst, src, dst, kScratchDoubleReg);
+ __ mtvsrd(kScratchDoubleReg, i.InputRegister(2));
+ __ vinsertb(dst, kScratchDoubleReg, Operand(15 - i.InputInt8(1)));
break;
}
-#undef GENERATE_REPLACE_LANE_MASK
case kPPC_F64x2Add: {
__ xvadddp(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
@@ -3248,51 +3185,58 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vperm(dst, src0, src1, kScratchDoubleReg);
break;
}
- case kPPC_I16x8AddSaturateS: {
+ case kPPC_I16x8AddSatS: {
__ vaddshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateS: {
+ case kPPC_I16x8SubSatS: {
__ vsubshs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8AddSaturateU: {
+ case kPPC_I16x8AddSatU: {
__ vadduhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I16x8SubSaturateU: {
+ case kPPC_I16x8SubSatU: {
__ vsubuhs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateS: {
+ case kPPC_I8x16AddSatS: {
__ vaddsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateS: {
+ case kPPC_I8x16SubSatS: {
__ vsubsbs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16AddSaturateU: {
+ case kPPC_I8x16AddSatU: {
__ vaddubs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
- case kPPC_I8x16SubSaturateU: {
+ case kPPC_I8x16SubSatU: {
__ vsububs(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputSimd128Register(1));
break;
}
case kPPC_I8x16Swizzle: {
- // Reverse the input to match IBM lane numbering.
- Simd128Register tempFPReg1 = i.ToSimd128Register(instr->TempAt(0));
+ Simd128Register dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1),
+ tempFPReg1 = i.ToSimd128Register(instr->TempAt(0)),
+ tempFPReg2 = i.ToSimd128Register(instr->TempAt(1));
+ // Saturate the indices to 5 bits. Input indices more than 31 should
+ // return 0.
+ __ xxspltib(tempFPReg2, Operand(31));
+ __ vminub(tempFPReg2, src1, tempFPReg2);
__ addi(sp, sp, Operand(-16));
- __ stxvd(i.InputSimd128Register(0), MemOperand(r0, sp));
+ __ stxvd(src0, MemOperand(r0, sp));
__ ldbrx(r0, MemOperand(r0, sp));
__ li(ip, Operand(8));
__ ldbrx(ip, MemOperand(ip, sp));
@@ -3302,8 +3246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lxvd(kScratchDoubleReg, MemOperand(r0, sp));
__ addi(sp, sp, Operand(16));
__ vxor(tempFPReg1, tempFPReg1, tempFPReg1);
- __ vperm(i.OutputSimd128Register(), kScratchDoubleReg, tempFPReg1,
- i.InputSimd128Register(1));
+ __ vperm(dst, kScratchDoubleReg, tempFPReg1, tempFPReg2);
break;
}
case kPPC_F64x2Qfma: {
@@ -3438,6 +3381,42 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ xvrspi(i.OutputSimd128Register(), i.InputSimd128Register(0));
break;
}
+ case kPPC_I32x4BitMask: {
+ __ mov(kScratchReg,
+ Operand(0x8080808000204060)); // Select 0 for the high bits.
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I16x8BitMask: {
+ __ mov(kScratchReg, Operand(0x10203040506070));
+ __ mtvsrd(kScratchDoubleReg, kScratchReg);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractub(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I8x16BitMask: {
+ Register temp = i.ToRegister(instr->TempAt(0));
+ __ mov(temp, Operand(0x8101820283038));
+ __ mov(ip, Operand(0x4048505860687078));
+ __ mtvsrdd(kScratchDoubleReg, temp, ip);
+ __ vbpermq(kScratchDoubleReg, i.InputSimd128Register(0),
+ kScratchDoubleReg);
+ __ vextractuh(kScratchDoubleReg, kScratchDoubleReg, Operand(6));
+ __ mfvsrd(i.OutputRegister(), kScratchDoubleReg);
+ break;
+ }
+ case kPPC_I32x4DotI16x8S: {
+ __ vxor(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ vmsumshm(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), kScratchDoubleReg);
+ break;
+ }
case kPPC_StoreCompressTagged: {
ASSEMBLE_STORE_INTEGER(StoreTaggedField, StoreTaggedFieldX);
break;
@@ -3885,7 +3864,10 @@ void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
void CodeGenerator::FinishCode() {}
-void CodeGenerator::PrepareForDeoptimizationExits(int deopt_count) {}
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {
+ // __ EmitConstantPool();
+}
void CodeGenerator::AssembleMove(InstructionOperand* source,
InstructionOperand* destination) {