author     vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>   2023-05-08 21:59:40 +0000
committer  vboxsync <vboxsync@cfe28804-0f27-0410-a406-dd0f0b0b656f>   2023-05-08 21:59:40 +0000
commit     68a4e3b671ae124bdabeefadc37a69208497ccca (patch)
tree       02ff0ca51b75c97ffef53be5da4e0cb03d5066fd /src
parent     19bc2db4a56e8947abf8d8b3e689cdac332f1cde (diff)
VMM/IEM: Baked the iemOpHlpCalcRmEffAddrEx functionality into the regular function by extending the cbImm parameter. bugref:10369
git-svn-id: https://www.virtualbox.org/svn/vbox/trunk@99685 cfe28804-0f27-0410-a406-dd0f0b0b656f
Diffstat (limited to 'src')
-rw-r--r--  src/VBox/VMM/VMMAll/IEMAll.cpp                        334
-rw-r--r--  src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h     6
-rw-r--r--  src/VBox/VMM/include/IEMOpHlp.h                         5
3 files changed, 21 insertions(+), 324 deletions(-)
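
The change below folds the old cbImm byte and the Ex variant's offRsp byte into a single 32-bit cbImmAndRspOffset argument: the low byte is the size of any immediate following the effective-address bytes (used for RIP-relative addressing), and the second byte is the RSP displacement used by POP with a memory operand. A minimal stand-alone sketch of that packing convention follows; the main() wrapper and local variable names are illustrative only, the real code operates on pVCpu and the ModRM byte inside iemOpHlpCalcRmEffAddr:

/*
 * Illustrative sketch of the cbImmAndRspOffset packing (not VirtualBox code).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Caller side, as in iemOp_pop_Ev for the 32-bit operand size: no
       immediate follows, and RSP has already been adjusted by 4 when the
       memory operand is evaluated, so the second byte carries 4 (4 << 8). */
    uint32_t cbImmAndRspOffset = (uint32_t)4 << 8;

    /* Decoder side, as in iemOpHlpCalcRmEffAddr: */
    uint32_t cbImm  = cbImmAndRspOffset & UINT32_C(0xff); /* added to RIP + instruction length */
    uint32_t offRsp = cbImmAndRspOffset >> 8;             /* added to the RSP/ESP base register */

    printf("cbImm=%u offRsp=%u\n", (unsigned)cbImm, (unsigned)offRsp);
    return 0;
}

This is why the POP Ev decoder below can drop the separate iemOpHlpCalcRmEffAddrEx call and simply pass 2 << 8, 4 << 8 or 8 << 8 to the regular helper.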
diff --git a/src/VBox/VMM/VMMAll/IEMAll.cpp b/src/VBox/VMM/VMMAll/IEMAll.cpp
index 801ad78d650..00d6a5cb751 100644
--- a/src/VBox/VMM/VMMAll/IEMAll.cpp
+++ b/src/VBox/VMM/VMMAll/IEMAll.cpp
@@ -8770,12 +8770,13 @@ VBOXSTRICTRC iemMemMarkSelDescAccessed(PVMCPUCC pVCpu, uint16_t uSel) RT_NOEXCEP
* @return Strict VBox status code.
* @param pVCpu The cross context virtual CPU structure of the calling thread.
* @param bRm The ModRM byte.
- * @param cbImm The size of any immediate following the
- * effective address opcode bytes. Important for
- * RIP relative addressing.
+ * @param cbImmAndRspOffset - First byte: The size of any immediate
+ * following the effective address opcode bytes
+ * (only for RIP relative addressing).
+ * - Second byte: RSP displacement (for POP [ESP]).
* @param pGCPtrEff Where to return the effective address.
*/
-VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
+VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT
{
Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
# define SET_SS_DEF() \
@@ -8865,7 +8866,7 @@ VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, P
case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
- case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
+ case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
case 5:
if ((bRm & X86_MODRM_MOD_MASK) != 0)
{
@@ -8930,7 +8931,7 @@ VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, P
if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
{
IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
- u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
+ u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
}
else
{
@@ -8987,311 +8988,7 @@ VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, P
case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
- case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
- case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
- case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
- case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
- case 9: u64EffAddr += pVCpu->cpum.GstCtx.r9; break;
- case 10: u64EffAddr += pVCpu->cpum.GstCtx.r10; break;
- case 11: u64EffAddr += pVCpu->cpum.GstCtx.r11; break;
- case 12: u64EffAddr += pVCpu->cpum.GstCtx.r12; break;
- case 14: u64EffAddr += pVCpu->cpum.GstCtx.r14; break;
- case 15: u64EffAddr += pVCpu->cpum.GstCtx.r15; break;
- /* complicated encodings */
- case 5:
- case 13:
- if ((bRm & X86_MODRM_MOD_MASK) != 0)
- {
- if (!pVCpu->iem.s.uRexB)
- {
- u64EffAddr += pVCpu->cpum.GstCtx.rbp;
- SET_SS_DEF();
- }
- else
- u64EffAddr += pVCpu->cpum.GstCtx.r13;
- }
- else
- {
- uint32_t u32Disp;
- IEM_OPCODE_GET_NEXT_U32(&u32Disp);
- u64EffAddr += (int32_t)u32Disp;
- }
- break;
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
- break;
- }
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
-
- /* Get and add the displacement. */
- switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
- {
- case 0:
- break;
- case 1:
- {
- int8_t i8Disp;
- IEM_OPCODE_GET_NEXT_S8(&i8Disp);
- u64EffAddr += i8Disp;
- break;
- }
- case 2:
- {
- uint32_t u32Disp;
- IEM_OPCODE_GET_NEXT_U32(&u32Disp);
- u64EffAddr += (int32_t)u32Disp;
- break;
- }
- IEM_NOT_REACHED_DEFAULT_CASE_RET(); /* (caller checked for these) */
- }
-
- }
-
- if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_64BIT)
- *pGCPtrEff = u64EffAddr;
- else
- {
- Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
- *pGCPtrEff = u64EffAddr & UINT32_MAX;
- }
- }
-
- Log5(("iemOpHlpCalcRmEffAddr: EffAddr=%#010RGv\n", *pGCPtrEff));
- return VINF_SUCCESS;
-}
-
-
-/**
- * Calculates the effective address of a ModR/M memory operand.
- *
- * Meant to be used via IEM_MC_CALC_RM_EFF_ADDR.
- *
- * @return Strict VBox status code.
- * @param pVCpu The cross context virtual CPU structure of the calling thread.
- * @param bRm The ModRM byte.
- * @param cbImm The size of any immediate following the
- * effective address opcode bytes. Important for
- * RIP relative addressing.
- * @param pGCPtrEff Where to return the effective address.
- * @param offRsp RSP displacement.
- */
-VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT
-{
- Log5(("iemOpHlpCalcRmEffAddr: bRm=%#x\n", bRm));
-# define SET_SS_DEF() \
- do \
- { \
- if (!(pVCpu->iem.s.fPrefixes & IEM_OP_PRF_SEG_MASK)) \
- pVCpu->iem.s.iEffSeg = X86_SREG_SS; \
- } while (0)
-
- if (pVCpu->iem.s.enmCpuMode != IEMMODE_64BIT)
- {
-/** @todo Check the effective address size crap! */
- if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT)
- {
- uint16_t u16EffAddr;
-
- /* Handle the disp16 form with no registers first. */
- if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 6)
- IEM_OPCODE_GET_NEXT_U16(&u16EffAddr);
- else
- {
- /* Get the displacment. */
- switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
- {
- case 0: u16EffAddr = 0; break;
- case 1: IEM_OPCODE_GET_NEXT_S8_SX_U16(&u16EffAddr); break;
- case 2: IEM_OPCODE_GET_NEXT_U16(&u16EffAddr); break;
- default: AssertFailedReturn(VERR_IEM_IPE_1); /* (caller checked for these) */
- }
-
- /* Add the base and index registers to the disp. */
- switch (bRm & X86_MODRM_RM_MASK)
- {
- case 0: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.si; break;
- case 1: u16EffAddr += pVCpu->cpum.GstCtx.bx + pVCpu->cpum.GstCtx.di; break;
- case 2: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.si; SET_SS_DEF(); break;
- case 3: u16EffAddr += pVCpu->cpum.GstCtx.bp + pVCpu->cpum.GstCtx.di; SET_SS_DEF(); break;
- case 4: u16EffAddr += pVCpu->cpum.GstCtx.si; break;
- case 5: u16EffAddr += pVCpu->cpum.GstCtx.di; break;
- case 6: u16EffAddr += pVCpu->cpum.GstCtx.bp; SET_SS_DEF(); break;
- case 7: u16EffAddr += pVCpu->cpum.GstCtx.bx; break;
- }
- }
-
- *pGCPtrEff = u16EffAddr;
- }
- else
- {
- Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT);
- uint32_t u32EffAddr;
-
- /* Handle the disp32 form with no registers first. */
- if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
- IEM_OPCODE_GET_NEXT_U32(&u32EffAddr);
- else
- {
- /* Get the register (or SIB) value. */
- switch ((bRm & X86_MODRM_RM_MASK))
- {
- case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
- case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
- case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
- case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
- case 4: /* SIB */
- {
- uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
-
- /* Get the index and scale it. */
- switch ((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK)
- {
- case 0: u32EffAddr = pVCpu->cpum.GstCtx.eax; break;
- case 1: u32EffAddr = pVCpu->cpum.GstCtx.ecx; break;
- case 2: u32EffAddr = pVCpu->cpum.GstCtx.edx; break;
- case 3: u32EffAddr = pVCpu->cpum.GstCtx.ebx; break;
- case 4: u32EffAddr = 0; /*none */ break;
- case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; break;
- case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
- case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
- u32EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
-
- /* add base */
- switch (bSib & X86_SIB_BASE_MASK)
- {
- case 0: u32EffAddr += pVCpu->cpum.GstCtx.eax; break;
- case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
- case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
- case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
- case 4:
- u32EffAddr += pVCpu->cpum.GstCtx.esp + offRsp;
- SET_SS_DEF();
- break;
- case 5:
- if ((bRm & X86_MODRM_MOD_MASK) != 0)
- {
- u32EffAddr += pVCpu->cpum.GstCtx.ebp;
- SET_SS_DEF();
- }
- else
- {
- uint32_t u32Disp;
- IEM_OPCODE_GET_NEXT_U32(&u32Disp);
- u32EffAddr += u32Disp;
- }
- break;
- case 6: u32EffAddr += pVCpu->cpum.GstCtx.esi; break;
- case 7: u32EffAddr += pVCpu->cpum.GstCtx.edi; break;
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
- break;
- }
- case 5: u32EffAddr = pVCpu->cpum.GstCtx.ebp; SET_SS_DEF(); break;
- case 6: u32EffAddr = pVCpu->cpum.GstCtx.esi; break;
- case 7: u32EffAddr = pVCpu->cpum.GstCtx.edi; break;
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
-
- /* Get and add the displacement. */
- switch ((bRm >> X86_MODRM_MOD_SHIFT) & X86_MODRM_MOD_SMASK)
- {
- case 0:
- break;
- case 1:
- {
- int8_t i8Disp; IEM_OPCODE_GET_NEXT_S8(&i8Disp);
- u32EffAddr += i8Disp;
- break;
- }
- case 2:
- {
- uint32_t u32Disp; IEM_OPCODE_GET_NEXT_U32(&u32Disp);
- u32EffAddr += u32Disp;
- break;
- }
- default:
- AssertFailedReturn(VERR_IEM_IPE_2); /* (caller checked for these) */
- }
-
- }
- if (pVCpu->iem.s.enmEffAddrMode == IEMMODE_32BIT)
- *pGCPtrEff = u32EffAddr;
- else
- {
- Assert(pVCpu->iem.s.enmEffAddrMode == IEMMODE_16BIT);
- *pGCPtrEff = u32EffAddr & UINT16_MAX;
- }
- }
- }
- else
- {
- uint64_t u64EffAddr;
-
- /* Handle the rip+disp32 form with no registers first. */
- if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
- {
- IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
- u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
- }
- else
- {
- /* Get the register (or SIB) value. */
- switch ((bRm & X86_MODRM_RM_MASK) | pVCpu->iem.s.uRexB)
- {
- case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
- case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
- case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
- case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
- case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; SET_SS_DEF(); break;
- case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
- case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
- case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
- case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
- case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
- case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
- case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
- case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
- case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
- /* SIB */
- case 4:
- case 12:
- {
- uint8_t bSib; IEM_OPCODE_GET_NEXT_U8(&bSib);
-
- /* Get the index and scale it. */
- switch (((bSib >> X86_SIB_INDEX_SHIFT) & X86_SIB_INDEX_SMASK) | pVCpu->iem.s.uRexIndex)
- {
- case 0: u64EffAddr = pVCpu->cpum.GstCtx.rax; break;
- case 1: u64EffAddr = pVCpu->cpum.GstCtx.rcx; break;
- case 2: u64EffAddr = pVCpu->cpum.GstCtx.rdx; break;
- case 3: u64EffAddr = pVCpu->cpum.GstCtx.rbx; break;
- case 4: u64EffAddr = 0; /*none */ break;
- case 5: u64EffAddr = pVCpu->cpum.GstCtx.rbp; break;
- case 6: u64EffAddr = pVCpu->cpum.GstCtx.rsi; break;
- case 7: u64EffAddr = pVCpu->cpum.GstCtx.rdi; break;
- case 8: u64EffAddr = pVCpu->cpum.GstCtx.r8; break;
- case 9: u64EffAddr = pVCpu->cpum.GstCtx.r9; break;
- case 10: u64EffAddr = pVCpu->cpum.GstCtx.r10; break;
- case 11: u64EffAddr = pVCpu->cpum.GstCtx.r11; break;
- case 12: u64EffAddr = pVCpu->cpum.GstCtx.r12; break;
- case 13: u64EffAddr = pVCpu->cpum.GstCtx.r13; break;
- case 14: u64EffAddr = pVCpu->cpum.GstCtx.r14; break;
- case 15: u64EffAddr = pVCpu->cpum.GstCtx.r15; break;
- IEM_NOT_REACHED_DEFAULT_CASE_RET();
- }
- u64EffAddr <<= (bSib >> X86_SIB_SCALE_SHIFT) & X86_SIB_SCALE_SMASK;
-
- /* add base */
- switch ((bSib & X86_SIB_BASE_MASK) | pVCpu->iem.s.uRexB)
- {
- case 0: u64EffAddr += pVCpu->cpum.GstCtx.rax; break;
- case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
- case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
- case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
- case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + offRsp; SET_SS_DEF(); break;
+ case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
@@ -9377,11 +9074,12 @@ VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm,
* @return The effective address.
* @param pVCpu The cross context virtual CPU structure of the calling thread.
* @param bRm The ModRM byte.
- * @param cbImm The size of any immediate following the
- * effective address opcode bytes. Important for
- * RIP relative addressing.
+ * @param cbImmAndRspOffset - First byte: The size of any immediate
+ * following the effective address opcode bytes
+ * (only for RIP relative addressing).
+ * - Second byte: RSP displacement (for POP [ESP]).
*/
-RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP
+RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP
{
Log5(("iemOpHlpCalcRmEffAddrJmp: bRm=%#x\n", bRm));
# define SET_SS_DEF() \
@@ -9471,7 +9169,7 @@ RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM
case 1: u32EffAddr += pVCpu->cpum.GstCtx.ecx; break;
case 2: u32EffAddr += pVCpu->cpum.GstCtx.edx; break;
case 3: u32EffAddr += pVCpu->cpum.GstCtx.ebx; break;
- case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp; SET_SS_DEF(); break;
+ case 4: u32EffAddr += pVCpu->cpum.GstCtx.esp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
case 5:
if ((bRm & X86_MODRM_MOD_MASK) != 0)
{
@@ -9535,7 +9233,7 @@ RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM
if ((bRm & (X86_MODRM_MOD_MASK | X86_MODRM_RM_MASK)) == 5)
{
IEM_OPCODE_GET_NEXT_S32_SX_U64(&u64EffAddr);
- u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + cbImm;
+ u64EffAddr += pVCpu->cpum.GstCtx.rip + IEM_GET_INSTR_LEN(pVCpu) + (cbImmAndRspOffset & UINT32_C(0xff));
}
else
{
@@ -9592,7 +9290,7 @@ RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM
case 1: u64EffAddr += pVCpu->cpum.GstCtx.rcx; break;
case 2: u64EffAddr += pVCpu->cpum.GstCtx.rdx; break;
case 3: u64EffAddr += pVCpu->cpum.GstCtx.rbx; break;
- case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp; SET_SS_DEF(); break;
+ case 4: u64EffAddr += pVCpu->cpum.GstCtx.rsp + (cbImmAndRspOffset >> 8); SET_SS_DEF(); break;
case 6: u64EffAddr += pVCpu->cpum.GstCtx.rsi; break;
case 7: u64EffAddr += pVCpu->cpum.GstCtx.rdi; break;
case 8: u64EffAddr += pVCpu->cpum.GstCtx.r8; break;
diff --git a/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h b/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
index cc23324d5ec..0110b160ef4 100644
--- a/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
+++ b/src/VBox/VMM/VMMAll/IEMAllInstructionsOneByte.cpp.h
@@ -5213,9 +5213,9 @@ FNIEMOP_DEF_1(iemOp_pop_Ev, uint8_t, bRm)
VBOXSTRICTRC rcStrict;
switch (pVCpu->iem.s.enmEffOpSize)
{
- case IEMMODE_16BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 2); break;
- case IEMMODE_32BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 4); break;
- case IEMMODE_64BIT: rcStrict = iemOpHlpCalcRmEffAddrEx(pVCpu, bRm, 0, &GCPtrEff, 8); break;
+ case IEMMODE_16BIT: rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 2 << 8, &GCPtrEff); break;
+ case IEMMODE_32BIT: rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 4 << 8, &GCPtrEff); break;
+ case IEMMODE_64BIT: rcStrict = iemOpHlpCalcRmEffAddr(pVCpu, bRm, 8 << 8, &GCPtrEff); break;
IEM_NOT_REACHED_DEFAULT_CASE_RET();
}
if (rcStrict != VINF_SUCCESS)
diff --git a/src/VBox/VMM/include/IEMOpHlp.h b/src/VBox/VMM/include/IEMOpHlp.h
index 82332bf0fa3..4670594f75a 100644
--- a/src/VBox/VMM/include/IEMOpHlp.h
+++ b/src/VBox/VMM/include/IEMOpHlp.h
@@ -651,10 +651,9 @@ void iemOpStubMsg2(PVMCPUCC pVCpu) RT_NOEXCEPT;
return IEMOP_RAISE_INVALID_OPCODE(); \
} while (0)
-VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
-VBOXSTRICTRC iemOpHlpCalcRmEffAddrEx(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm, PRTGCPTR pGCPtrEff, int8_t offRsp) RT_NOEXCEPT;
+VBOXSTRICTRC iemOpHlpCalcRmEffAddr(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset, PRTGCPTR pGCPtrEff) RT_NOEXCEPT;
#ifdef IEM_WITH_SETJMP
-RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint8_t cbImm) IEM_NOEXCEPT_MAY_LONGJMP;
+RTGCPTR iemOpHlpCalcRmEffAddrJmp(PVMCPUCC pVCpu, uint8_t bRm, uint32_t cbImmAndRspOffset) IEM_NOEXCEPT_MAY_LONGJMP;
#endif
/** @} */