author		Paolo Bonzini <pbonzini@redhat.com>		2022-07-15 07:34:55 -0400
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2022-07-23 12:54:13 +0200
commit		5f4e77cc9abf65c4593cd255d8c3cd6c5d0ca7f6 (patch)
tree		2e240c99cc316c5e076d921bbf10faa029bd93ac
parent		198685e58b134679e6079d872396f4352780282f (diff)
download	linux-rt-5f4e77cc9abf65c4593cd255d8c3cd6c5d0ca7f6.tar.gz
KVM: emulate: do not adjust size of fastop and setcc subroutines
commit 79629181607e801c0b41b8790ac4ee2eb5d7bc3e upstream.

Instead of doing complicated calculations to find the size of the subroutines
(which are even more complicated because they need to be stringified into
an asm statement), just hardcode to 16.

It is less dense for a few combinations of IBT/SLS/retbleed, but it has the
advantage of being really simple.

Cc: stable@vger.kernel.org # 5.15.x: 84e7051c0bc1: x86/kvm: fix FASTOP_SIZE when return thunks are enabled
Cc: stable@vger.kernel.org
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	arch/x86/kvm/emulate.c	17
1 file changed, 7 insertions, 10 deletions
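For context on what is being removed: the old macros sized each stub as the
smallest power of two that could hold its worst-case length, which varied with
the RETHUNK/SLS/IBT configuration. A minimal standalone C sketch of that
rounding trick (illustrative only, not taken from the kernel tree) is:

#include <assert.h>
#include <stdio.h>

/*
 * Same rounding pattern as the removed FASTOP_SIZE macro (the removed
 * SETCC_ALIGN used the same trick starting from 4): lengths 1..8 map to
 * an 8-byte slot, 9..16 to 16, 17..32 to 32.
 */
static int round_slot(int len)
{
	return 8 << ((len > 8) & 1) << ((len > 16) & 1);
}

int main(void)
{
	for (int len = 1; len <= 32; len++) {
		int slot = round_slot(len);

		/* the invariant the removed static_assert() checked */
		assert(len <= slot);
		printf("length %2d -> slot %2d\n", len, slot);
	}
	return 0;
}

The patch drops this computation entirely: per the comment it adds below, even
with a return thunk, ENDBR and an SLS INT3 the stubs fit in 16 bytes, so a
fixed 16 is always safe and much simpler to stringify into the asm templates.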
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index b3d30f591113..318a78379ca6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -187,13 +187,6 @@
 #define X8(x...) X4(x), X4(x)
 #define X16(x...) X8(x), X8(x)
 
-#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
-#define RET_LENGTH (1 + (4 * IS_ENABLED(CONFIG_RETHUNK)) + \
-		    IS_ENABLED(CONFIG_SLS))
-#define FASTOP_LENGTH (ENDBR_INSN_SIZE + 7 + RET_LENGTH)
-#define FASTOP_SIZE (8 << ((FASTOP_LENGTH > 8) & 1) << ((FASTOP_LENGTH > 16) & 1))
-static_assert(FASTOP_LENGTH <= FASTOP_SIZE);
-
 struct opcode {
 	u64 flags : 56;
 	u64 intercept : 8;
@@ -307,9 +300,15 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
  * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
  * different operand sizes can be reached by calculation, rather than a jump
  * table (which would be bigger than the code).
+ *
+ * The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
+ * and 1 for the straight line speculation INT3, leaves 7 bytes for the
+ * body of the function. Currently none is larger than 4.
  */
 static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
 
+#define FASTOP_SIZE 16
+
 #define __FOP_FUNC(name) \
 	".align " __stringify(FASTOP_SIZE) " \n\t" \
 	".type " name ", @function \n\t" \
@@ -441,9 +440,7 @@ static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
  * RET | JMP __x86_return_thunk [1,5 bytes; CONFIG_RETHUNK]
  * INT3 [1 byte; CONFIG_SLS]
  */
-#define SETCC_LENGTH (3 + RET_LENGTH)
-#define SETCC_ALIGN (4 << ((SETCC_LENGTH > 4) & 1) << ((SETCC_LENGTH > 8) & 1))
-static_assert(SETCC_LENGTH <= SETCC_ALIGN);
+#define SETCC_ALIGN 16
 
 #define FOP_SETCC(op) \
 	".align " __stringify(SETCC_ALIGN) " \n\t" \