From 2d1b21eceaf0765d60b543b2b8e26c2f55517259 Mon Sep 17 00:00:00 2001
From: Alexander Gordeev
Date: Mon, 3 Apr 2023 08:44:40 +0200
Subject: s390/kdump: remove nodat stack restriction for calling nodat functions

To call DAT-off code from the kernel, the stack needs to be switched to
nodat_stack (or another stack that is mapped 1:1). Before the
call_nodat() macro was introduced, that was necessary in order to
provide the very same memory address for the STNSM and STOSM
instructions: if the kernel stayed on an arbitrary stack (e.g. a
virtually mapped one), the virtual address provided to the STNSM
instruction could differ from the physical address needed for the
corresponding STOSM instruction.

Since the call_nodat() macro was introduced, the kernel stack no longer
needs to be mapped 1:1, because the macro stores the physical memory
address of the return PSW in a register before entering DAT-off mode.
This way the returning LPSWE instruction is able to pick the correct
memory location and restore DAT-on mode. That however might fail in
case the 16-byte return PSW happens to cross a page boundary: the PSW
mask and the PSW address could end up in two separate, non-contiguous
physical pages.

Align the return PSW on a 16-byte boundary so it always fits into a
single physical page. As a result, any stack (including a virtually
mapped one) can be used for calling DAT-off code, and switching to
nodat_stack beforehand becomes unnecessary.

Signed-off-by: Alexander Gordeev
Reviewed-by: Heiko Carstens
Signed-off-by: Heiko Carstens
Signed-off-by: Vasily Gorbik
---
 arch/s390/include/asm/stacktrace.h | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/s390/include/asm/stacktrace.h b/arch/s390/include/asm/stacktrace.h
index 1966422cf030..78f7b729b65f 100644
--- a/arch/s390/include/asm/stacktrace.h
+++ b/arch/s390/include/asm/stacktrace.h
@@ -210,7 +210,9 @@ static __always_inline unsigned long get_stack_pointer(struct task_struct *task,
 #define call_nodat(nr, rettype, fn, ...)				\
 ({									\
 	rettype (*__fn)(CALL_PARM_##nr(__VA_ARGS__)) = (fn);		\
-	psw_t psw_enter, psw_leave;					\
+	/* aligned since psw_leave must not cross page boundary */	\
+	psw_t __aligned(16) psw_leave;					\
+	psw_t psw_enter;						\
 	CALL_LARGS_##nr(__VA_ARGS__);					\
 	CALL_REGS_##nr;							\
 									\
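
The alignment argument can be checked outside the kernel: below is a
minimal user-space sketch (not part of the patch) showing why a 16-byte
object that is 16-byte aligned can never straddle a 4 KiB page boundary,
while an unaligned one can. The fake_psw struct, the PAGE_SIZE value and
the crosses_page() helper are assumptions made for illustration, not
kernel code.

  /*
   * Illustration only: a 16-byte, 16-byte-aligned object always fits in
   * a single 4 KiB page, because PAGE_SIZE is a multiple of 16.
   */
  #include <stddef.h>
  #include <stdint.h>
  #include <stdio.h>

  #define PAGE_SIZE 4096UL

  struct fake_psw {		/* stands in for the 16-byte psw_t */
  	uint64_t mask;
  	uint64_t addr;
  };

  /* True if the first and last byte of the object map to different pages. */
  static int crosses_page(uintptr_t start, size_t len)
  {
  	return start / PAGE_SIZE != (start + len - 1) / PAGE_SIZE;
  }

  int main(void)
  {
  	size_t len = sizeof(struct fake_psw);	/* 16 bytes */

  	/*
  	 * An object placed 8 bytes before a page boundary has its two
  	 * 8-byte halves (PSW mask and PSW address) in different pages.
  	 */
  	uintptr_t unaligned = 2 * PAGE_SIZE - 8;

  	/*
  	 * Rounding the start down to a 16-byte boundary keeps the whole
  	 * object in one page: an aligned start offset within a page is
  	 * at most PAGE_SIZE - 16.
  	 */
  	uintptr_t aligned = unaligned & ~(uintptr_t)15;

  	printf("unaligned: crosses page boundary = %d\n",
  	       crosses_page(unaligned, len));	/* prints 1 */
  	printf("aligned:   crosses page boundary = %d\n",
  	       crosses_page(aligned, len));	/* prints 0 */
  	return 0;
  }

This is the same guarantee __aligned(16) gives psw_leave in the macro
above: LPSWE always finds the PSW mask and PSW address within one
physical page.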