Diffstat (limited to 'arch')
 arch/Kconfig                        |  8 ++++++++
 arch/mips/Kconfig                   |  1 +
 arch/sh/boot/compressed/misc.c      |  5 +++++
 arch/x86/Kconfig                    |  1 +
 arch/x86/include/asm/ftrace.h       | 13 +++++++++++++
 arch/x86/include/asm/unwind_hints.h |  8 ++++++++
 arch/x86/kernel/ftrace.c            | 14 ++++++++++++++
 arch/x86/kernel/ftrace_64.S         | 42 ++++++++++++++++++++++++++++++------
 8 files changed, 86 insertions(+), 6 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 8bcc1c746142..17c42bc36321 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -939,6 +939,14 @@ config RELR
config ARCH_HAS_MEM_ENCRYPT
bool
+config HAVE_SPARSE_SYSCALL_NR
+ bool
+ help
+ An architecture should select this if its syscall numbering is sparse,
+ to save space. For example, MIPS syscall numbers start at 4000 (o32),
+ 5000 (n64) and 6000 (n32), so a dense syscall table would be mostly
+ empty. This option enables syscall-related optimizations for such cases.
+
source "kernel/gcov/Kconfig"
source "scripts/gcc-plugins/Kconfig"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index c86be02b6d89..61b9269cdd3e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -74,6 +74,7 @@ config MIPS
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
+ select HAVE_SPARSE_SYSCALL_NR
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
diff --git a/arch/sh/boot/compressed/misc.c b/arch/sh/boot/compressed/misc.c
index c15cac9251b9..e69ec12cbbe6 100644
--- a/arch/sh/boot/compressed/misc.c
+++ b/arch/sh/boot/compressed/misc.c
@@ -111,6 +111,11 @@ void __stack_chk_fail(void)
error("stack-protector: Kernel stack is corrupted\n");
}
+/* Needed because vmlinux.lds.h references this */
+void ftrace_stub(void)
+{
+}
+
#ifdef CONFIG_SUPERH64
#define stackalign 8
#else
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9c9bc348c412..58b31ee198d9 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -157,6 +157,7 @@ config X86
select HAVE_DMA_CONTIGUOUS
select HAVE_DYNAMIC_FTRACE
select HAVE_DYNAMIC_FTRACE_WITH_REGS
+ select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
select HAVE_EBPF_JIT
select HAVE_EFFICIENT_UNALIGNED_ACCESS
select HAVE_EISA
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index c38a66661576..c2a7458f912c 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -28,6 +28,19 @@ static inline unsigned long ftrace_call_adjust(unsigned long addr)
return addr;
}
+/*
+ * When an ftrace-registered caller traces a function that also has a
+ * direct caller attached via register_ftrace_direct(), the two need
+ * to be differentiated in the ftrace_caller trampoline. To do this,
+ * we place the direct caller's address in the ORIG_AX slot of pt_regs.
+ * A non-zero ORIG_AX tells ftrace_caller that a direct caller is present.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr)
+{
+ /* Emulate a call */
+ regs->orig_ax = addr;
+}
+
#ifdef CONFIG_DYNAMIC_FTRACE
struct dyn_arch_ftrace {
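
Note: the ORIG_AX convention above is consumed by ftrace_64.S further
down. From the user side, a direct caller is attached with
register_ftrace_direct(). A sketch of a module doing so, modeled on the
sample this series adds under samples/ftrace/ (treat it as a sketch;
the in-tree sample is the authoritative version):

	#include <linux/module.h>
	#include <linux/ftrace.h>
	#include <linux/sched.h>

	/* Called straight from wake_up_process()'s fentry site. */
	void my_direct_func(struct task_struct *p)
	{
		trace_printk("waking up %s-%d\n", p->comm, p->pid);
	}

	extern void my_tramp(void *);

	/*
	 * Minimal trampoline: preserve the one live argument register,
	 * call the C handler, then return to the traced function.
	 */
	asm (
	"	.pushsection	.text, \"ax\", @progbits\n"
	"	.type		my_tramp, @function\n"
	"my_tramp:\n"
	"	pushq %rbp\n"
	"	movq %rsp, %rbp\n"
	"	pushq %rdi\n"
	"	call my_direct_func\n"
	"	popq %rdi\n"
	"	leave\n"
	"	ret\n"
	"	.size		my_tramp, .-my_tramp\n"
	"	.popsection\n"
	);

	static int __init direct_init(void)
	{
		return register_ftrace_direct((unsigned long)wake_up_process,
					      (unsigned long)my_tramp);
	}

	static void __exit direct_exit(void)
	{
		unregister_ftrace_direct((unsigned long)wake_up_process,
					 (unsigned long)my_tramp);
	}

	module_init(direct_init);
	module_exit(direct_exit);
	MODULE_LICENSE("GPL");
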
diff --git a/arch/x86/include/asm/unwind_hints.h b/arch/x86/include/asm/unwind_hints.h
index 0bcdb1279361..f5e2eb12cb71 100644
--- a/arch/x86/include/asm/unwind_hints.h
+++ b/arch/x86/include/asm/unwind_hints.h
@@ -86,6 +86,14 @@
UNWIND_HINT sp_offset=\sp_offset
.endm
+.macro UNWIND_HINT_SAVE
+ UNWIND_HINT type=UNWIND_HINT_TYPE_SAVE
+.endm
+
+.macro UNWIND_HINT_RESTORE
+ UNWIND_HINT type=UNWIND_HINT_TYPE_RESTORE
+.endm
+
#else /* !__ASSEMBLY__ */
#define UNWIND_HINT(sp_reg, sp_offset, type, end) \
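
Note: unwind hints emit no instructions; they are annotations that
objtool consumes when generating ORC unwind data. UNWIND_HINT_SAVE
snapshots the unwind state at a point in a .S file and
UNWIND_HINT_RESTORE asserts that the same state holds again, letting
the code in between diverge in ways objtool cannot follow. The usage
pattern, as ftrace_64.S applies it below (sketch only):

	pushfq
	UNWIND_HINT_SAVE		/* remember the unwind state */

	/* ... paths that leave the stack in different shapes ... */

	UNWIND_HINT_RESTORE		/* assert the saved state again */
	popfq
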
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 024c3053dbba..060a361d9d11 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -1043,6 +1043,20 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
return;
/*
+ * If the return location points directly at the start of a direct
+ * trampoline (were we tracing the trampoline itself, the location
+ * would still be offset by MCOUNT_INSN_SIZE), then the return
+ * address is actually off by one word, and we need to adjust
+ * for that.
+ */
+ if (ftrace_direct_func_count) {
+ if (ftrace_find_direct_func(self_addr + MCOUNT_INSN_SIZE)) {
+ self_addr = *parent;
+ parent++;
+ }
+ }
+
+ /*
* Protect against fault, even if it shouldn't
* happen. This tool is too intrusive to
* ignore such a protection.
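
Note: this adjustment appears to pair with the emulated call set up in
ftrace_64.S below. When the exit path "calls" a direct trampoline, the
trampoline's address occupies the slot where a return address normally
sits, so the values the function-graph entry code reads are shifted by
one word; reloading self_addr from *parent and advancing parent
compensates. The probe uses self_addr + MCOUNT_INSN_SIZE because
self_addr already had MCOUNT_INSN_SIZE subtracted by the trampoline,
and a match at exactly the trampoline's start distinguishes this case
from tracing the trampoline itself.
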
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 6e8961ca3605..369e61faacfe 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -85,6 +85,7 @@
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
+ movq $0, ORIG_RAX(%rsp)
/*
* Save the original RBP. Even though the mcount ABI does not
* require this, it helps out callers.
@@ -111,7 +112,11 @@
subq $MCOUNT_INSN_SIZE, %rdi
.endm
-.macro restore_mcount_regs
+.macro restore_mcount_regs save=0
+
+ /* ftrace_regs_caller or frame pointers require this */
+ movq RBP(%rsp), %rbp
+
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDI(%rsp), %rdi
@@ -120,10 +125,7 @@
movq RCX(%rsp), %rcx
movq RAX(%rsp), %rax
- /* ftrace_regs_caller can modify %rbp */
- movq RBP(%rsp), %rbp
-
- addq $MCOUNT_REG_SIZE, %rsp
+ addq $MCOUNT_REG_SIZE-\save, %rsp
.endm
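
Note: the new save argument leaves that many bytes of the register-save
area in place on exit. Plain restore_mcount_regs frees the whole
MCOUNT_REG_SIZE area as before, while restore_mcount_regs 8 leaves %rsp
pointing at the area's last word, so the direct-call exit below can park
the saved flags there before popfq. In effect:

	restore_mcount_regs		/* rsp += MCOUNT_REG_SIZE     */
	restore_mcount_regs 8		/* rsp += MCOUNT_REG_SIZE - 8 */
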
@@ -174,6 +176,8 @@ SYM_FUNC_START(ftrace_regs_caller)
/* Save the current flags before any operations that can change them */
pushfq
+ UNWIND_HINT_SAVE
+
/* added 8 bytes to save flags */
save_mcount_regs 8
/* save_mcount_regs fills in first two parameters */
@@ -226,7 +230,33 @@ SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
movq R10(%rsp), %r10
movq RBX(%rsp), %rbx
- restore_mcount_regs
+ movq ORIG_RAX(%rsp), %rax
+ movq %rax, MCOUNT_REG_SIZE-8(%rsp)
+
+ /* If ORIG_RAX is anything but zero, turn this return into a call to that address */
+ movq ORIG_RAX(%rsp), %rax
+ cmpq $0, %rax
+ je 1f
+
+ /* Swap the flags with orig_rax */
+ movq MCOUNT_REG_SIZE(%rsp), %rdi
+ movq %rdi, MCOUNT_REG_SIZE-8(%rsp)
+ movq %rax, MCOUNT_REG_SIZE(%rsp)
+
+ restore_mcount_regs 8
+
+ jmp 2f
+
+1: restore_mcount_regs
+
+2:
+ /*
+ * The stack layout is nondeterministic here, depending on which path was
+ * taken. This confuses objtool and ORC, rightfully so. For now,
+ * pretend the stack always looks like the non-direct case.
+ */
+ UNWIND_HINT_RESTORE
/* Restore flags */
popfq
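
Note: taken together, this exit emulates a call. The return address into
the traced function sits above the saved flags, and ORIG_RAX carries
either zero or a direct trampoline address (placed there by
arch_ftrace_set_direct_caller() above). Illustrative stacks just before
popfq, one word per slot:

	normal path:			direct path (ORIG_RAX != 0):

	[ saved flags   ] <- rsp	[ saved flags   ] <- rsp
	[ ret into func ]		[ direct tramp  ]
					[ ret into func ]

After popfq, the ret that follows pops the next word as its target: the
normal path resumes the traced function, while the direct path enters
the trampoline as if it had been called from the fentry site, so the
trampoline's own ret later resumes the traced function.
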