From 8a3c5a9941320ca46419ee5d44c662c4cde3b711 Mon Sep 17 00:00:00 2001
From: Sebastian Andrzej Siewior
Date: Tue, 7 Jun 2022 18:24:58 +0200
Subject: [ANNOUNCE] v5.19-rc1-rt1

Dear RT folks!

I'm pleased to announce the v5.19-rc1-rt1 patch set.

Changes since v5.18-rt11:

  - Rebase to v5.19-rc1.

Known issues
     - Valentin Schneider reported a few splats on ARM64, see
       https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.19-rc1-rt1

The RT patch against v5.19-rc1 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.19/older/patch-5.19-rc1-rt1.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.19/older/patches-5.19-rc1-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior
---
 patches/0001-printk-rename-cpulock-functions.patch | 294 ----------
 ...0001-sched-Fix-missing-prototype-warnings.patch | 141 -----
 ...scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch |  69 ---
 ...nal-Rename-send_signal-send_signal_locked.patch | 123 ----
 ...printk-cpu-sync-always-disable-interrupts.patch |  53 --
 ...se-per-CPU-API-to-update-per-CPU-statisti.patch | 516 -----------------
 ...ace-__group_send_sig_info-with-send_signa.patch |  96 ----
 ...-Rename-flush_smp_call_function_from_idle.patch | 125 -----
 ...d-missing-memory-barrier-to-wake_up_klogd.patch |  89 ---
 ...-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch | 133 -----
 ...Remove-get_cpu-semantics-in-fc_exch_em_al.patch |  43 --
 ...ftirq-handling-RT-safe-in-flush_smp_call_.patch | 106 ----
 ...n-t-disable-interrupts-on-PREEMPT_RT-duri.patch |  10 +-
 patches/0004-printk-wake-up-all-waiters.patch      |  27 -
 ...sa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch |  77 ---
 ...c-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch |  55 --
 ...n-t-check-for-atomic-context-on-PREEMPT_R.patch |   2 +-
 ...tk-wake-waiters-for-safe-and-NMI-contexts.patch |  86 ---
 .../0005-ptrace-Remove-arch_ptrace_attach.patch    | 157 ------
 ...caller_id-timestamp-after-migration-disab.patch |  66 ---
 ...lockdep_assert_held-instead-of-assert_spi.patch |  39 --
 ...intk-call-boot_delay_msec-in-printk_delay.patch |  41 --
 ...plement-PTRACE_KILL-by-always-sending-SIG.patch |  65 ---
 ...5-gt-Queue-and-wait-for-the-irq_work-item.patch |   2 +-
 ...-add-con_printk-macro-for-console-details.patch |  55 --
 ...cument-that-wait_task_inactive-can-t-fail.patch |  65 ---
 ...-Use-spin_lock_irq-instead-of-local_irq_d.patch |  12 +-
 ...printk-refactor-and-rework-printing-logic.patch | 617 ---------------------
 ...t-ptrace_stop-can-generate-spuriuos-SIGTR.patch | 197 -------
 ...-buffer-definitions-into-console_emit_nex.patch | 183 ------
 patches/0010-ptrace-Don-t-change-__state.patch     | 182 ------
 patches/0011-printk-add-pr_flush.patch             | 184 ------
 ...race-Always-take-siglock-in-ptrace_resume.patch |  46 --
 ...k-add-functions-to-prefer-direct-printing.patch | 330 -----------
 ...l-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch | 212 -------
 .../0013-printk-add-kthread-console-printers.patch | 530 ------------------
 ...tend-console_lock-for-per-console-locking.patch | 584 -------------------
 patches/0015-printk-remove-console_locked.patch    |  96 ----
 ...ntk-add-infrastucture-for-atomic-consoles.patch |  52 +-
 .../0017-serial-8250-implement-write_atomic.patch  | 105 ++--
 ...intk-avoid-preempt_disable-for-PREEMPT_RT.patch |  10 +-
 patches/ARM64__Allow_to_enable_RT.patch            |   4 +-
 patches/Add_localversion_for_-RT_release.patch     |   2 +-
 patches/POWERPC__Allow_to_enable_RT.patch          |   4 +-
 patches/Revert-drm-i915-Depend-on-PREEMPT_RT.patch |   2 +-
 ...t-disable-preemption-while-calling-svc_po.patch |  55 --
 patches/arch_arm64__Add_lazy_preempt_support.patch |   8 +-
 ...ke-arch_faults_on_old_pte-check-for-migra.patch |  36 --
 ...elay-freeing-memory-in-fpsimd_flush_threa.patch |  43 --
 ...ve-Make-kernel-FPU-protection-RT-friendly.patch |  56 --
 patches/arm__Add_support_for_lazy_preemption.patch |   4 +-
 ...t-disable-preemption-around-__blk_mq_run_.patch |   2 +-
 ...td-Protect-per-CPU-resource-by-disabling-.patch |  89 ---
 ...eplace_bit_spinlocks_with_rtmutex_for_-rt.patch |   2 +-
 patches/entry--Fix-the-preempt-lazy-fallout.patch  |   2 +-
 ...rq-Provide-generic_handle_domain_irq_safe.patch |   6 +-
 ...sim-Make-the-irq_work-always-run-in-hard-.patch |  37 --
 ...l-Prevent-softirq-pending-leak-in-irq_pol.patch |  49 --
 ...ockdep-Use-sched_clock-for-random-numbers.patch |  35 ++
 ...e-the-stackdepot-related-allocation-out-o.patch |  91 +++
 ...Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch |  58 --
 .../powerpc__Add_support_for_lazy_preemption.patch |   2 +-
 ...sks-Use-rcuwait-for-the-rcu_tasks_kthread.patch |  76 ---
 ...se-schedule_hrtimeout_range-while-waiting.patch |  42 --
 ...Also-force-sched-priority-to-timersd-on-b.patch |   4 +-
 ...der-task_struct-saved_state-in-wait_task_.patch |   8 +-
 .../sched__Add_support_for_lazy_preemption.patch   |  56 +-
 patches/series                                     |  52 +-
 patches/signal__Revert_ptrace_preempt_magic.patch  |   2 +-
 ...tirq-Disable-softirq-stacks-on-PREEMPT_RT.patch |   6 +-
 ...-Use-a-dedicated-thread-for-timer-wakeups.patch |   4 +-
 ...timer-storm-since-introduction-of-timersd.patch |   4 +-
 patches/tpm_tis__fix_stall_after_iowrites.patch    |  31 +-
 ...serial_pl011__Make_the_locking_work_on_RT.patch |   4 +-
 patches/x86__Support_for_lazy_preemption.patch     |  14 +-
 75 files changed, 310 insertions(+), 6455 deletions(-)
 delete mode 100644 patches/0001-printk-rename-cpulock-functions.patch
 delete mode 100644 patches/0001-sched-Fix-missing-prototype-warnings.patch
 delete mode 100644 patches/0001-scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch
 delete mode 100644 patches/0001-signal-Rename-send_signal-send_signal_locked.patch
 delete mode 100644 patches/0002-printk-cpu-sync-always-disable-interrupts.patch
 delete mode 100644 patches/0002-scsi-fcoe-Use-per-CPU-API-to-update-per-CPU-statisti.patch
 delete mode 100644 patches/0002-signal-Replace-__group_send_sig_info-with-send_signa.patch
 delete mode 100644 patches/0002-smp-Rename-flush_smp_call_function_from_idle.patch
 delete mode 100644 patches/0003-printk-add-missing-memory-barrier-to-wake_up_klogd.patch
 delete mode 100644 patches/0003-ptrace-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch
 delete mode 100644 patches/0003-scsi-libfc-Remove-get_cpu-semantics-in-fc_exch_em_al.patch
 delete mode 100644 patches/0003-smp-Make-softirq-handling-RT-safe-in-flush_smp_call_.patch
 delete mode 100644 patches/0004-printk-wake-up-all-waiters.patch
 delete mode 100644 patches/0004-ptrace-xtensa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch
 delete mode 100644 patches/0004-scsi-bnx2fc-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch
 delete mode 100644 patches/0005-printk-wake-waiters-for-safe-and-NMI-contexts.patch
 delete mode 100644 patches/0005-ptrace-Remove-arch_ptrace_attach.patch
 delete mode 100644 patches/0006-printk-get-caller_id-timestamp-after-migration-disab.patch
 delete mode 100644 patches/0006-signal-Use-lockdep_assert_held-instead-of-assert_spi.patch
 delete mode 100644 patches/0007-printk-call-boot_delay_msec-in-printk_delay.patch
 delete mode 100644 patches/0007-ptrace-Reimplement-PTRACE_KILL-by-always-sending-SIG.patch
 delete mode 100644 patches/0008-printk-add-con_printk-macro-for-console-details.patch
 delete mode 100644 patches/0008-ptrace-Document-that-wait_task_inactive-can-t-fail.patch
 delete mode 100644 patches/0009-printk-refactor-and-rework-printing-logic.patch
 delete mode 100644 patches/0009-ptrace-Admit-ptrace_stop-can-generate-spuriuos-SIGTR.patch
 delete mode 100644 patches/0010-printk-move-buffer-definitions-into-console_emit_nex.patch
 delete mode 100644 patches/0010-ptrace-Don-t-change-__state.patch
 delete mode 100644 patches/0011-printk-add-pr_flush.patch
 delete mode 100644 patches/0011-ptrace-Always-take-siglock-in-ptrace_resume.patch
 delete mode 100644 patches/0012-printk-add-functions-to-prefer-direct-printing.patch
 delete mode 100644 patches/0012-sched-signal-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch
 delete mode 100644 patches/0013-printk-add-kthread-console-printers.patch
 delete mode 100644 patches/0014-printk-extend-console_lock-for-per-console-locking.patch
 delete mode 100644 patches/0015-printk-remove-console_locked.patch
 delete mode 100644 patches/SUNRPC-Don-t-disable-preemption-while-calling-svc_po.patch
 delete mode 100644 patches/arm64-mm-Make-arch_faults_on_old_pte-check-for-migra.patch
 delete mode 100644 patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch
 delete mode 100644 patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch
 delete mode 100644 patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch
 delete mode 100644 patches/genirq-irq_sim-Make-the-irq_work-always-run-in-hard-.patch
 delete mode 100644 patches/lib-irq_poll-Prevent-softirq-pending-leak-in-irq_pol.patch
 create mode 100644 patches/locking-lockdep-Use-sched_clock-for-random-numbers.patch
 create mode 100644 patches/mm-slub-Move-the-stackdepot-related-allocation-out-o.patch
 delete mode 100644 patches/mm-vmalloc-Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch
 delete mode 100644 patches/rcu-tasks-Use-rcuwait-for-the-rcu_tasks_kthread.patch
 delete mode 100644 patches/rcu-tasks-Use-schedule_hrtimeout_range-while-waiting.patch

diff --git a/patches/0001-printk-rename-cpulock-functions.patch b/patches/0001-printk-rename-cpulock-functions.patch
deleted file mode 100644
index dd8a16c132cf..000000000000
--- a/patches/0001-printk-rename-cpulock-functions.patch
+++ /dev/null
@@ -1,294 +0,0 @@
-From: John Ogness
-Date: Wed, 20 Apr 2022 01:52:23 +0206
-Subject: [PATCH 01/18] printk: rename cpulock functions
-
-Since the printk cpulock is CPU-reentrant and since it is used
-in all contexts, its usage must be carefully considered and
-most likely will require programming locklessly. To avoid
-mistaking the printk cpulock as a typical lock, rename it to
-cpu_sync. The main functions then become:
-
-  printk_cpu_sync_get_irqsave(flags);
-  printk_cpu_sync_put_irqrestore(flags);
-
-Add extra notes of caution in the function description to help
-developers understand the requirements for correct usage.
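
As a rough sketch of how the renamed interface is used (the
emit_debug_dump() helper below is made up for illustration and is not
part of this patch):

	/* Serialize output across CPUs. The section is reentrant on
	 * the same CPU, e.g. from NMI context, so the code must not
	 * assume exclusive access to shared data inside it. */
	static void emit_debug_dump(void)
	{
		unsigned long flags;

		printk_cpu_sync_get_irqsave(flags);
		pr_info("state dump from CPU %d\n", raw_smp_processor_id());
		printk_cpu_sync_put_irqrestore(flags);
	}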
- -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-2-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/printk.h | 54 ++++++++++++++++++++++--------------- - kernel/printk/printk.c | 71 ++++++++++++++++++++++++------------------------- - lib/dump_stack.c | 4 +- - lib/nmi_backtrace.c | 4 +- - 4 files changed, 73 insertions(+), 60 deletions(-) - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -277,43 +277,55 @@ static inline void printk_trigger_flush( - #endif - - #ifdef CONFIG_SMP --extern int __printk_cpu_trylock(void); --extern void __printk_wait_on_cpu_lock(void); --extern void __printk_cpu_unlock(void); -+extern int __printk_cpu_sync_try_get(void); -+extern void __printk_cpu_sync_wait(void); -+extern void __printk_cpu_sync_put(void); - - /** -- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning -- * lock and disable interrupts. -+ * printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning -+ * lock and disable interrupts. - * @flags: Stack-allocated storage for saving local interrupt state, -- * to be passed to printk_cpu_unlock_irqrestore(). -+ * to be passed to printk_cpu_sync_put_irqrestore(). - * - * If the lock is owned by another CPU, spin until it becomes available. - * Interrupts are restored while spinning. -+ * -+ * CAUTION: This function must be used carefully. It does not behave like a -+ * typical lock. Here are important things to watch out for... -+ * -+ * * This function is reentrant on the same CPU. Therefore the calling -+ * code must not assume exclusive access to data if code accessing the -+ * data can run reentrant or within NMI context on the same CPU. -+ * -+ * * If there exists usage of this function from NMI context, it becomes -+ * unsafe to perform any type of locking or spinning to wait for other -+ * CPUs after calling this function from any context. This includes -+ * using spinlocks or any other busy-waiting synchronization methods. - */ --#define printk_cpu_lock_irqsave(flags) \ -- for (;;) { \ -- local_irq_save(flags); \ -- if (__printk_cpu_trylock()) \ -- break; \ -- local_irq_restore(flags); \ -- __printk_wait_on_cpu_lock(); \ -+#define printk_cpu_sync_get_irqsave(flags) \ -+ for (;;) { \ -+ local_irq_save(flags); \ -+ if (__printk_cpu_sync_try_get()) \ -+ break; \ -+ local_irq_restore(flags); \ -+ __printk_cpu_sync_wait(); \ - } - - /** -- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning -- * lock and restore interrupts. -- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave(). -+ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning -+ * lock and restore interrupts. -+ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave(). 
- */ --#define printk_cpu_unlock_irqrestore(flags) \ -+#define printk_cpu_sync_put_irqrestore(flags) \ - do { \ -- __printk_cpu_unlock(); \ -+ __printk_cpu_sync_put(); \ - local_irq_restore(flags); \ -- } while (0) \ -+ } while (0) - - #else - --#define printk_cpu_lock_irqsave(flags) ((void)flags) --#define printk_cpu_unlock_irqrestore(flags) ((void)flags) -+#define printk_cpu_sync_get_irqsave(flags) ((void)flags) -+#define printk_cpu_sync_put_irqrestore(flags) ((void)flags) - - #endif /* CONFIG_SMP */ - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -3667,26 +3667,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind); - #endif - - #ifdef CONFIG_SMP --static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1); --static atomic_t printk_cpulock_nested = ATOMIC_INIT(0); -+static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1); -+static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0); - - /** -- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant -- * spinning lock is not owned by any CPU. -+ * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant -+ * spinning lock is not owned by any CPU. - * - * Context: Any context. - */ --void __printk_wait_on_cpu_lock(void) -+void __printk_cpu_sync_wait(void) - { - do { - cpu_relax(); -- } while (atomic_read(&printk_cpulock_owner) != -1); -+ } while (atomic_read(&printk_cpu_sync_owner) != -1); - } --EXPORT_SYMBOL(__printk_wait_on_cpu_lock); -+EXPORT_SYMBOL(__printk_cpu_sync_wait); - - /** -- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant -- * spinning lock. -+ * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant -+ * spinning lock. - * - * If no processor has the lock, the calling processor takes the lock and - * becomes the owner. If the calling processor is already the owner of the -@@ -3695,7 +3695,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock) - * Context: Any context. Expects interrupts to be disabled. - * Return: 1 on success, otherwise 0. - */ --int __printk_cpu_trylock(void) -+int __printk_cpu_sync_try_get(void) - { - int cpu; - int old; -@@ -3705,79 +3705,80 @@ int __printk_cpu_trylock(void) - /* - * Guarantee loads and stores from this CPU when it is the lock owner - * are _not_ visible to the previous lock owner. This pairs with -- * __printk_cpu_unlock:B. -+ * __printk_cpu_sync_put:B. - * - * Memory barrier involvement: - * -- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then -- * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B. -+ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, -+ * then __printk_cpu_sync_put:A can never read from -+ * __printk_cpu_sync_try_get:B. - * - * Relies on: - * -- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B -+ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B - * of the previous CPU - * matching -- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B -- * of this CPU -+ * ACQUIRE from __printk_cpu_sync_try_get:A to -+ * __printk_cpu_sync_try_get:B of this CPU - */ -- old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1, -- cpu); /* LMM(__printk_cpu_trylock:A) */ -+ old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1, -+ cpu); /* LMM(__printk_cpu_sync_try_get:A) */ - if (old == -1) { - /* - * This CPU is now the owner and begins loading/storing -- * data: LMM(__printk_cpu_trylock:B) -+ * data: LMM(__printk_cpu_sync_try_get:B) - */ - return 1; - - } else if (old == cpu) { - /* This CPU is already the owner. 
*/ -- atomic_inc(&printk_cpulock_nested); -+ atomic_inc(&printk_cpu_sync_nested); - return 1; - } - - return 0; - } --EXPORT_SYMBOL(__printk_cpu_trylock); -+EXPORT_SYMBOL(__printk_cpu_sync_try_get); - - /** -- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock. -+ * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock. - * - * The calling processor must be the owner of the lock. - * - * Context: Any context. Expects interrupts to be disabled. - */ --void __printk_cpu_unlock(void) -+void __printk_cpu_sync_put(void) - { -- if (atomic_read(&printk_cpulock_nested)) { -- atomic_dec(&printk_cpulock_nested); -+ if (atomic_read(&printk_cpu_sync_nested)) { -+ atomic_dec(&printk_cpu_sync_nested); - return; - } - - /* - * This CPU is finished loading/storing data: -- * LMM(__printk_cpu_unlock:A) -+ * LMM(__printk_cpu_sync_put:A) - */ - - /* - * Guarantee loads and stores from this CPU when it was the - * lock owner are visible to the next lock owner. This pairs -- * with __printk_cpu_trylock:A. -+ * with __printk_cpu_sync_try_get:A. - * - * Memory barrier involvement: - * -- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, -- * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A. -+ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B, -+ * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A. - * - * Relies on: - * -- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B -+ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B - * of this CPU - * matching -- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B -- * of the next CPU -+ * ACQUIRE from __printk_cpu_sync_try_get:A to -+ * __printk_cpu_sync_try_get:B of the next CPU - */ -- atomic_set_release(&printk_cpulock_owner, -- -1); /* LMM(__printk_cpu_unlock:B) */ -+ atomic_set_release(&printk_cpu_sync_owner, -+ -1); /* LMM(__printk_cpu_sync_put:B) */ - } --EXPORT_SYMBOL(__printk_cpu_unlock); -+EXPORT_SYMBOL(__printk_cpu_sync_put); - #endif /* CONFIG_SMP */ ---- a/lib/dump_stack.c -+++ b/lib/dump_stack.c -@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl - * Permit this cpu to perform nested stack dumps while serialising - * against other CPUs - */ -- printk_cpu_lock_irqsave(flags); -+ printk_cpu_sync_get_irqsave(flags); - __dump_stack(log_lvl); -- printk_cpu_unlock_irqrestore(flags); -+ printk_cpu_sync_put_irqrestore(flags); - } - EXPORT_SYMBOL(dump_stack_lvl); - ---- a/lib/nmi_backtrace.c -+++ b/lib/nmi_backtrace.c -@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r - * Allow nested NMI backtraces while serializing - * against other CPUs. 
- */ -- printk_cpu_lock_irqsave(flags); -+ printk_cpu_sync_get_irqsave(flags); - if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) { - pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n", - cpu, (void *)instruction_pointer(regs)); -@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r - else - dump_stack(); - } -- printk_cpu_unlock_irqrestore(flags); -+ printk_cpu_sync_put_irqrestore(flags); - cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); - return true; - } diff --git a/patches/0001-sched-Fix-missing-prototype-warnings.patch b/patches/0001-sched-Fix-missing-prototype-warnings.patch deleted file mode 100644 index dee84b170761..000000000000 --- a/patches/0001-sched-Fix-missing-prototype-warnings.patch +++ /dev/null @@ -1,141 +0,0 @@ -From: Thomas Gleixner -Date: Wed, 13 Apr 2022 15:31:02 +0200 -Subject: [PATCH 1/3] sched: Fix missing prototype warnings - -A W=1 build emits more than a dozen missing prototype warnings related to -scheduler and scheduler specific includes. - -Reported-by: kernel test robot -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220413133024.249118058@linutronix.de ---- - include/linux/sched.h | 2 ++ - kernel/sched/build_policy.c | 2 ++ - kernel/sched/build_utility.c | 1 + - kernel/sched/core.c | 3 +++ - kernel/sched/deadline.c | 2 -- - kernel/sched/fair.c | 1 + - kernel/sched/sched.h | 8 ++------ - kernel/sched/smp.h | 6 ++++++ - kernel/stop_machine.c | 2 -- - 9 files changed, 17 insertions(+), 10 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -2362,4 +2362,6 @@ static inline void sched_core_free(struc - static inline void sched_core_fork(struct task_struct *p) { } - #endif - -+extern void sched_set_stop_task(int cpu, struct task_struct *stop); -+ - #endif ---- a/kernel/sched/build_policy.c -+++ b/kernel/sched/build_policy.c -@@ -15,6 +15,7 @@ - /* Headers: */ - #include - #include -+#include - #include - #include - -@@ -31,6 +32,7 @@ - #include - - #include "sched.h" -+#include "smp.h" - - #include "autogroup.h" - #include "stats.h" ---- a/kernel/sched/build_utility.c -+++ b/kernel/sched/build_utility.c -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -26,7 +26,10 @@ - #include - #include - #include -+#include - #include -+#include -+#include - #include - #include - #include ---- a/kernel/sched/deadline.c -+++ b/kernel/sched/deadline.c -@@ -1220,8 +1220,6 @@ int dl_runtime_exceeded(struct sched_dl_ - return (dl_se->runtime <= 0); - } - --extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); -- - /* - * This function implements the GRUB accounting rule: - * according to the GRUB reclaiming algorithm, the runtime is ---- a/kernel/sched/fair.c -+++ b/kernel/sched/fair.c -@@ -36,6 +36,7 @@ - #include - #include - #include -+#include - - #include - #include ---- a/kernel/sched/sched.h -+++ b/kernel/sched/sched.h -@@ -1827,12 +1827,7 @@ static inline void dirty_sched_domain_sy - #endif - - extern int sched_update_scaling(void); -- --extern void flush_smp_call_function_from_idle(void); -- --#else /* !CONFIG_SMP: */ --static inline void flush_smp_call_function_from_idle(void) { } --#endif -+#endif /* CONFIG_SMP */ - - #include "stats.h" - -@@ -2309,6 +2304,7 @@ extern void resched_cpu(int cpu); - - extern struct rt_bandwidth def_rt_bandwidth; - extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 
runtime); -+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); - - extern void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime); - extern void init_dl_task_timer(struct sched_dl_entity *dl_se); ---- a/kernel/sched/smp.h -+++ b/kernel/sched/smp.h -@@ -7,3 +7,9 @@ - extern void sched_ttwu_pending(void *arg); - - extern void send_call_function_single_ipi(int cpu); -+ -+#ifdef CONFIG_SMP -+extern void flush_smp_call_function_from_idle(void); -+#else -+static inline void flush_smp_call_function_from_idle(void) { } -+#endif ---- a/kernel/stop_machine.c -+++ b/kernel/stop_machine.c -@@ -535,8 +535,6 @@ void stop_machine_park(int cpu) - kthread_park(stopper->thread); - } - --extern void sched_set_stop_task(int cpu, struct task_struct *stop); -- - static void cpu_stop_create(unsigned int cpu) - { - sched_set_stop_task(cpu, per_cpu(cpu_stopper.thread, cpu)); diff --git a/patches/0001-scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch b/patches/0001-scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch deleted file mode 100644 index 075584b5dc47..000000000000 --- a/patches/0001-scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch +++ /dev/null @@ -1,69 +0,0 @@ -From: Davidlohr Bueso -Date: Fri, 6 May 2022 12:57:55 +0200 -Subject: [PATCH 1/4] scsi: fcoe: Add a local_lock to fcoe_percpu - -fcoe_get_paged_crc_eof() relies on the caller having preemption -disabled to ensure the per-CPU fcoe_percpu context remains valid -throughout the call. This is done by either holding spinlocks -(such as bnx2fc_global_lock or qedf_global_lock) or the get_cpu() -from fcoe_alloc_paged_crc_eof(). This last one breaks PREEMPT_RT -semantics as there can be memory allocation and end up sleeping -in atomic contexts. - -Introduce a local_lock_t to struct fcoe_percpu that will keep the -non-RT case the same, mapping to preempt_disable/enable, while -RT will use a per-CPU spinlock allowing the region to be preemptible -but still maintain CPU locality. The other users of fcoe_percpu -are already safe in this regard and do not require local_lock()ing. 
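
Reduced to a made-up example, the pattern looks like this (struct
example_pcpu and example_update() are illustrative only and not taken
from this patch):

	#include <linux/local_lock.h>

	struct example_pcpu {
		int counter;
		local_lock_t lock;
	};
	static DEFINE_PER_CPU(struct example_pcpu, example_pcpu) = {
		.lock = INIT_LOCAL_LOCK(lock),
	};

	static void example_update(void)
	{
		/* !PREEMPT_RT: disables preemption; PREEMPT_RT: takes a
		 * per-CPU spinlock, so the section stays preemptible
		 * while keeping CPU locality. */
		local_lock(&example_pcpu.lock);
		this_cpu_inc(example_pcpu.counter);
		local_unlock(&example_pcpu.lock);
	}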
- -Signed-off-by: Davidlohr Bueso -Acked-by: Sebastian Andrzej Siewior -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20211117025956.79616-3-dave@stgolabs.net -Link: https://lore.kernel.org/r/20220506105758.283887-2-bigeasy@linutronix.de ---- - drivers/scsi/fcoe/fcoe.c | 6 ++++-- - include/scsi/libfcoe.h | 2 ++ - 2 files changed, 6 insertions(+), 2 deletions(-) - ---- a/drivers/scsi/fcoe/fcoe.c -+++ b/drivers/scsi/fcoe/fcoe.c -@@ -1453,9 +1453,10 @@ static int fcoe_alloc_paged_crc_eof(stru - struct fcoe_percpu_s *fps; - int rc; - -- fps = &get_cpu_var(fcoe_percpu); -+ local_lock(&fcoe_percpu.lock); -+ fps = this_cpu_ptr(&fcoe_percpu); - rc = fcoe_get_paged_crc_eof(skb, tlen, fps); -- put_cpu_var(fcoe_percpu); -+ local_unlock(&fcoe_percpu.lock); - - return rc; - } -@@ -2488,6 +2489,7 @@ static int __init fcoe_init(void) - p = per_cpu_ptr(&fcoe_percpu, cpu); - INIT_WORK(&p->work, fcoe_receive_work); - skb_queue_head_init(&p->fcoe_rx_list); -+ local_lock_init(&p->lock); - } - - /* Setup link change notification */ ---- a/include/scsi/libfcoe.h -+++ b/include/scsi/libfcoe.h -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -326,6 +327,7 @@ struct fcoe_percpu_s { - struct sk_buff_head fcoe_rx_list; - struct page *crc_eof_page; - int crc_eof_offset; -+ local_lock_t lock; - }; - - /** diff --git a/patches/0001-signal-Rename-send_signal-send_signal_locked.patch b/patches/0001-signal-Rename-send_signal-send_signal_locked.patch deleted file mode 100644 index 2f1ba470b873..000000000000 --- a/patches/0001-signal-Rename-send_signal-send_signal_locked.patch +++ /dev/null @@ -1,123 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:34 -0500 -Subject: [PATCH 01/12] signal: Rename send_signal send_signal_locked - -Rename send_signal and __send_signal to send_signal_locked and -__send_signal_locked to make send_signal usable outside of -signal.c. - -Signed-off-by: "Eric W. 
Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-1-ebiederm@xmission.com ---- - include/linux/signal.h | 2 ++ - kernel/signal.c | 24 ++++++++++++------------ - 2 files changed, 14 insertions(+), 12 deletions(-) - ---- a/include/linux/signal.h -+++ b/include/linux/signal.h -@@ -283,6 +283,8 @@ extern int do_send_sig_info(int sig, str - extern int group_send_sig_info(int sig, struct kernel_siginfo *info, - struct task_struct *p, enum pid_type type); - extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); -+extern int send_signal_locked(int sig, struct kernel_siginfo *info, -+ struct task_struct *p, enum pid_type type); - extern int sigprocmask(int, sigset_t *, sigset_t *); - extern void set_current_blocked(sigset_t *); - extern void __set_current_blocked(const sigset_t *); ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1071,8 +1071,8 @@ static inline bool legacy_queue(struct s - return (sig < SIGRTMIN) && sigismember(&signals->signal, sig); - } - --static int __send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, -- enum pid_type type, bool force) -+static int __send_signal_locked(int sig, struct kernel_siginfo *info, -+ struct task_struct *t, enum pid_type type, bool force) - { - struct sigpending *pending; - struct sigqueue *q; -@@ -1212,8 +1212,8 @@ static inline bool has_si_pid_and_uid(st - return ret; - } - --static int send_signal(int sig, struct kernel_siginfo *info, struct task_struct *t, -- enum pid_type type) -+int send_signal_locked(int sig, struct kernel_siginfo *info, -+ struct task_struct *t, enum pid_type type) - { - /* Should SIGKILL or SIGSTOP be received by a pid namespace init? */ - bool force = false; -@@ -1245,7 +1245,7 @@ static int send_signal(int sig, struct k - force = true; - } - } -- return __send_signal(sig, info, t, type, force); -+ return __send_signal_locked(sig, info, t, type, force); - } - - static void print_fatal_signal(int signr) -@@ -1284,7 +1284,7 @@ static int __init setup_print_fatal_sign - int - __group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) - { -- return send_signal(sig, info, p, PIDTYPE_TGID); -+ return send_signal_locked(sig, info, p, PIDTYPE_TGID); - } - - int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, -@@ -1294,7 +1294,7 @@ int do_send_sig_info(int sig, struct ker - int ret = -ESRCH; - - if (lock_task_sighand(p, &flags)) { -- ret = send_signal(sig, info, p, type); -+ ret = send_signal_locked(sig, info, p, type); - unlock_task_sighand(p, &flags); - } - -@@ -1347,7 +1347,7 @@ force_sig_info_to_task(struct kernel_sig - if (action->sa.sa_handler == SIG_DFL && - (!t->ptrace || (handler == HANDLER_EXIT))) - t->signal->flags &= ~SIGNAL_UNKILLABLE; -- ret = send_signal(sig, info, t, PIDTYPE_PID); -+ ret = send_signal_locked(sig, info, t, PIDTYPE_PID); - spin_unlock_irqrestore(&t->sighand->siglock, flags); - - return ret; -@@ -1567,7 +1567,7 @@ int kill_pid_usb_asyncio(int sig, int er - - if (sig) { - if (lock_task_sighand(p, &flags)) { -- ret = __send_signal(sig, &info, p, PIDTYPE_TGID, false); -+ ret = __send_signal_locked(sig, &info, p, PIDTYPE_TGID, false); - unlock_task_sighand(p, &flags); - } else - ret = -ESRCH; -@@ -2103,7 +2103,7 @@ bool do_notify_parent(struct task_struct - * parent's namespaces. 
- */ - if (valid_signal(sig) && sig) -- __send_signal(sig, &info, tsk->parent, PIDTYPE_TGID, false); -+ __send_signal_locked(sig, &info, tsk->parent, PIDTYPE_TGID, false); - __wake_up_parent(tsk, tsk->parent); - spin_unlock_irqrestore(&psig->siglock, flags); - -@@ -2601,7 +2601,7 @@ static int ptrace_signal(int signr, kern - /* If the (new) signal is now blocked, requeue it. */ - if (sigismember(¤t->blocked, signr) || - fatal_signal_pending(current)) { -- send_signal(signr, info, current, type); -+ send_signal_locked(signr, info, current, type); - signr = 0; - } - -@@ -4793,7 +4793,7 @@ void kdb_send_sig(struct task_struct *t, - "the deadlock.\n"); - return; - } -- ret = send_signal(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); -+ ret = send_signal_locked(sig, SEND_SIG_PRIV, t, PIDTYPE_PID); - spin_unlock(&t->sighand->siglock); - if (ret) - kdb_printf("Fail to deliver Signal %d to process %d.\n", diff --git a/patches/0002-printk-cpu-sync-always-disable-interrupts.patch b/patches/0002-printk-cpu-sync-always-disable-interrupts.patch deleted file mode 100644 index f52b200ef603..000000000000 --- a/patches/0002-printk-cpu-sync-always-disable-interrupts.patch +++ /dev/null @@ -1,53 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:24 +0206 -Subject: [PATCH 02/18] printk: cpu sync always disable interrupts - -The CPU sync functions are a NOP for !CONFIG_SMP. But for -!CONFIG_SMP they still need to disable interrupts in order to -preserve context within the CPU sync sections. - -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Reviewed-by: Sergey Senozhatsky -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-3-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/printk.h | 18 +++++++++--------- - 1 file changed, 9 insertions(+), 9 deletions(-) - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -281,9 +281,16 @@ extern int __printk_cpu_sync_try_get(voi - extern void __printk_cpu_sync_wait(void); - extern void __printk_cpu_sync_put(void); - -+#else -+ -+#define __printk_cpu_sync_try_get() true -+#define __printk_cpu_sync_wait() -+#define __printk_cpu_sync_put() -+#endif /* CONFIG_SMP */ -+ - /** -- * printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning -- * lock and disable interrupts. -+ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk -+ * cpu-reentrant spinning lock. - * @flags: Stack-allocated storage for saving local interrupt state, - * to be passed to printk_cpu_sync_put_irqrestore(). - * -@@ -322,13 +329,6 @@ extern void __printk_cpu_sync_put(void); - local_irq_restore(flags); \ - } while (0) - --#else -- --#define printk_cpu_sync_get_irqsave(flags) ((void)flags) --#define printk_cpu_sync_put_irqrestore(flags) ((void)flags) -- --#endif /* CONFIG_SMP */ -- - extern int kptr_restrict; - - /** diff --git a/patches/0002-scsi-fcoe-Use-per-CPU-API-to-update-per-CPU-statisti.patch b/patches/0002-scsi-fcoe-Use-per-CPU-API-to-update-per-CPU-statisti.patch deleted file mode 100644 index a6e7c6c558cd..000000000000 --- a/patches/0002-scsi-fcoe-Use-per-CPU-API-to-update-per-CPU-statisti.patch +++ /dev/null @@ -1,516 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 6 May 2022 12:57:56 +0200 -Subject: [PATCH 2/4] scsi: fcoe: Use per-CPU API to update per-CPU statistics. - -The per-CPU statistics (struct fc_stats) is updated by getting a stable -per-CPU pointer via get_cpu() + per_cpu_ptr() and then performing the -increment. 
This can be optimized by using this_cpu_*() which will do -whatever is needed on the architecture to perform the update safe and -efficient. -The read out of the individual value (fc_get_host_stats()) should be -done by using READ_ONCE() instead of a plain-C access. The difference is -that READ_ONCE() will always perform a single access while the plain-C -access can be splitt by the compiler into two loads if it appears -beneficial. -The usage of u64 has the side-effect that it is also 64bit wide on 32bit -architectures and the read is always split into two loads. The can lead -to strange values if the read happens during an update which alters both -32bit parts of the 64bit value. This can be circumvanted by either using -a 32bit variables on 32bit architecures or extending the statistics with -a sequence counter. - -Use this_cpu_*() API to update the statistics and READ_ONCE() to read -it. - -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220506105758.283887-3-bigeasy@linutronix.de ---- - drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 22 ++++++---------------- - drivers/scsi/bnx2fc/bnx2fc_io.c | 13 +++++-------- - drivers/scsi/fcoe/fcoe.c | 36 ++++++++++++------------------------ - drivers/scsi/fcoe/fcoe_ctlr.c | 26 +++++++++----------------- - drivers/scsi/fcoe/fcoe_transport.c | 6 +++--- - drivers/scsi/libfc/fc_fcp.c | 29 ++++++++++------------------- - drivers/scsi/libfc/fc_lport.c | 30 +++++++++++++++--------------- - drivers/scsi/qedf/qedf_main.c | 7 ++----- - 8 files changed, 62 insertions(+), 107 deletions(-) - ---- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c -+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c -@@ -273,7 +273,6 @@ static int bnx2fc_xmit(struct fc_lport * - struct fcoe_port *port; - struct fcoe_hdr *hp; - struct bnx2fc_rport *tgt; -- struct fc_stats *stats; - u8 sof, eof; - u32 crc; - unsigned int hlen, tlen, elen; -@@ -399,10 +398,8 @@ static int bnx2fc_xmit(struct fc_lport * - } - - /*update tx stats */ -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->TxFrames++; -- stats->TxWords += wlen; -- put_cpu(); -+ this_cpu_inc(lport->stats->TxFrames); -+ this_cpu_add(lport->stats->TxWords, wlen); - - /* send down to lld */ - fr_dev(fp) = lport; -@@ -512,7 +509,6 @@ static void bnx2fc_recv_frame(struct sk_ - u32 fr_len, fr_crc; - struct fc_lport *lport; - struct fcoe_rcv_info *fr; -- struct fc_stats *stats; - struct fc_frame_header *fh; - struct fcoe_crc_eof crc_eof; - struct fc_frame *fp; -@@ -543,10 +539,8 @@ static void bnx2fc_recv_frame(struct sk_ - skb_pull(skb, sizeof(struct fcoe_hdr)); - fr_len = skb->len - sizeof(struct fcoe_crc_eof); - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->RxFrames++; -- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; -- put_cpu(); -+ this_cpu_inc(lport->stats->RxFrames); -+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); -@@ -633,9 +627,7 @@ static void bnx2fc_recv_frame(struct sk_ - fr_crc = le32_to_cpu(fr_crc(fp)); - - if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- crc_err = (stats->InvalidCRCCount++); -- put_cpu(); -+ crc_err = this_cpu_inc_return(lport->stats->InvalidCRCCount); - if (crc_err < 5) - printk(KERN_WARNING PFX "dropping frame with " - "CRC error\n"); -@@ -964,9 +956,7 @@ static void bnx2fc_indicate_netevent(voi - mutex_unlock(&lport->lp_mutex); - fc_host_port_type(lport->host) = - FC_PORTTYPE_UNKNOWN; -- per_cpu_ptr(lport->stats, -- get_cpu())->LinkFailureCount++; -- 
put_cpu(); -+ this_cpu_inc(lport->stats->LinkFailureCount); - fcoe_clean_pending_queue(lport); - wait_for_upload = 1; - } ---- a/drivers/scsi/bnx2fc/bnx2fc_io.c -+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c -@@ -2032,7 +2032,6 @@ int bnx2fc_post_io_req(struct bnx2fc_rpo - struct bnx2fc_interface *interface = port->priv; - struct bnx2fc_hba *hba = interface->hba; - struct fc_lport *lport = port->lport; -- struct fc_stats *stats; - int task_idx, index; - u16 xid; - -@@ -2045,20 +2044,18 @@ int bnx2fc_post_io_req(struct bnx2fc_rpo - io_req->data_xfer_len = scsi_bufflen(sc_cmd); - bnx2fc_priv(sc_cmd)->io_req = io_req; - -- stats = per_cpu_ptr(lport->stats, get_cpu()); - if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { - io_req->io_req_flags = BNX2FC_READ; -- stats->InputRequests++; -- stats->InputBytes += io_req->data_xfer_len; -+ this_cpu_inc(lport->stats->InputRequests); -+ this_cpu_add(lport->stats->InputBytes, io_req->data_xfer_len); - } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { - io_req->io_req_flags = BNX2FC_WRITE; -- stats->OutputRequests++; -- stats->OutputBytes += io_req->data_xfer_len; -+ this_cpu_inc(lport->stats->OutputRequests); -+ this_cpu_add(lport->stats->OutputBytes, io_req->data_xfer_len); - } else { - io_req->io_req_flags = 0; -- stats->ControlRequests++; -+ this_cpu_inc(lport->stats->ControlRequests); - } -- put_cpu(); - - xid = io_req->xid; - ---- a/drivers/scsi/fcoe/fcoe.c -+++ b/drivers/scsi/fcoe/fcoe.c -@@ -1434,8 +1434,7 @@ static int fcoe_rcv(struct sk_buff *skb, - - return NET_RX_SUCCESS; - err: -- per_cpu_ptr(lport->stats, get_cpu())->ErrorFrames++; -- put_cpu(); -+ this_cpu_inc(lport->stats->ErrorFrames); - err2: - kfree_skb(skb); - return NET_RX_DROP; -@@ -1475,7 +1474,6 @@ static int fcoe_xmit(struct fc_lport *lp - struct ethhdr *eh; - struct fcoe_crc_eof *cp; - struct sk_buff *skb; -- struct fc_stats *stats; - struct fc_frame_header *fh; - unsigned int hlen; /* header length implies the version */ - unsigned int tlen; /* trailer length */ -@@ -1586,10 +1584,8 @@ static int fcoe_xmit(struct fc_lport *lp - skb_shinfo(skb)->gso_size = 0; - } - /* update tx stats: regardless if LLD fails */ -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->TxFrames++; -- stats->TxWords += wlen; -- put_cpu(); -+ this_cpu_inc(lport->stats->TxFrames); -+ this_cpu_add(lport->stats->TxWords, wlen); - - /* send down to lld */ - fr_dev(fp) = lport; -@@ -1611,7 +1607,6 @@ static inline int fcoe_filter_frames(str - struct fcoe_interface *fcoe; - struct fc_frame_header *fh; - struct sk_buff *skb = (struct sk_buff *)fp; -- struct fc_stats *stats; - - /* - * We only check CRC if no offload is available and if it is -@@ -1641,11 +1636,8 @@ static inline int fcoe_filter_frames(str - return 0; - } - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->InvalidCRCCount++; -- if (stats->InvalidCRCCount < 5) -+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < 5) - printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); -- put_cpu(); - return -EINVAL; - } - -@@ -1658,7 +1650,6 @@ static void fcoe_recv_frame(struct sk_bu - u32 fr_len; - struct fc_lport *lport; - struct fcoe_rcv_info *fr; -- struct fc_stats *stats; - struct fcoe_crc_eof crc_eof; - struct fc_frame *fp; - struct fcoe_hdr *hp; -@@ -1686,9 +1677,11 @@ static void fcoe_recv_frame(struct sk_bu - */ - hp = (struct fcoe_hdr *) skb_network_header(skb); - -- stats = per_cpu_ptr(lport->stats, get_cpu()); - if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { -- if (stats->ErrorFrames < 5) -+ struct fc_stats 
*stats; -+ -+ stats = per_cpu_ptr(lport->stats, raw_smp_processor_id()); -+ if (READ_ONCE(stats->ErrorFrames) < 5) - printk(KERN_WARNING "fcoe: FCoE version " - "mismatch: The frame has " - "version %x, but the " -@@ -1701,8 +1694,8 @@ static void fcoe_recv_frame(struct sk_bu - skb_pull(skb, sizeof(struct fcoe_hdr)); - fr_len = skb->len - sizeof(struct fcoe_crc_eof); - -- stats->RxFrames++; -- stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; -+ this_cpu_inc(lport->stats->RxFrames); -+ this_cpu_add(lport->stats->RxWords, fr_len / FCOE_WORD_TO_BYTE); - - fp = (struct fc_frame *)skb; - fc_frame_init(fp); -@@ -1718,13 +1711,11 @@ static void fcoe_recv_frame(struct sk_bu - goto drop; - - if (!fcoe_filter_frames(lport, fp)) { -- put_cpu(); - fc_exch_recv(lport, fp); - return; - } - drop: -- stats->ErrorFrames++; -- put_cpu(); -+ this_cpu_inc(lport->stats->ErrorFrames); - kfree_skb(skb); - } - -@@ -1848,7 +1839,6 @@ static int fcoe_device_notification(stru - struct net_device *netdev = netdev_notifier_info_to_dev(ptr); - struct fcoe_ctlr *ctlr; - struct fcoe_interface *fcoe; -- struct fc_stats *stats; - u32 link_possible = 1; - u32 mfs; - int rc = NOTIFY_OK; -@@ -1922,9 +1912,7 @@ static int fcoe_device_notification(stru - break; - case FCOE_CTLR_ENABLED: - case FCOE_CTLR_UNUSED: -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->LinkFailureCount++; -- put_cpu(); -+ this_cpu_inc(lport->stats->LinkFailureCount); - fcoe_clean_pending_queue(lport); - } - } ---- a/drivers/scsi/fcoe/fcoe_ctlr.c -+++ b/drivers/scsi/fcoe/fcoe_ctlr.c -@@ -824,22 +824,21 @@ static unsigned long fcoe_ctlr_age_fcfs( - unsigned long deadline; - unsigned long sel_time = 0; - struct list_head del_list; -- struct fc_stats *stats; - - INIT_LIST_HEAD(&del_list); - -- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); -- - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; - if (fip->sel_fcf == fcf) { - if (time_after(jiffies, deadline)) { -- stats->MissDiscAdvCount++; -+ u64 miss_cnt; -+ -+ miss_cnt = this_cpu_inc_return(fip->lp->stats->MissDiscAdvCount); - printk(KERN_INFO "libfcoe: host%d: " - "Missing Discovery Advertisement " - "for fab %16.16llx count %lld\n", - fip->lp->host->host_no, fcf->fabric_name, -- stats->MissDiscAdvCount); -+ miss_cnt); - } else if (time_after(next_timer, deadline)) - next_timer = deadline; - } -@@ -855,7 +854,7 @@ static unsigned long fcoe_ctlr_age_fcfs( - */ - list_del(&fcf->list); - list_add(&fcf->list, &del_list); -- stats->VLinkFailureCount++; -+ this_cpu_inc(fip->lp->stats->VLinkFailureCount); - } else { - if (time_after(next_timer, deadline)) - next_timer = deadline; -@@ -864,7 +863,6 @@ static unsigned long fcoe_ctlr_age_fcfs( - sel_time = fcf->time; - } - } -- put_cpu(); - - list_for_each_entry_safe(fcf, next, &del_list, list) { - /* Removes fcf from current list */ -@@ -1142,7 +1140,6 @@ static void fcoe_ctlr_recv_els(struct fc - struct fip_desc *desc; - struct fip_encaps *els; - struct fcoe_fcf *sel; -- struct fc_stats *stats; - enum fip_desc_type els_dtype = 0; - u8 els_op; - u8 sub; -@@ -1286,10 +1283,8 @@ static void fcoe_ctlr_recv_els(struct fc - fr_dev(fp) = lport; - fr_encaps(fp) = els_dtype; - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->RxFrames++; -- stats->RxWords += skb->len / FIP_BPW; -- put_cpu(); -+ this_cpu_inc(lport->stats->RxFrames); -+ this_cpu_add(lport->stats->RxWords, skb->len / FIP_BPW); - - fc_exch_recv(lport, fp); - return; -@@ -1427,9 +1422,7 @@ static void 
fcoe_ctlr_recv_clr_vlink(str - ntoh24(vp->fd_fc_id)); - if (vn_port && (vn_port == lport)) { - mutex_lock(&fip->ctlr_mutex); -- per_cpu_ptr(lport->stats, -- get_cpu())->VLinkFailureCount++; -- put_cpu(); -+ this_cpu_inc(lport->stats->VLinkFailureCount); - fcoe_ctlr_reset(fip); - mutex_unlock(&fip->ctlr_mutex); - } -@@ -1457,8 +1450,7 @@ static void fcoe_ctlr_recv_clr_vlink(str - * followed by physical port - */ - mutex_lock(&fip->ctlr_mutex); -- per_cpu_ptr(lport->stats, get_cpu())->VLinkFailureCount++; -- put_cpu(); -+ this_cpu_inc(lport->stats->VLinkFailureCount); - fcoe_ctlr_reset(fip); - mutex_unlock(&fip->ctlr_mutex); - ---- a/drivers/scsi/fcoe/fcoe_transport.c -+++ b/drivers/scsi/fcoe/fcoe_transport.c -@@ -183,9 +183,9 @@ void __fcoe_get_lesb(struct fc_lport *lp - memset(lesb, 0, sizeof(*lesb)); - for_each_possible_cpu(cpu) { - stats = per_cpu_ptr(lport->stats, cpu); -- lfc += stats->LinkFailureCount; -- vlfc += stats->VLinkFailureCount; -- mdac += stats->MissDiscAdvCount; -+ lfc += READ_ONCE(stats->LinkFailureCount); -+ vlfc += READ_ONCE(stats->VLinkFailureCount); -+ mdac += READ_ONCE(stats->MissDiscAdvCount); - } - lesb->lesb_link_fail = htonl(lfc); - lesb->lesb_vlink_fail = htonl(vlfc); ---- a/drivers/scsi/libfc/fc_fcp.c -+++ b/drivers/scsi/libfc/fc_fcp.c -@@ -143,8 +143,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_all - INIT_LIST_HEAD(&fsp->list); - spin_lock_init(&fsp->scsi_pkt_lock); - } else { -- per_cpu_ptr(lport->stats, get_cpu())->FcpPktAllocFails++; -- put_cpu(); -+ this_cpu_inc(lport->stats->FcpPktAllocFails); - } - return fsp; - } -@@ -266,8 +265,7 @@ static int fc_fcp_send_abort(struct fc_f - if (!fsp->seq_ptr) - return -EINVAL; - -- per_cpu_ptr(fsp->lp->stats, get_cpu())->FcpPktAborts++; -- put_cpu(); -+ this_cpu_inc(fsp->lp->stats->FcpPktAborts); - - fsp->state |= FC_SRB_ABORT_PENDING; - rc = fc_seq_exch_abort(fsp->seq_ptr, 0); -@@ -436,8 +434,7 @@ static inline struct fc_frame *fc_fcp_fr - if (likely(fp)) - return fp; - -- per_cpu_ptr(lport->stats, get_cpu())->FcpFrameAllocFails++; -- put_cpu(); -+ this_cpu_inc(lport->stats->FcpFrameAllocFails); - /* error case */ - fc_fcp_can_queue_ramp_down(lport); - shost_printk(KERN_ERR, lport->host, -@@ -471,7 +468,6 @@ static void fc_fcp_recv_data(struct fc_f - { - struct scsi_cmnd *sc = fsp->cmd; - struct fc_lport *lport = fsp->lp; -- struct fc_stats *stats; - struct fc_frame_header *fh; - size_t start_offset; - size_t offset; -@@ -533,14 +529,12 @@ static void fc_fcp_recv_data(struct fc_f - - if (~crc != le32_to_cpu(fr_crc(fp))) { - crc_err: -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->ErrorFrames++; -+ this_cpu_inc(lport->stats->ErrorFrames); - /* per cpu count, not total count, but OK for limit */ -- if (stats->InvalidCRCCount++ < FC_MAX_ERROR_CNT) -+ if (this_cpu_inc_return(lport->stats->InvalidCRCCount) < FC_MAX_ERROR_CNT) - printk(KERN_WARNING "libfc: CRC error on data " - "frame for port (%6.6x)\n", - lport->port_id); -- put_cpu(); - /* - * Assume the frame is total garbage. 
- * We may have copied it over the good part -@@ -1861,7 +1855,6 @@ int fc_queuecommand(struct Scsi_Host *sh - struct fc_fcp_pkt *fsp; - int rval; - int rc = 0; -- struct fc_stats *stats; - - rval = fc_remote_port_chkready(rport); - if (rval) { -@@ -1913,20 +1906,18 @@ int fc_queuecommand(struct Scsi_Host *sh - /* - * setup the data direction - */ -- stats = per_cpu_ptr(lport->stats, get_cpu()); - if (sc_cmd->sc_data_direction == DMA_FROM_DEVICE) { - fsp->req_flags = FC_SRB_READ; -- stats->InputRequests++; -- stats->InputBytes += fsp->data_len; -+ this_cpu_inc(lport->stats->InputRequests); -+ this_cpu_add(lport->stats->InputBytes, fsp->data_len); - } else if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) { - fsp->req_flags = FC_SRB_WRITE; -- stats->OutputRequests++; -- stats->OutputBytes += fsp->data_len; -+ this_cpu_inc(lport->stats->OutputRequests); -+ this_cpu_add(lport->stats->OutputBytes, fsp->data_len); - } else { - fsp->req_flags = 0; -- stats->ControlRequests++; -+ this_cpu_inc(lport->stats->ControlRequests); - } -- put_cpu(); - - /* - * send it to the lower layer ---- a/drivers/scsi/libfc/fc_lport.c -+++ b/drivers/scsi/libfc/fc_lport.c -@@ -308,21 +308,21 @@ struct fc_host_statistics *fc_get_host_s - - stats = per_cpu_ptr(lport->stats, cpu); - -- fc_stats->tx_frames += stats->TxFrames; -- fc_stats->tx_words += stats->TxWords; -- fc_stats->rx_frames += stats->RxFrames; -- fc_stats->rx_words += stats->RxWords; -- fc_stats->error_frames += stats->ErrorFrames; -- fc_stats->invalid_crc_count += stats->InvalidCRCCount; -- fc_stats->fcp_input_requests += stats->InputRequests; -- fc_stats->fcp_output_requests += stats->OutputRequests; -- fc_stats->fcp_control_requests += stats->ControlRequests; -- fcp_in_bytes += stats->InputBytes; -- fcp_out_bytes += stats->OutputBytes; -- fc_stats->fcp_packet_alloc_failures += stats->FcpPktAllocFails; -- fc_stats->fcp_packet_aborts += stats->FcpPktAborts; -- fc_stats->fcp_frame_alloc_failures += stats->FcpFrameAllocFails; -- fc_stats->link_failure_count += stats->LinkFailureCount; -+ fc_stats->tx_frames += READ_ONCE(stats->TxFrames); -+ fc_stats->tx_words += READ_ONCE(stats->TxWords); -+ fc_stats->rx_frames += READ_ONCE(stats->RxFrames); -+ fc_stats->rx_words += READ_ONCE(stats->RxWords); -+ fc_stats->error_frames += READ_ONCE(stats->ErrorFrames); -+ fc_stats->invalid_crc_count += READ_ONCE(stats->InvalidCRCCount); -+ fc_stats->fcp_input_requests += READ_ONCE(stats->InputRequests); -+ fc_stats->fcp_output_requests += READ_ONCE(stats->OutputRequests); -+ fc_stats->fcp_control_requests += READ_ONCE(stats->ControlRequests); -+ fcp_in_bytes += READ_ONCE(stats->InputBytes); -+ fcp_out_bytes += READ_ONCE(stats->OutputBytes); -+ fc_stats->fcp_packet_alloc_failures += READ_ONCE(stats->FcpPktAllocFails); -+ fc_stats->fcp_packet_aborts += READ_ONCE(stats->FcpPktAborts); -+ fc_stats->fcp_frame_alloc_failures += READ_ONCE(stats->FcpFrameAllocFails); -+ fc_stats->link_failure_count += READ_ONCE(stats->LinkFailureCount); - } - fc_stats->fcp_input_megabytes = div_u64(fcp_in_bytes, 1000000); - fc_stats->fcp_output_megabytes = div_u64(fcp_out_bytes, 1000000); ---- a/drivers/scsi/qedf/qedf_main.c -+++ b/drivers/scsi/qedf/qedf_main.c -@@ -1067,7 +1067,6 @@ static int qedf_xmit(struct fc_lport *lp - u32 crc; - unsigned int hlen, tlen, elen; - int wlen; -- struct fc_stats *stats; - struct fc_lport *tmp_lport; - struct fc_lport *vn_port = NULL; - struct qedf_rport *fcport; -@@ -1215,10 +1214,8 @@ static int qedf_xmit(struct fc_lport *lp - hp->fcoe_sof = sof; - - /*update tx 
stats */ -- stats = per_cpu_ptr(lport->stats, get_cpu()); -- stats->TxFrames++; -- stats->TxWords += wlen; -- put_cpu(); -+ this_cpu_inc(lport->stats->TxFrames); -+ this_cpu_add(lport->stats->TxWords, wlen); - - /* Get VLAN ID from skb for printing purposes */ - __vlan_hwaccel_get_tag(skb, &vlan_tci); diff --git a/patches/0002-signal-Replace-__group_send_sig_info-with-send_signa.patch b/patches/0002-signal-Replace-__group_send_sig_info-with-send_signa.patch deleted file mode 100644 index 797cefba2260..000000000000 --- a/patches/0002-signal-Replace-__group_send_sig_info-with-send_signa.patch +++ /dev/null @@ -1,96 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:35 -0500 -Subject: [PATCH 02/12] signal: Replace __group_send_sig_info with - send_signal_locked - -The function __group_send_sig_info is just a light wrapper around -send_signal_locked with one parameter fixed to a constant value. As -the wrapper adds no real value update the code to directly call the -wrapped function. - -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-2-ebiederm@xmission.com ---- - drivers/tty/tty_jobctrl.c | 4 ++-- - include/linux/signal.h | 1 - - kernel/signal.c | 8 +------- - kernel/time/posix-cpu-timers.c | 6 +++--- - 4 files changed, 6 insertions(+), 13 deletions(-) - ---- a/drivers/tty/tty_jobctrl.c -+++ b/drivers/tty/tty_jobctrl.c -@@ -215,8 +215,8 @@ int tty_signal_session_leader(struct tty - spin_unlock_irq(&p->sighand->siglock); - continue; - } -- __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p); -- __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p); -+ send_signal_locked(SIGHUP, SEND_SIG_PRIV, p, PIDTYPE_TGID); -+ send_signal_locked(SIGCONT, SEND_SIG_PRIV, p, PIDTYPE_TGID); - put_pid(p->signal->tty_old_pgrp); /* A noop */ - spin_lock(&tty->ctrl.lock); - tty_pgrp = get_pid(tty->ctrl.pgrp); ---- a/include/linux/signal.h -+++ b/include/linux/signal.h -@@ -282,7 +282,6 @@ extern int do_send_sig_info(int sig, str - struct task_struct *p, enum pid_type type); - extern int group_send_sig_info(int sig, struct kernel_siginfo *info, - struct task_struct *p, enum pid_type type); --extern int __group_send_sig_info(int, struct kernel_siginfo *, struct task_struct *); - extern int send_signal_locked(int sig, struct kernel_siginfo *info, - struct task_struct *p, enum pid_type type); - extern int sigprocmask(int, sigset_t *, sigset_t *); ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -1281,12 +1281,6 @@ static int __init setup_print_fatal_sign - - __setup("print-fatal-signals=", setup_print_fatal_signals); - --int --__group_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p) --{ -- return send_signal_locked(sig, info, p, PIDTYPE_TGID); --} -- - int do_send_sig_info(int sig, struct kernel_siginfo *info, struct task_struct *p, - enum pid_type type) - { -@@ -2173,7 +2167,7 @@ static void do_notify_parent_cldstop(str - spin_lock_irqsave(&sighand->siglock, flags); - if (sighand->action[SIGCHLD-1].sa.sa_handler != SIG_IGN && - !(sighand->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDSTOP)) -- __group_send_sig_info(SIGCHLD, &info, parent); -+ send_signal_locked(SIGCHLD, &info, parent, PIDTYPE_TGID); - /* - * Even if SIGCHLD is not generated, we must wake up wait4 calls. 
- */ ---- a/kernel/time/posix-cpu-timers.c -+++ b/kernel/time/posix-cpu-timers.c -@@ -870,7 +870,7 @@ static inline void check_dl_overrun(stru - { - if (tsk->dl.dl_overrun) { - tsk->dl.dl_overrun = 0; -- __group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk); -+ send_signal_locked(SIGXCPU, SEND_SIG_PRIV, tsk, PIDTYPE_TGID); - } - } - -@@ -884,7 +884,7 @@ static bool check_rlimit(u64 time, u64 l - rt ? "RT" : "CPU", hard ? "hard" : "soft", - current->comm, task_pid_nr(current)); - } -- __group_send_sig_info(signo, SEND_SIG_PRIV, current); -+ send_signal_locked(signo, SEND_SIG_PRIV, current, PIDTYPE_TGID); - return true; - } - -@@ -958,7 +958,7 @@ static void check_cpu_itimer(struct task - trace_itimer_expire(signo == SIGPROF ? - ITIMER_PROF : ITIMER_VIRTUAL, - task_tgid(tsk), cur_time); -- __group_send_sig_info(signo, SEND_SIG_PRIV, tsk); -+ send_signal_locked(signo, SEND_SIG_PRIV, tsk, PIDTYPE_TGID); - } - - if (it->expires && it->expires < *expires) diff --git a/patches/0002-smp-Rename-flush_smp_call_function_from_idle.patch b/patches/0002-smp-Rename-flush_smp_call_function_from_idle.patch deleted file mode 100644 index de9f037a3f19..000000000000 --- a/patches/0002-smp-Rename-flush_smp_call_function_from_idle.patch +++ /dev/null @@ -1,125 +0,0 @@ -From: Thomas Gleixner -Date: Wed, 13 Apr 2022 15:31:03 +0200 -Subject: [PATCH 2/3] smp: Rename flush_smp_call_function_from_idle() - -This is invoked from the stopper thread too, which is definitely not idle. -Rename it to flush_smp_call_function_queue() and fixup the callers. - -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220413133024.305001096@linutronix.de ---- - kernel/sched/core.c | 2 +- - kernel/sched/idle.c | 2 +- - kernel/sched/smp.h | 4 ++-- - kernel/smp.c | 27 ++++++++++++++++++++------- - 4 files changed, 24 insertions(+), 11 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2411,7 +2411,7 @@ static int migration_cpu_stop(void *data - * __migrate_task() such that we will not miss enforcing cpus_ptr - * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. - */ -- flush_smp_call_function_from_idle(); -+ flush_smp_call_function_queue(); - - raw_spin_lock(&p->pi_lock); - rq_lock(rq, &rf); ---- a/kernel/sched/idle.c -+++ b/kernel/sched/idle.c -@@ -327,7 +327,7 @@ static void do_idle(void) - * RCU relies on this call to be done outside of an RCU read-side - * critical section. - */ -- flush_smp_call_function_from_idle(); -+ flush_smp_call_function_queue(); - schedule_idle(); - - if (unlikely(klp_patch_pending(current))) ---- a/kernel/sched/smp.h -+++ b/kernel/sched/smp.h -@@ -9,7 +9,7 @@ extern void sched_ttwu_pending(void *arg - extern void send_call_function_single_ipi(int cpu); - - #ifdef CONFIG_SMP --extern void flush_smp_call_function_from_idle(void); -+extern void flush_smp_call_function_queue(void); - #else --static inline void flush_smp_call_function_from_idle(void) { } -+static inline void flush_smp_call_function_queue(void) { } - #endif ---- a/kernel/smp.c -+++ b/kernel/smp.c -@@ -96,7 +96,7 @@ static DEFINE_PER_CPU_ALIGNED(struct cal - - static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); - --static void flush_smp_call_function_queue(bool warn_cpu_offline); -+static void __flush_smp_call_function_queue(bool warn_cpu_offline); - - int smpcfd_prepare_cpu(unsigned int cpu) - { -@@ -141,7 +141,7 @@ int smpcfd_dying_cpu(unsigned int cpu) - * ensure that the outgoing CPU doesn't go offline with work - * still pending. 
- */ -- flush_smp_call_function_queue(false); -+ __flush_smp_call_function_queue(false); - irq_work_run(); - return 0; - } -@@ -541,11 +541,11 @@ void generic_smp_call_function_single_in - { - cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->gotipi, CFD_SEQ_NOCPU, - smp_processor_id(), CFD_SEQ_GOTIPI); -- flush_smp_call_function_queue(true); -+ __flush_smp_call_function_queue(true); - } - - /** -- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks -+ * __flush_smp_call_function_queue - Flush pending smp-call-function callbacks - * - * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an - * offline CPU. Skip this check if set to 'false'. -@@ -558,7 +558,7 @@ void generic_smp_call_function_single_in - * Loop through the call_single_queue and run all the queued callbacks. - * Must be called with interrupts disabled. - */ --static void flush_smp_call_function_queue(bool warn_cpu_offline) -+static void __flush_smp_call_function_queue(bool warn_cpu_offline) - { - call_single_data_t *csd, *csd_next; - struct llist_node *entry, *prev; -@@ -681,7 +681,20 @@ static void flush_smp_call_function_queu - smp_processor_id(), CFD_SEQ_HDLEND); - } - --void flush_smp_call_function_from_idle(void) -+ -+/** -+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks -+ * from task context (idle, migration thread) -+ * -+ * When TIF_POLLING_NRFLAG is supported and a CPU is in idle and has it -+ * set, then remote CPUs can avoid sending IPIs and wake the idle CPU by -+ * setting TIF_NEED_RESCHED. The idle task on the woken up CPU has to -+ * handle queued SMP function calls before scheduling. -+ * -+ * The migration thread has to ensure that an eventually pending wakeup has -+ * been handled before it migrates a task. -+ */ -+void flush_smp_call_function_queue(void) - { - unsigned long flags; - -@@ -691,7 +704,7 @@ void flush_smp_call_function_from_idle(v - cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU, - smp_processor_id(), CFD_SEQ_IDLE); - local_irq_save(flags); -- flush_smp_call_function_queue(true); -+ __flush_smp_call_function_queue(true); - if (local_softirq_pending()) - do_softirq(); - diff --git a/patches/0003-printk-add-missing-memory-barrier-to-wake_up_klogd.patch b/patches/0003-printk-add-missing-memory-barrier-to-wake_up_klogd.patch deleted file mode 100644 index f12d17680c71..000000000000 --- a/patches/0003-printk-add-missing-memory-barrier-to-wake_up_klogd.patch +++ /dev/null @@ -1,89 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:25 +0206 -Subject: [PATCH 03/18] printk: add missing memory barrier to wake_up_klogd() - -It is important that any new records are visible to preparing -waiters before the waker checks if the wait queue is empty. -Otherwise it is possible that: - -- there are new records available -- the waker sees an empty wait queue and does not wake -- the preparing waiter sees no new records and begins to wait - -This is exactly the problem that the function description of -waitqueue_active() warns about. - -Use wq_has_sleeper() instead of waitqueue_active() because it -includes the necessary full memory barrier. 
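The pairing reduces to a small sketch (illustrative only, with made-up names; the real code is in the hunks below): the waiter must be on the queue before it tests the condition, and the waker must publish the new record before it tests the queue.

	/* Sketch, not the printk code: @cond stands in for "new records". */
	static DECLARE_WAIT_QUEUE_HEAD(wq);
	static int cond;

	static int waiter(void)
	{
		/*
		 * prepare_to_wait_event() adds the task to @wq and issues a
		 * full barrier via set_current_state() before the condition
		 * is checked.
		 */
		return wait_event_interruptible(wq, READ_ONCE(cond));
	}

	static void waker(void)
	{
		WRITE_ONCE(cond, 1);	/* publish the new record */
		/*
		 * wq_has_sleeper() issues a full barrier before testing the
		 * queue, so the store above cannot pass the emptiness check.
		 */
		if (wq_has_sleeper(&wq))
			wake_up_interruptible(&wq);
	}

Either the waker sees the sleeper and issues the wakeup, or the waiter sees @cond already set; the lost-wakeup window described above is closed.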
- -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-4-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 39 ++++++++++++++++++++++++++++++++++++--- - 1 file changed, 36 insertions(+), 3 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -746,8 +746,19 @@ static ssize_t devkmsg_read(struct file - goto out; - } - -+ /* -+ * Guarantee this task is visible on the waitqueue before -+ * checking the wake condition. -+ * -+ * The full memory barrier within set_current_state() of -+ * prepare_to_wait_event() pairs with the full memory barrier -+ * within wq_has_sleeper(). -+ * -+ * This pairs with wake_up_klogd:A. -+ */ - ret = wait_event_interruptible(log_wait, -- prb_read_valid(prb, atomic64_read(&user->seq), r)); -+ prb_read_valid(prb, -+ atomic64_read(&user->seq), r)); /* LMM(devkmsg_read:A) */ - if (ret) - goto out; - } -@@ -1513,7 +1524,18 @@ static int syslog_print(char __user *buf - seq = syslog_seq; - - mutex_unlock(&syslog_lock); -- len = wait_event_interruptible(log_wait, prb_read_valid(prb, seq, NULL)); -+ /* -+ * Guarantee this task is visible on the waitqueue before -+ * checking the wake condition. -+ * -+ * The full memory barrier within set_current_state() of -+ * prepare_to_wait_event() pairs with the full memory barrier -+ * within wq_has_sleeper(). -+ * -+ * This pairs with wake_up_klogd:A. -+ */ -+ len = wait_event_interruptible(log_wait, -+ prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ - mutex_lock(&syslog_lock); - - if (len) -@@ -3316,7 +3338,18 @@ void wake_up_klogd(void) - return; - - preempt_disable(); -- if (waitqueue_active(&log_wait)) { -+ /* -+ * Guarantee any new records can be seen by tasks preparing to wait -+ * before this context checks if the wait queue is empty. -+ * -+ * The full memory barrier within wq_has_sleeper() pairs with the full -+ * memory barrier within set_current_state() of -+ * prepare_to_wait_event(), which is called after ___wait_event() adds -+ * the waiter but before it has checked the wait condition. -+ * -+ * This pairs with devkmsg_read:A and syslog_print:A. -+ */ -+ if (wq_has_sleeper(&log_wait)) { /* LMM(wake_up_klogd:A) */ - this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); - irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); - } diff --git a/patches/0003-ptrace-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch b/patches/0003-ptrace-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch deleted file mode 100644 index 150d99b8ab84..000000000000 --- a/patches/0003-ptrace-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch +++ /dev/null @@ -1,133 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:36 -0500 -Subject: [PATCH 03/12] ptrace/um: Replace PT_DTRACE with TIF_SINGLESTEP - -User mode linux is the last user of the PT_DTRACE flag. Using the flag to indicate -single stepping is a little confusing and worse changing tsk->ptrace without locking -could potentionally cause problems. - -So use a thread info flag with a better name instead of flag in tsk->ptrace. - -Remove the definition PT_DTRACE as uml is the last user. - -Cc: stable@vger.kernel.org -Acked-by: Johannes Berg -Signed-off-by: "Eric W. 
Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-3-ebiederm@xmission.com ---- - arch/um/include/asm/thread_info.h | 2 ++ - arch/um/kernel/exec.c | 2 +- - arch/um/kernel/process.c | 2 +- - arch/um/kernel/ptrace.c | 8 ++++---- - arch/um/kernel/signal.c | 4 ++-- - include/linux/ptrace.h | 1 - - 6 files changed, 10 insertions(+), 9 deletions(-) - ---- a/arch/um/include/asm/thread_info.h -+++ b/arch/um/include/asm/thread_info.h -@@ -60,6 +60,7 @@ static inline struct thread_info *curren - #define TIF_RESTORE_SIGMASK 7 - #define TIF_NOTIFY_RESUME 8 - #define TIF_SECCOMP 9 /* secure computing */ -+#define TIF_SINGLESTEP 10 /* single stepping userspace */ - - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) -@@ -68,5 +69,6 @@ static inline struct thread_info *curren - #define _TIF_MEMDIE (1 << TIF_MEMDIE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_SECCOMP (1 << TIF_SECCOMP) -+#define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) - - #endif ---- a/arch/um/kernel/exec.c -+++ b/arch/um/kernel/exec.c -@@ -43,7 +43,7 @@ void start_thread(struct pt_regs *regs, - { - PT_REGS_IP(regs) = eip; - PT_REGS_SP(regs) = esp; -- current->ptrace &= ~PT_DTRACE; -+ clear_thread_flag(TIF_SINGLESTEP); - #ifdef SUBARCH_EXECVE1 - SUBARCH_EXECVE1(regs->regs); - #endif ---- a/arch/um/kernel/process.c -+++ b/arch/um/kernel/process.c -@@ -335,7 +335,7 @@ int singlestepping(void * t) - { - struct task_struct *task = t ? t : current; - -- if (!(task->ptrace & PT_DTRACE)) -+ if (!test_thread_flag(TIF_SINGLESTEP)) - return 0; - - if (task->thread.singlestep_syscall) ---- a/arch/um/kernel/ptrace.c -+++ b/arch/um/kernel/ptrace.c -@@ -11,7 +11,7 @@ - - void user_enable_single_step(struct task_struct *child) - { -- child->ptrace |= PT_DTRACE; -+ set_tsk_thread_flag(child, TIF_SINGLESTEP); - child->thread.singlestep_syscall = 0; - - #ifdef SUBARCH_SET_SINGLESTEPPING -@@ -21,7 +21,7 @@ void user_enable_single_step(struct task - - void user_disable_single_step(struct task_struct *child) - { -- child->ptrace &= ~PT_DTRACE; -+ clear_tsk_thread_flag(child, TIF_SINGLESTEP); - child->thread.singlestep_syscall = 0; - - #ifdef SUBARCH_SET_SINGLESTEPPING -@@ -120,7 +120,7 @@ static void send_sigtrap(struct uml_pt_r - } - - /* -- * XXX Check PT_DTRACE vs TIF_SINGLESTEP for singlestepping check and -+ * XXX Check TIF_SINGLESTEP for singlestepping check and - * PT_PTRACED vs TIF_SYSCALL_TRACE for syscall tracing check - */ - int syscall_trace_enter(struct pt_regs *regs) -@@ -144,7 +144,7 @@ void syscall_trace_leave(struct pt_regs - audit_syscall_exit(regs); - - /* Fake a debug trap */ -- if (ptraced & PT_DTRACE) -+ if (test_thread_flag(TIF_SINGLESTEP)) - send_sigtrap(®s->regs, 0); - - if (!test_thread_flag(TIF_SYSCALL_TRACE)) ---- a/arch/um/kernel/signal.c -+++ b/arch/um/kernel/signal.c -@@ -53,7 +53,7 @@ static void handle_signal(struct ksignal - unsigned long sp; - int err; - -- if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED)) -+ if (test_thread_flag(TIF_SINGLESTEP) && (current->ptrace & PT_PTRACED)) - singlestep = 1; - - /* Did we come from a system call? */ -@@ -128,7 +128,7 @@ void do_signal(struct pt_regs *regs) - * on the host. The tracing thread will check this flag and - * PTRACE_SYSCALL if necessary. 
- */ -- if (current->ptrace & PT_DTRACE) -+ if (test_thread_flag(TIF_SINGLESTEP)) - current->thread.singlestep_syscall = - is_syscall(PT_REGS_IP(&current->thread.regs)); - ---- a/include/linux/ptrace.h -+++ b/include/linux/ptrace.h -@@ -30,7 +30,6 @@ extern int ptrace_access_vm(struct task_ - - #define PT_SEIZED 0x00010000 /* SEIZE used, enable new behavior */ - #define PT_PTRACED 0x00000001 --#define PT_DTRACE 0x00000002 /* delayed trace (used on m68k, i386) */ - - #define PT_OPT_FLAG_SHIFT 3 - /* PT_TRACE_* event enable flags */ diff --git a/patches/0003-scsi-libfc-Remove-get_cpu-semantics-in-fc_exch_em_al.patch b/patches/0003-scsi-libfc-Remove-get_cpu-semantics-in-fc_exch_em_al.patch deleted file mode 100644 index 6cc4c9121000..000000000000 --- a/patches/0003-scsi-libfc-Remove-get_cpu-semantics-in-fc_exch_em_al.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Davidlohr Bueso -Date: Fri, 6 May 2022 12:57:57 +0200 -Subject: [PATCH 3/4] scsi: libfc: Remove get_cpu() semantics in - fc_exch_em_alloc() - -The get_cpu() in fc_exch_em_alloc() was introduced in: - - f018b73af6db ([SCSI] libfc, libfcoe, fcoe: use smp_processor_id() only when preempt disabled) - -for no other reason than to simply use smp_processor_id() -without getting a warning, because everything is done with -the pool->lock held anyway. However, get_cpu(), by disabling -preemption, does not play well with PREEMPT_RT, particularly -when acquiring a regular (and thus sleepable) spinlock. - -Therefore remove the get_cpu() and just use the unstable value -as we will have CPU locality guarantees next by taking the lock. -The window of migration, as noted by Sebastian, is small and -even if it happens the result is correct. - -Signed-off-by: Davidlohr Bueso -Acked-by: Sebastian Andrzej Siewior -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20211117025956.79616-2-dave@stgolabs.net -Link: https://lore.kernel.org/r/20220506105758.283887-4-bigeasy@linutronix.de ---- - drivers/scsi/libfc/fc_exch.c | 3 +-- - 1 file changed, 1 insertion(+), 2 deletions(-) - ---- a/drivers/scsi/libfc/fc_exch.c -+++ b/drivers/scsi/libfc/fc_exch.c -@@ -825,10 +825,9 @@ static struct fc_exch *fc_exch_em_alloc( - } - memset(ep, 0, sizeof(*ep)); - -- cpu = get_cpu(); -+ cpu = raw_smp_processor_id(); - pool = per_cpu_ptr(mp->pool, cpu); - spin_lock_bh(&pool->lock); -- put_cpu(); - - /* peek cache of free slot */ - if (pool->left != FC_XID_UNKNOWN) { diff --git a/patches/0003-smp-Make-softirq-handling-RT-safe-in-flush_smp_call_.patch b/patches/0003-smp-Make-softirq-handling-RT-safe-in-flush_smp_call_.patch deleted file mode 100644 index f0aaf37a3623..000000000000 --- a/patches/0003-smp-Make-softirq-handling-RT-safe-in-flush_smp_call_.patch +++ /dev/null @@ -1,106 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 13 Apr 2022 15:31:05 +0200 -Subject: [PATCH 3/3] smp: Make softirq handling RT safe in - flush_smp_call_function_queue() - -flush_smp_call_function_queue() invokes do_softirq() which is not available -on PREEMPT_RT. flush_smp_call_function_queue() is invoked from the idle -task and the migration task with preemption or interrupts disabled. - -So RT kernels cannot process soft interrupts in that context as that has to -acquire 'sleeping spinlocks' which is not possible with preemption or -interrupts disabled and forbidden from the idle task anyway. - -The currently known SMP function call which raises a soft interrupt is in -the block layer, but this functionality is not enabled on RT kernels due to -latency and performance reasons.
- -RT could wake up ksoftirqd unconditionally, but this wants to be avoided if -there were soft interrupts pending already when this is invoked in the -context of the migration task. The migration task might have preempted a -threaded interrupt handler which raised a soft interrupt, but did not reach -the local_bh_enable() to process it. The "running" ksoftirqd might prevent -the handling in the interrupt thread context which is causing latency -issues. - -Add a new function which handles this case explicitly for RT and falls -back to do_softirq() on !RT kernels. In the RT case this warns when one of -the flushed SMP function calls raised a soft interrupt so this can be -investigated. - -[ tglx: Moved the RT part out of SMP code ] - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/YgKgL6aPj8aBES6G@linutronix.de -Link: https://lore.kernel.org/r/20220413133024.356509586@linutronix.de ---- - include/linux/interrupt.h | 9 +++++++++ - kernel/smp.c | 5 ++++- - kernel/softirq.c | 13 +++++++++++++ - 3 files changed, 26 insertions(+), 1 deletion(-) - ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -607,6 +607,15 @@ struct softirq_action - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); - -+#ifdef CONFIG_PREEMPT_RT -+extern void do_softirq_post_smp_call_flush(unsigned int was_pending); -+#else -+static inline void do_softirq_post_smp_call_flush(unsigned int unused) -+{ -+ do_softirq(); -+} -+#endif -+ - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); - extern void __raise_softirq_irqoff(unsigned int nr); ---- a/kernel/smp.c -+++ b/kernel/smp.c -@@ -696,6 +696,7 @@ static void __flush_smp_call_function_qu - */ - void flush_smp_call_function_queue(void) - { -+ unsigned int was_pending; - unsigned long flags; - - if (llist_empty(this_cpu_ptr(&call_single_queue))) -@@ -704,9 +705,11 @@ void flush_smp_call_function_queue(void) - cfd_seq_store(this_cpu_ptr(&cfd_seq_local)->idle, CFD_SEQ_NOCPU, - smp_processor_id(), CFD_SEQ_IDLE); - local_irq_save(flags); -+ /* Get the already pending soft interrupts for RT enabled kernels */ -+ was_pending = local_softirq_pending(); - __flush_smp_call_function_queue(true); - if (local_softirq_pending()) -- do_softirq(); -+ do_softirq_post_smp_call_flush(was_pending); - - local_irq_restore(flags); - } ---- a/kernel/softirq.c -+++ b/kernel/softirq.c -@@ -294,6 +294,19 @@ static inline void invoke_softirq(void) - wakeup_softirqd(); - } - -+/* -+ * flush_smp_call_function_queue() can raise a soft interrupt in a function -+ * call. On RT kernels this is undesired and the only known functionality -+ * in the block layer which does this is disabled on RT. If soft interrupts -+ * get raised which haven't been raised before the flush, warn so it can be -+ * investigated.
-+ */ -+void do_softirq_post_smp_call_flush(unsigned int was_pending) -+{ -+ if (WARN_ON_ONCE(was_pending != local_softirq_pending())) -+ invoke_softirq(); -+} -+ - #else /* CONFIG_PREEMPT_RT */ - - /* diff --git a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch index 682209bf95f6..374f6dfdabb5 100644 --- a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch +++ b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch @@ -36,9 +36,9 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c -@@ -518,7 +518,8 @@ void intel_pipe_update_start(struct inte +@@ -522,7 +522,8 @@ void intel_pipe_update_start(struct inte */ - intel_psr_wait_for_idle(new_crtc_state); + intel_psr_wait_for_idle_locked(new_crtc_state); - local_irq_disable(); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior crtc->debug.min_vbl = min; crtc->debug.max_vbl = max; -@@ -543,11 +544,13 @@ void intel_pipe_update_start(struct inte +@@ -547,11 +548,13 @@ void intel_pipe_update_start(struct inte break; } @@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior } finish_wait(wq, &wait); -@@ -580,7 +583,8 @@ void intel_pipe_update_start(struct inte +@@ -584,7 +587,8 @@ void intel_pipe_update_start(struct inte return; irq_disable: @@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE) -@@ -679,7 +683,8 @@ void intel_pipe_update_end(struct intel_ +@@ -685,7 +689,8 @@ void intel_pipe_update_end(struct intel_ */ intel_vrr_send_push(new_crtc_state); diff --git a/patches/0004-printk-wake-up-all-waiters.patch b/patches/0004-printk-wake-up-all-waiters.patch deleted file mode 100644 index d4e52e5cf4fc..000000000000 --- a/patches/0004-printk-wake-up-all-waiters.patch +++ /dev/null @@ -1,27 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:26 +0206 -Subject: [PATCH 04/18] printk: wake up all waiters - -There can be multiple tasks waiting for new records. They should -all be woken. Use wake_up_interruptible_all() instead of -wake_up_interruptible(). - -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-5-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -3326,7 +3326,7 @@ static void wake_up_klogd_work_func(stru - } - - if (pending & PRINTK_PENDING_WAKEUP) -- wake_up_interruptible(&log_wait); -+ wake_up_interruptible_all(&log_wait); - } - - static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = diff --git a/patches/0004-ptrace-xtensa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch b/patches/0004-ptrace-xtensa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch deleted file mode 100644 index 2ca6f66a3805..000000000000 --- a/patches/0004-ptrace-xtensa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch +++ /dev/null @@ -1,77 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:37 -0500 -Subject: [PATCH 04/12] ptrace/xtensa: Replace PT_SINGLESTEP with - TIF_SINGLESTEP - -xtensa is the last user of the PT_SINGLESTEP flag. Changing tsk->ptrace in -user_enable_single_step and user_disable_single_step without locking could -potentiallly cause problems. 
- -So use a thread info flag instead of a flag in tsk->ptrace. Use TIF_SINGLESTEP -that xtensa already had defined but unused. - -Remove the definitions of PT_SINGLESTEP and PT_BLOCKSTEP as they have no more users. - -Cc: stable@vger.kernel.org -Acked-by: Max Filippov -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-4-ebiederm@xmission.com ---- - arch/xtensa/kernel/ptrace.c | 4 ++-- - arch/xtensa/kernel/signal.c | 4 ++-- - include/linux/ptrace.h | 6 ------ - 3 files changed, 4 insertions(+), 10 deletions(-) - ---- a/arch/xtensa/kernel/ptrace.c -+++ b/arch/xtensa/kernel/ptrace.c -@@ -225,12 +225,12 @@ const struct user_regset_view *task_user - - void user_enable_single_step(struct task_struct *child) - { -- child->ptrace |= PT_SINGLESTEP; -+ set_tsk_thread_flag(child, TIF_SINGLESTEP); - } - - void user_disable_single_step(struct task_struct *child) - { -- child->ptrace &= ~PT_SINGLESTEP; -+ clear_tsk_thread_flag(child, TIF_SINGLESTEP); - } - - /* ---- a/arch/xtensa/kernel/signal.c -+++ b/arch/xtensa/kernel/signal.c -@@ -473,7 +473,7 @@ static void do_signal(struct pt_regs *re - /* Set up the stack frame */ - ret = setup_frame(&ksig, sigmask_to_save(), regs); - signal_setup_done(ret, &ksig, 0); -- if (current->ptrace & PT_SINGLESTEP) -+ if (test_thread_flag(TIF_SINGLESTEP)) - task_pt_regs(current)->icountlevel = 1; - - return; -@@ -499,7 +499,7 @@ static void do_signal(struct pt_regs *re - /* If there's no signal to deliver, we just restore the saved mask. */ - restore_saved_sigmask(); - -- if (current->ptrace & PT_SINGLESTEP) -+ if (test_thread_flag(TIF_SINGLESTEP)) - task_pt_regs(current)->icountlevel = 1; - return; - } ---- a/include/linux/ptrace.h -+++ b/include/linux/ptrace.h -@@ -46,12 +46,6 @@ extern int ptrace_access_vm(struct task_ - #define PT_EXITKILL (PTRACE_O_EXITKILL << PT_OPT_FLAG_SHIFT) - #define PT_SUSPEND_SECCOMP (PTRACE_O_SUSPEND_SECCOMP << PT_OPT_FLAG_SHIFT) - --/* single stepping state bits (used on ARM and PA-RISC) */ --#define PT_SINGLESTEP_BIT 31 --#define PT_SINGLESTEP (1<<PT_SINGLESTEP_BIT) --#define PT_BLOCKSTEP_BIT 30 --#define PT_BLOCKSTEP (1<<PT_BLOCKSTEP_BIT) diff --git a/patches/0004-scsi-bnx2fc-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch b/patches/0004-scsi-bnx2fc-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch deleted file mode 100644 --- a/patches/0004-scsi-bnx2fc-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch +++ /dev/null @@ -1,55 +0,0 @@ -From: Davidlohr Bueso -Date: Fri, 6 May 2022 12:57:58 +0200 -Subject: [PATCH 4/4] scsi: bnx2fc: Avoid using get_cpu() in - bnx2fc_cmd_alloc(). - -Using get_cpu() leads to disabling preemption and in this context it is -not possible to acquire the following spinlock_t on PREEMPT_RT because -it becomes a sleeping lock. - -Commit - 0ea5c27583e1c ("[SCSI] bnx2fc: common free list for cleanup commands") - -says that it is using get_cpu() as a fix in case the CPU is preempted. -While this might be true, the important part is that it is now using the -same CPU for locking and unlocking while previously it always relied on -smp_processor_id(). -The data structure itself is protected with a lock so it does not rely -on CPU-local access. - -Replace get_cpu() with raw_smp_processor_id() to obtain the current CPU -number which is used as an index for the per-CPU resource.
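In the abstract the pattern looks like this (a minimal sketch with made-up names, not the bnx2fc code): the spinlock, not CPU pinning, protects the per-CPU slot, so a stale CPU number only costs locality, never correctness.

	struct slot {
		spinlock_t lock;
		unsigned long used;
	};
	static DEFINE_PER_CPU(struct slot, slots);

	static void slot_use(void)
	{
		/* The task may be preempted and migrated right after this
		 * read ... */
		int cpu = raw_smp_processor_id();
		struct slot *s = &per_cpu(slots, cpu);

		/* ... but lock and unlock use the same @cpu index and the
		 * lock serializes all access to the slot. */
		spin_lock_bh(&s->lock);
		s->used++;
		spin_unlock_bh(&s->lock);
	}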
- -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220506105758.283887-5-bigeasy@linutronix.de ---- - drivers/scsi/bnx2fc/bnx2fc_io.c | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/scsi/bnx2fc/bnx2fc_io.c -+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c -@@ -472,7 +472,7 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(stru - u32 free_sqes; - u32 max_sqes; - u16 xid; -- int index = get_cpu(); -+ int index = raw_smp_processor_id(); - - max_sqes = BNX2FC_SCSI_MAX_SQES; - /* -@@ -485,7 +485,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(stru - (tgt->num_active_ios.counter >= max_sqes) || - (free_sqes + max_sqes <= BNX2FC_SQ_WQES_MAX)) { - spin_unlock_bh(&cmd_mgr->free_list_lock[index]); -- put_cpu(); - return NULL; - } - -@@ -498,7 +497,6 @@ struct bnx2fc_cmd *bnx2fc_cmd_alloc(stru - atomic_inc(&tgt->num_active_ios); - atomic_dec(&tgt->free_sqes); - spin_unlock_bh(&cmd_mgr->free_list_lock[index]); -- put_cpu(); - - INIT_LIST_HEAD(&io_req->link); - diff --git a/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch b/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch index f8c949d0d686..71b65c1abc06 100644 --- a/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch +++ b/patches/0005-drm-i915-Don-t-check-for-atomic-context-on-PREEMPT_R.patch @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h -@@ -329,7 +329,7 @@ wait_remaining_ms_from_jiffies(unsigned +@@ -334,7 +334,7 @@ wait_remaining_ms_from_jiffies(unsigned #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) /* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ diff --git a/patches/0005-printk-wake-waiters-for-safe-and-NMI-contexts.patch b/patches/0005-printk-wake-waiters-for-safe-and-NMI-contexts.patch deleted file mode 100644 index 51681069edfe..000000000000 --- a/patches/0005-printk-wake-waiters-for-safe-and-NMI-contexts.patch +++ /dev/null @@ -1,86 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:27 +0206 -Subject: [PATCH 05/18] printk: wake waiters for safe and NMI contexts - -When printk() is called from safe or NMI contexts, it will directly -store the record (vprintk_store()) and then defer the console output. -However, defer_console_output() only causes console printing and does -not wake any waiters of new records. - -Wake waiters from defer_console_output() so that they also are aware -of the new records from safe and NMI contexts. - -Fixes: 03fc7f9c99c1 ("printk/nmi: Prevent deadlock when accessing the main log buffer in NMI") -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-6-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 28 ++++++++++++++++------------ - 1 file changed, 16 insertions(+), 12 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -754,7 +754,7 @@ static ssize_t devkmsg_read(struct file - * prepare_to_wait_event() pairs with the full memory barrier - * within wq_has_sleeper(). - * -- * This pairs with wake_up_klogd:A. -+ * This pairs with __wake_up_klogd:A. - */ - ret = wait_event_interruptible(log_wait, - prb_read_valid(prb, -@@ -1532,7 +1532,7 @@ static int syslog_print(char __user *buf - * prepare_to_wait_event() pairs with the full memory barrier - * within wq_has_sleeper(). - * -- * This pairs with wake_up_klogd:A. 
-+ * This pairs with __wake_up_klogd:A. - */ - len = wait_event_interruptible(log_wait, - prb_read_valid(prb, seq, NULL)); /* LMM(syslog_print:A) */ -@@ -3332,7 +3332,7 @@ static void wake_up_klogd_work_func(stru - static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = - IRQ_WORK_INIT_LAZY(wake_up_klogd_work_func); - --void wake_up_klogd(void) -+static void __wake_up_klogd(int val) - { - if (!printk_percpu_data_ready()) - return; -@@ -3349,22 +3349,26 @@ void wake_up_klogd(void) - * - * This pairs with devkmsg_read:A and syslog_print:A. - */ -- if (wq_has_sleeper(&log_wait)) { /* LMM(wake_up_klogd:A) */ -- this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); -+ if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ -+ (val & PRINTK_PENDING_OUTPUT)) { -+ this_cpu_or(printk_pending, val); - irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); - } - preempt_enable(); - } - --void defer_console_output(void) -+void wake_up_klogd(void) - { -- if (!printk_percpu_data_ready()) -- return; -+ __wake_up_klogd(PRINTK_PENDING_WAKEUP); -+} - -- preempt_disable(); -- this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT); -- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); -- preempt_enable(); -+void defer_console_output(void) -+{ -+ /* -+ * New messages may have been added directly to the ringbuffer -+ * using vprintk_store(), so wake any waiters as well. -+ */ -+ __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); - } - - void printk_trigger_flush(void) diff --git a/patches/0005-ptrace-Remove-arch_ptrace_attach.patch b/patches/0005-ptrace-Remove-arch_ptrace_attach.patch deleted file mode 100644 index e9e518ef7cdf..000000000000 --- a/patches/0005-ptrace-Remove-arch_ptrace_attach.patch +++ /dev/null @@ -1,157 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:38 -0500 -Subject: [PATCH 05/12] ptrace: Remove arch_ptrace_attach - -The last remaining implementation of arch_ptrace_attach is ia64's -ptrace_attach_sync_user_rbs which was added at the end of 2007 in -commit aa91a2e90044 ("[IA64] Synchronize RBS on PTRACE_ATTACH"). - -Reading the comments and examining the code ptrace_attach_sync_user_rbs -has the sole purpose of saving registers to the stack when ptrace_attach -changes TASK_STOPPED to TASK_TRACED. In all other cases arch_ptrace_stop -takes care of the register saving. - -In commit d79fdd6d96f4 ("ptrace: Clean transitions between TASK_STOPPED and TRACED") -modified ptrace_attach to wake up the thread and enter ptrace_stop normally even -when the thread starts out stopped. - -This makes ptrace_attach_sync_user_rbs completely unnecessary. So just -remove it. - -Cc: linux-ia64@vger.kernel.org -Signed-off-by: "Eric W. 
Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-5-ebiederm@xmission.com ---- - arch/ia64/include/asm/ptrace.h | 4 -- - arch/ia64/kernel/ptrace.c | 57 ----------------------------------------- - kernel/ptrace.c | 18 ------------ - 3 files changed, 79 deletions(-) - ---- a/arch/ia64/include/asm/ptrace.h -+++ b/arch/ia64/include/asm/ptrace.h -@@ -139,10 +139,6 @@ static inline long regs_return_value(str - #define arch_ptrace_stop_needed() \ - (!test_thread_flag(TIF_RESTORE_RSE)) - -- extern void ptrace_attach_sync_user_rbs (struct task_struct *); -- #define arch_ptrace_attach(child) \ -- ptrace_attach_sync_user_rbs(child) -- - #define arch_has_single_step() (1) - #define arch_has_block_step() (1) - ---- a/arch/ia64/kernel/ptrace.c -+++ b/arch/ia64/kernel/ptrace.c -@@ -618,63 +618,6 @@ void ia64_sync_krbs(void) - } - - /* -- * After PTRACE_ATTACH, a thread's register backing store area in user -- * space is assumed to contain correct data whenever the thread is -- * stopped. arch_ptrace_stop takes care of this on tracing stops. -- * But if the child was already stopped for job control when we attach -- * to it, then it might not ever get into ptrace_stop by the time we -- * want to examine the user memory containing the RBS. -- */ --void --ptrace_attach_sync_user_rbs (struct task_struct *child) --{ -- int stopped = 0; -- struct unw_frame_info info; -- -- /* -- * If the child is in TASK_STOPPED, we need to change that to -- * TASK_TRACED momentarily while we operate on it. This ensures -- * that the child won't be woken up and return to user mode while -- * we are doing the sync. (It can only be woken up for SIGKILL.) -- */ -- -- read_lock(&tasklist_lock); -- if (child->sighand) { -- spin_lock_irq(&child->sighand->siglock); -- if (READ_ONCE(child->__state) == TASK_STOPPED && -- !test_and_set_tsk_thread_flag(child, TIF_RESTORE_RSE)) { -- set_notify_resume(child); -- -- WRITE_ONCE(child->__state, TASK_TRACED); -- stopped = 1; -- } -- spin_unlock_irq(&child->sighand->siglock); -- } -- read_unlock(&tasklist_lock); -- -- if (!stopped) -- return; -- -- unw_init_from_blocked_task(&info, child); -- do_sync_rbs(&info, ia64_sync_user_rbs); -- -- /* -- * Now move the child back into TASK_STOPPED if it should be in a -- * job control stop, so that SIGCONT can be used to wake it up. -- */ -- read_lock(&tasklist_lock); -- if (child->sighand) { -- spin_lock_irq(&child->sighand->siglock); -- if (READ_ONCE(child->__state) == TASK_TRACED && -- (child->signal->flags & SIGNAL_STOP_STOPPED)) { -- WRITE_ONCE(child->__state, TASK_STOPPED); -- } -- spin_unlock_irq(&child->sighand->siglock); -- } -- read_unlock(&tasklist_lock); --} -- --/* - * Write f32-f127 back to task->thread.fph if it has been modified. 
- */ - inline void ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -1285,10 +1285,6 @@ int ptrace_request(struct task_struct *c - return ret; - } - --#ifndef arch_ptrace_attach --#define arch_ptrace_attach(child) do { } while (0) --#endif -- - SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr, - unsigned long, data) - { -@@ -1297,8 +1293,6 @@ SYSCALL_DEFINE4(ptrace, long, request, l - - if (request == PTRACE_TRACEME) { - ret = ptrace_traceme(); -- if (!ret) -- arch_ptrace_attach(current); - goto out; - } - -@@ -1310,12 +1304,6 @@ SYSCALL_DEFINE4(ptrace, long, request, l - - if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { - ret = ptrace_attach(child, request, addr, data); -- /* -- * Some architectures need to do book-keeping after -- * a ptrace attach. -- */ -- if (!ret) -- arch_ptrace_attach(child); - goto out_put_task_struct; - } - -@@ -1455,12 +1443,6 @@ COMPAT_SYSCALL_DEFINE4(ptrace, compat_lo - - if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) { - ret = ptrace_attach(child, request, addr, data); -- /* -- * Some architectures need to do book-keeping after -- * a ptrace attach. -- */ -- if (!ret) -- arch_ptrace_attach(child); - goto out_put_task_struct; - } - diff --git a/patches/0006-printk-get-caller_id-timestamp-after-migration-disab.patch b/patches/0006-printk-get-caller_id-timestamp-after-migration-disab.patch deleted file mode 100644 index 063b4589c1f1..000000000000 --- a/patches/0006-printk-get-caller_id-timestamp-after-migration-disab.patch +++ /dev/null @@ -1,66 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:28 +0206 -Subject: [PATCH 06/18] printk: get caller_id/timestamp after migration disable - -Currently the local CPU timestamp and caller_id for the record are -collected while migration is enabled. Since this information is -CPU-specific, it should be collected with migration disabled. - -Migration is disabled immediately after collecting this information -anyway, so just move the information collection to after the -migration disabling. - -Signed-off-by: John Ogness -Reviewed-by: Sergey Senozhatsky -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-7-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 10 ++++++---- - 1 file changed, 6 insertions(+), 4 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -2063,7 +2063,7 @@ static inline void printk_delay(void) - static inline u32 printk_caller_id(void) - { - return in_task() ? 
task_pid_nr(current) : -- 0x80000000 + raw_smp_processor_id(); -+ 0x80000000 + smp_processor_id(); - } - - /** -@@ -2145,7 +2145,6 @@ int vprintk_store(int facility, int leve - const struct dev_printk_info *dev_info, - const char *fmt, va_list args) - { -- const u32 caller_id = printk_caller_id(); - struct prb_reserved_entry e; - enum printk_info_flags flags = 0; - struct printk_record r; -@@ -2155,10 +2154,14 @@ int vprintk_store(int facility, int leve - u8 *recursion_ptr; - u16 reserve_size; - va_list args2; -+ u32 caller_id; - u16 text_len; - int ret = 0; - u64 ts_nsec; - -+ if (!printk_enter_irqsave(recursion_ptr, irqflags)) -+ return 0; -+ - /* - * Since the duration of printk() can vary depending on the message - * and state of the ringbuffer, grab the timestamp now so that it is -@@ -2167,8 +2170,7 @@ int vprintk_store(int facility, int leve - */ - ts_nsec = local_clock(); - -- if (!printk_enter_irqsave(recursion_ptr, irqflags)) -- return 0; -+ caller_id = printk_caller_id(); - - /* - * The sprintf needs to come first since the syslog prefix might be diff --git a/patches/0006-signal-Use-lockdep_assert_held-instead-of-assert_spi.patch b/patches/0006-signal-Use-lockdep_assert_held-instead-of-assert_spi.patch deleted file mode 100644 index 1ffb6a73f3c9..000000000000 --- a/patches/0006-signal-Use-lockdep_assert_held-instead-of-assert_spi.patch +++ /dev/null @@ -1,39 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:39 -0500 -Subject: [PATCH 06/12] signal: Use lockdep_assert_held instead of - assert_spin_locked - -The distinction is that assert_spin_locked() checks if the lock is -held *by*anyone* whereas lockdep_assert_held() asserts the current -context holds the lock. Also, the check goes away if you build -without lockdep. - -Suggested-by: Peter Zijlstra -Link: https://lkml.kernel.org/r/Ympr/+PX4XgT/UKU@hirez.programming.kicks-ass.net -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-6-ebiederm@xmission.com ---- - kernel/signal.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -884,7 +884,7 @@ static int check_kill_permission(int sig - static void ptrace_trap_notify(struct task_struct *t) - { - WARN_ON_ONCE(!(t->ptrace & PT_SEIZED)); -- assert_spin_locked(&t->sighand->siglock); -+ lockdep_assert_held(&t->sighand->siglock); - - task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY); - ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING); -@@ -1079,7 +1079,7 @@ static int __send_signal_locked(int sig, - int override_rlimit; - int ret = 0, result; - -- assert_spin_locked(&t->sighand->siglock); -+ lockdep_assert_held(&t->sighand->siglock); - - result = TRACE_SIGNAL_IGNORED; - if (!prepare_signal(sig, t, force)) diff --git a/patches/0007-printk-call-boot_delay_msec-in-printk_delay.patch b/patches/0007-printk-call-boot_delay_msec-in-printk_delay.patch deleted file mode 100644 index ce6ce26e4a24..000000000000 --- a/patches/0007-printk-call-boot_delay_msec-in-printk_delay.patch +++ /dev/null @@ -1,41 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:29 +0206 -Subject: [PATCH 07/18] printk: call boot_delay_msec() in printk_delay() - -boot_delay_msec() is always called immediately before printk_delay() -so just call it from within printk_delay(). 
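The combined helper then reads roughly as follows (an abridged reconstruction; the hunk below only shows the changed lines, the mdelay() throttle loop is pre-existing code):

	static inline void printk_delay(int level)
	{
		boot_delay_msec(level);

		if (unlikely(printk_delay_msec)) {
			int m = printk_delay_msec;

			/* throttle each message by printk_delay_msec ms */
			while (m--) {
				mdelay(1);
				touch_nmi_watchdog();
			}
		}
	}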
- -Signed-off-by: John Ogness -Reviewed-by: Sergey Senozhatsky -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-8-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 7 ++++--- - 1 file changed, 4 insertions(+), 3 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -2048,8 +2048,10 @@ static u8 *__printk_recursion_counter(vo - - int printk_delay_msec __read_mostly; - --static inline void printk_delay(void) -+static inline void printk_delay(int level) - { -+ boot_delay_msec(level); -+ - if (unlikely(printk_delay_msec)) { - int m = printk_delay_msec; - -@@ -2274,8 +2276,7 @@ asmlinkage int vprintk_emit(int facility - in_sched = true; - } - -- boot_delay_msec(level); -- printk_delay(); -+ printk_delay(level); - - printed_len = vprintk_store(facility, level, dev_info, fmt, args); - diff --git a/patches/0007-ptrace-Reimplement-PTRACE_KILL-by-always-sending-SIG.patch b/patches/0007-ptrace-Reimplement-PTRACE_KILL-by-always-sending-SIG.patch deleted file mode 100644 index 6525db41415f..000000000000 --- a/patches/0007-ptrace-Reimplement-PTRACE_KILL-by-always-sending-SIG.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:40 -0500 -Subject: [PATCH 07/12] ptrace: Reimplement PTRACE_KILL by always sending - SIGKILL - -The current implementation of PTRACE_KILL is buggy and has been for -many years as it assumes its target has stopped in ptrace_stop. At a -quick skim it looks like this assumption has existed since ptrace -support was added in linux v1.0. - -While PTRACE_KILL has been deprecated we can not remove it as -a quick search with google code search reveals many existing -programs calling it. - -When the ptracee is not stopped at ptrace_stop some fields would be -set that are ignored except in ptrace_stop. Making the userspace -visible behavior of PTRACE_KILL a noop in those cases. - -As the usual rules are not obeyed it is not clear what the -consequences are of calling PTRACE_KILL on a running process. -Presumably userspace does not do this as it achieves nothing. - -Replace the implementation of PTRACE_KILL with a simple -send_sig_info(SIGKILL) followed by a return 0. This changes the -observable user space behavior only in that PTRACE_KILL on a process -not stopped in ptrace_stop will also kill it. As that has always -been the intent of the code this seems like a reasonable change. - -Cc: stable@vger.kernel.org -Reported-by: Al Viro -Suggested-by: Al Viro -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-7-ebiederm@xmission.com ---- - arch/x86/kernel/step.c | 3 +-- - kernel/ptrace.c | 5 ++--- - 2 files changed, 3 insertions(+), 5 deletions(-) - ---- a/arch/x86/kernel/step.c -+++ b/arch/x86/kernel/step.c -@@ -180,8 +180,7 @@ void set_task_blockstep(struct task_stru - * - * NOTE: this means that set/clear TIF_BLOCKSTEP is only safe if - * task is current or it can't be running, otherwise we can race -- * with __switch_to_xtra(). We rely on ptrace_freeze_traced() but -- * PTRACE_KILL is not safe. -+ * with __switch_to_xtra(). We rely on ptrace_freeze_traced().
- */ - local_irq_disable(); - debugctl = get_debugctlmsr(); ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -1236,9 +1236,8 @@ int ptrace_request(struct task_struct *c - return ptrace_resume(child, request, data); - - case PTRACE_KILL: -- if (child->exit_state) /* already dead */ -- return 0; -- return ptrace_resume(child, request, SIGKILL); -+ send_sig_info(SIGKILL, SEND_SIG_NOINFO, child); -+ return 0; - - #ifdef CONFIG_HAVE_ARCH_TRACEHOOK - case PTRACE_GETREGSET: diff --git a/patches/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch b/patches/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch index 89d12313d287..b09a46817ea8 100644 --- a/patches/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch +++ b/patches/0008-drm-i915-gt-Queue-and-wait-for-the-irq_work-item.patch @@ -26,7 +26,7 @@ Reviewed-by: Maarten Lankhorst --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c -@@ -311,10 +311,9 @@ void __intel_breadcrumbs_park(struct int +@@ -312,10 +312,9 @@ void __intel_breadcrumbs_park(struct int /* Kick the work once more to drain the signalers, and disarm the irq */ irq_work_sync(&b->irq_work); while (READ_ONCE(b->irq_armed) && !atomic_read(&b->active)) { diff --git a/patches/0008-printk-add-con_printk-macro-for-console-details.patch b/patches/0008-printk-add-con_printk-macro-for-console-details.patch deleted file mode 100644 index 5a854e15ff73..000000000000 --- a/patches/0008-printk-add-con_printk-macro-for-console-details.patch +++ /dev/null @@ -1,55 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:30 +0206 -Subject: [PATCH 08/18] printk: add con_printk() macro for console details - -It is useful to generate log messages that include details about -the related console. Rather than duplicate the code to assemble -the details, put that code into a macro con_printk(). - -Once console printers become threaded, this macro will find more -users. - -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-9-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 13 +++++++------ - 1 file changed, 7 insertions(+), 6 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -3015,6 +3015,11 @@ static void try_enable_default_console(s - newcon->flags |= CON_CONSDEV; - } - -+#define con_printk(lvl, con, fmt, ...) \ -+ printk(lvl pr_fmt("%sconsole [%s%d] " fmt), \ -+ (con->flags & CON_BOOT) ? "boot" : "", \ -+ con->name, con->index, ##__VA_ARGS__) -+ - /* - * The console driver calls this routine during kernel initialization - * to register the console printing procedure with printk() and to -@@ -3153,9 +3158,7 @@ void register_console(struct console *ne - * users know there might be something in the kernel's log buffer that - * went to the bootconsole (that they do not see on the real console) - */ -- pr_info("%sconsole [%s%d] enabled\n", -- (newcon->flags & CON_BOOT) ? "boot" : "" , -- newcon->name, newcon->index); -+ con_printk(KERN_INFO, newcon, "enabled\n"); - if (bootcon_enabled && - ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV) && - !keep_bootcon) { -@@ -3174,9 +3177,7 @@ int unregister_console(struct console *c - struct console *con; - int res; - -- pr_info("%sconsole [%s%d] disabled\n", -- (console->flags & CON_BOOT) ? 
"boot" : "" , -- console->name, console->index); -+ con_printk(KERN_INFO, console, "disabled\n"); - - res = _braille_unregister_console(console); - if (res < 0) diff --git a/patches/0008-ptrace-Document-that-wait_task_inactive-can-t-fail.patch b/patches/0008-ptrace-Document-that-wait_task_inactive-can-t-fail.patch deleted file mode 100644 index 694933e0faac..000000000000 --- a/patches/0008-ptrace-Document-that-wait_task_inactive-can-t-fail.patch +++ /dev/null @@ -1,65 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:41 -0500 -Subject: [PATCH 08/12] ptrace: Document that wait_task_inactive can't fail - -After ptrace_freeze_traced succeeds it is known that the the tracee -has a __state value of __TASK_TRACED and that no __ptrace_unlink will -happen because the tracer is waiting for the tracee, and the tracee is -in ptrace_stop. - -The function ptrace_freeze_traced can succeed at any point after -ptrace_stop has set TASK_TRACED and dropped siglock. The read_lock on -tasklist_lock only excludes ptrace_attach. - -This means that the !current->ptrace which executes under a read_lock -of tasklist_lock will never see a ptrace_freeze_trace as the tracer -must have gone away before the tasklist_lock was taken and -ptrace_attach can not occur until the read_lock is dropped. As -ptrace_freeze_traced depends upon ptrace_attach running before it can -run that excludes ptrace_freeze_traced until __state is set to -TASK_RUNNING. This means that task_is_traced will fail in -ptrace_freeze_attach and ptrace_freeze_attached will fail. - -On the current->ptrace branch of ptrace_stop which will be reached any -time after ptrace_freeze_traced has succeed it is known that __state -is __TASK_TRACED and schedule() will be called with that state. - -Use a WARN_ON_ONCE to document that wait_task_inactive(TASK_TRACED) -should never fail. Remove the stale comment about may_ptrace_stop. - -Strictly speaking this is not true because if PREEMPT_RT is enabled -wait_task_inactive can fail because __state can be changed. I don't -see this as a problem as the ptrace code is currently broken on -PREMPT_RT, and this is one of the issues. Failing and warning when -the assumptions of the code are broken is good. - -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-8-ebiederm@xmission.com ---- - kernel/ptrace.c | 14 +++----------- - 1 file changed, 3 insertions(+), 11 deletions(-) - ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -266,17 +266,9 @@ static int ptrace_check_attach(struct ta - } - read_unlock(&tasklist_lock); - -- if (!ret && !ignore_state) { -- if (!wait_task_inactive(child, __TASK_TRACED)) { -- /* -- * This can only happen if may_ptrace_stop() fails and -- * ptrace_stop() changes ->state back to TASK_RUNNING, -- * so we should not worry about leaking __TASK_TRACED. 
-- */ -- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED); -- ret = -ESRCH; -- } -- } -+ if (!ret && !ignore_state && -+ WARN_ON_ONCE(!wait_task_inactive(child, __TASK_TRACED))) -+ ret = -ESRCH; - - return ret; - } diff --git a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch index d1db49f55add..bb4521f52995 100644 --- a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch +++ b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch @@ -27,7 +27,7 @@ Reviewed-by: Maarten Lankhorst --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c -@@ -1290,7 +1290,7 @@ static void execlists_dequeue(struct int +@@ -1289,7 +1289,7 @@ static void execlists_dequeue(struct int * and context switches) submission. */ @@ -36,7 +36,7 @@ Reviewed-by: Maarten Lankhorst /* * If the queue is higher priority than the last -@@ -1390,7 +1390,7 @@ static void execlists_dequeue(struct int +@@ -1389,7 +1389,7 @@ static void execlists_dequeue(struct int * Even if ELSP[1] is occupied and not worthy * of timeslices, our queue might be. */ @@ -45,7 +45,7 @@ Reviewed-by: Maarten Lankhorst return; } } -@@ -1416,7 +1416,7 @@ static void execlists_dequeue(struct int +@@ -1415,7 +1415,7 @@ static void execlists_dequeue(struct int if (last && !can_merge_rq(last, rq)) { spin_unlock(&ve->base.sched_engine->lock); @@ -54,7 +54,7 @@ Reviewed-by: Maarten Lankhorst return; /* leave this for another sibling */ } -@@ -1578,7 +1578,7 @@ static void execlists_dequeue(struct int +@@ -1577,7 +1577,7 @@ static void execlists_dequeue(struct int */ sched_engine->queue_priority_hint = queue_prio(sched_engine); i915_sched_engine_reset_on_empty(sched_engine); @@ -63,7 +63,7 @@ Reviewed-by: Maarten Lankhorst /* * We can skip poking the HW if we ended up with exactly the same set -@@ -1604,13 +1604,6 @@ static void execlists_dequeue(struct int +@@ -1603,13 +1603,6 @@ static void execlists_dequeue(struct int } } @@ -77,7 +77,7 @@ Reviewed-by: Maarten Lankhorst static void clear_ports(struct i915_request **ports, int count) { memset_p((void **)ports, NULL, count); -@@ -2431,7 +2424,7 @@ static void execlists_submission_tasklet +@@ -2439,7 +2432,7 @@ static void execlists_submission_tasklet } if (!engine->execlists.pending[0]) { diff --git a/patches/0009-printk-refactor-and-rework-printing-logic.patch b/patches/0009-printk-refactor-and-rework-printing-logic.patch deleted file mode 100644 index 42c448f373a9..000000000000 --- a/patches/0009-printk-refactor-and-rework-printing-logic.patch +++ /dev/null @@ -1,617 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:31 +0206 -Subject: [PATCH 09/18] printk: refactor and rework printing logic - -Refactor/rework printing logic in order to prepare for moving to -threaded console printing. - -- Move @console_seq into struct console so that the current - "position" of each console can be tracked individually. - -- Move @console_dropped into struct console so that the current drop - count of each console can be tracked individually. - -- Modify printing logic so that each console independently loads, - prepares, and prints its next record. - -- Remove exclusive_console logic. Since console positions are - handled independently, replaying past records occurs naturally. - -- Update the comments explaining why preemption is disabled while - printing from printk() context. 
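Distilled, the reworked printing loop has each console consume records at its own pace (an illustrative sketch built from the helpers this patch introduces; the full version is console_flush_all() in the hunks below):

	for_each_console(con) {
		bool handover;

		if (!console_is_usable(con))
			continue;
		/* Loads the record at con->seq, accounts skipped records
		 * in con->dropped and prints via con->write(). */
		console_emit_next_record(con, &handover);
	}

A console that was just registered simply starts from an older con->seq and catches up on its own, which is why the exclusive_console replay machinery can go away.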
- -With these changes, there is a change in behavior: the console -replaying the log (formerly exclusive console) will no longer block -other consoles. New messages appear on the other consoles while the -newly added console is still replaying. - -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-10-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/console.h | 2 - kernel/printk/printk.c | 441 ++++++++++++++++++++++++------------------------ - 2 files changed, 230 insertions(+), 213 deletions(-) - ---- a/include/linux/console.h -+++ b/include/linux/console.h -@@ -151,6 +151,8 @@ struct console { - int cflag; - uint ispeed; - uint ospeed; -+ u64 seq; -+ unsigned long dropped; - void *data; - struct console *next; - }; ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -281,11 +281,6 @@ static bool panic_in_progress(void) - static int console_locked, console_suspended; - - /* -- * If exclusive_console is non-NULL then only this console is to be printed to. -- */ --static struct console *exclusive_console; -- --/* - * Array of consoles built from command line options (console=) - */ - -@@ -374,12 +369,6 @@ static u64 syslog_seq; - static size_t syslog_partial; - static bool syslog_time; - --/* All 3 protected by @console_sem. */ --/* the next printk record to write to the console */ --static u64 console_seq; --static u64 exclusive_console_stop_seq; --static unsigned long console_dropped; -- - struct latched_seq { - seqcount_latch_t latch; - u64 val[2]; -@@ -1933,47 +1922,26 @@ static int console_trylock_spinning(void - } - - /* -- * Call the console drivers, asking them to write out -- * log_buf[start] to log_buf[end - 1]. -- * The console_lock must be held. -+ * Call the specified console driver, asking it to write out the specified -+ * text and length. For non-extended consoles, if any records have been -+ * dropped, a dropped message will be written out first. - */ --static void call_console_drivers(const char *ext_text, size_t ext_len, -- const char *text, size_t len) -+static void call_console_driver(struct console *con, const char *text, size_t len) - { - static char dropped_text[64]; -- size_t dropped_len = 0; -- struct console *con; -+ size_t dropped_len; - - trace_console_rcuidle(text, len); - -- if (!console_drivers) -- return; -- -- if (console_dropped) { -+ if (con->dropped && !(con->flags & CON_EXTENDED)) { - dropped_len = snprintf(dropped_text, sizeof(dropped_text), - "** %lu printk messages dropped **\n", -- console_dropped); -- console_dropped = 0; -+ con->dropped); -+ con->dropped = 0; -+ con->write(con, dropped_text, dropped_len); - } - -- for_each_console(con) { -- if (exclusive_console && con != exclusive_console) -- continue; -- if (!(con->flags & CON_ENABLED)) -- continue; -- if (!con->write) -- continue; -- if (!cpu_online(smp_processor_id()) && -- !(con->flags & CON_ANYTIME)) -- continue; -- if (con->flags & CON_EXTENDED) -- con->write(con, ext_text, ext_len); -- else { -- if (dropped_len) -- con->write(con, dropped_text, dropped_len); -- con->write(con, text, len); -- } -- } -+ con->write(con, text, len); - } - - /* -@@ -2283,15 +2251,18 @@ asmlinkage int vprintk_emit(int facility - /* If called from the scheduler, we can not call up(). 
*/ - if (!in_sched) { - /* -- * Disable preemption to avoid being preempted while holding -- * console_sem which would prevent anyone from printing to -- * console -+ * The caller may be holding system-critical or -+ * timing-sensitive locks. Disable preemption during -+ * printing of all remaining records to all consoles so that -+ * this context can return as soon as possible. Hopefully -+ * another printk() caller will take over the printing. - */ - preempt_disable(); - /* - * Try to acquire and then immediately release the console -- * semaphore. The release will print out buffers and wake up -- * /dev/kmsg and syslog() users. -+ * semaphore. The release will print out buffers. With the -+ * spinning variant, this context tries to take over the -+ * printing from another printing context. - */ - if (console_trylock_spinning()) - console_unlock(); -@@ -2329,11 +2300,9 @@ EXPORT_SYMBOL(_printk); - - #define prb_read_valid(rb, seq, r) false - #define prb_first_valid_seq(rb) 0 -+#define prb_next_seq(rb) 0 - - static u64 syslog_seq; --static u64 console_seq; --static u64 exclusive_console_stop_seq; --static unsigned long console_dropped; - - static size_t record_print_text(const struct printk_record *r, - bool syslog, bool time) -@@ -2350,8 +2319,7 @@ static ssize_t msg_print_ext_body(char * - struct dev_printk_info *dev_info) { return 0; } - static void console_lock_spinning_enable(void) { } - static int console_lock_spinning_disable_and_check(void) { return 0; } --static void call_console_drivers(const char *ext_text, size_t ext_len, -- const char *text, size_t len) {} -+static void call_console_driver(struct console *con, const char *text, size_t len) { } - static bool suppress_message_printing(int level) { return false; } - - #endif /* CONFIG_PRINTK */ -@@ -2622,22 +2590,6 @@ int is_console_locked(void) - EXPORT_SYMBOL(is_console_locked); - - /* -- * Check if we have any console that is capable of printing while cpu is -- * booting or shutting down. Requires console_sem. -- */ --static int have_callable_console(void) --{ -- struct console *con; -- -- for_each_console(con) -- if ((con->flags & CON_ENABLED) && -- (con->flags & CON_ANYTIME)) -- return 1; -- -- return 0; --} -- --/* - * Return true when this CPU should unlock console_sem without pushing all - * messages to the console. This reduces the chance that the console is - * locked when the panic CPU tries to use it. -@@ -2657,15 +2609,182 @@ static bool abandon_console_lock_in_pani - } - - /* -- * Can we actually use the console at this time on this cpu? -+ * Check if the given console is currently capable and allowed to print -+ * records. -+ * -+ * Requires the console_lock. -+ */ -+static inline bool console_is_usable(struct console *con) -+{ -+ if (!(con->flags & CON_ENABLED)) -+ return false; -+ -+ if (!con->write) -+ return false; -+ -+ /* -+ * Console drivers may assume that per-cpu resources have been -+ * allocated. So unless they're explicitly marked as being able to -+ * cope (CON_ANYTIME) don't call them until this CPU is officially up. -+ */ -+ if (!cpu_online(raw_smp_processor_id()) && -+ !(con->flags & CON_ANYTIME)) -+ return false; -+ -+ return true; -+} -+ -+static void __console_unlock(void) -+{ -+ console_locked = 0; -+ up_console_sem(); -+} -+ -+/* -+ * Print one record for the given console. The record printed is whatever -+ * record is the next available record for the given console. 
-+ * -+ * @handover will be set to true if a printk waiter has taken over the -+ * console_lock, in which case the caller is no longer holding the -+ * console_lock. Otherwise it is set to false. -+ * -+ * Returns false if the given console has no next record to print, otherwise -+ * true. -+ * -+ * Requires the console_lock. -+ */ -+static bool console_emit_next_record(struct console *con, bool *handover) -+{ -+ static char ext_text[CONSOLE_EXT_LOG_MAX]; -+ static char text[CONSOLE_LOG_MAX]; -+ static int panic_console_dropped; -+ struct printk_info info; -+ struct printk_record r; -+ unsigned long flags; -+ char *write_text; -+ size_t len; -+ -+ prb_rec_init_rd(&r, &info, text, sizeof(text)); -+ -+ *handover = false; -+ -+ if (!prb_read_valid(prb, con->seq, &r)) -+ return false; -+ -+ if (con->seq != r.info->seq) { -+ con->dropped += r.info->seq - con->seq; -+ con->seq = r.info->seq; -+ if (panic_in_progress() && panic_console_dropped++ > 10) { -+ suppress_panic_printk = 1; -+ pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n"); -+ } -+ } -+ -+ /* Skip record that has level above the console loglevel. */ -+ if (suppress_message_printing(r.info->level)) { -+ con->seq++; -+ goto skip; -+ } -+ -+ if (con->flags & CON_EXTENDED) { -+ write_text = &ext_text[0]; -+ len = info_print_ext_header(ext_text, sizeof(ext_text), r.info); -+ len += msg_print_ext_body(ext_text + len, sizeof(ext_text) - len, -+ &r.text_buf[0], r.info->text_len, &r.info->dev_info); -+ } else { -+ write_text = &text[0]; -+ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); -+ } -+ -+ /* -+ * While actively printing out messages, if another printk() -+ * were to occur on another CPU, it may wait for this one to -+ * finish. This task can not be preempted if there is a -+ * waiter waiting to take over. -+ * -+ * Interrupts are disabled because the hand over to a waiter -+ * must not be interrupted until the hand over is completed -+ * (@console_waiter is cleared). -+ */ -+ printk_safe_enter_irqsave(flags); -+ console_lock_spinning_enable(); -+ -+ stop_critical_timings(); /* don't trace print latency */ -+ call_console_driver(con, write_text, len); -+ start_critical_timings(); -+ -+ con->seq++; -+ -+ *handover = console_lock_spinning_disable_and_check(); -+ printk_safe_exit_irqrestore(flags); -+skip: -+ return true; -+} -+ -+/* -+ * Print out all remaining records to all consoles. -+ * -+ * @do_cond_resched is set by the caller. It can be true only in schedulable -+ * context. -+ * -+ * @next_seq is set to the sequence number after the last available record. -+ * The value is valid only when this function returns true. It means that all -+ * usable consoles are completely flushed. - * -- * Console drivers may assume that per-cpu resources have been allocated. So -- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't -- * call them until this CPU is officially up. -+ * @handover will be set to true if a printk waiter has taken over the -+ * console_lock, in which case the caller is no longer holding the -+ * console_lock. Otherwise it is set to false. -+ * -+ * Returns true when there was at least one usable console and all messages -+ * were flushed to all usable consoles. A returned false informs the caller -+ * that everything was not flushed (either there were no usable consoles or -+ * another context has taken over printing or it is a panic situation and this -+ * is not the panic CPU). 
Regardless the reason, the caller should assume it -+ * is not useful to immediately try again. -+ * -+ * Requires the console_lock. - */ --static inline int can_use_console(void) -+static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) - { -- return cpu_online(raw_smp_processor_id()) || have_callable_console(); -+ bool any_usable = false; -+ struct console *con; -+ bool any_progress; -+ -+ *next_seq = 0; -+ *handover = false; -+ -+ do { -+ any_progress = false; -+ -+ for_each_console(con) { -+ bool progress; -+ -+ if (!console_is_usable(con)) -+ continue; -+ any_usable = true; -+ -+ progress = console_emit_next_record(con, handover); -+ if (*handover) -+ return false; -+ -+ /* Track the next of the highest seq flushed. */ -+ if (con->seq > *next_seq) -+ *next_seq = con->seq; -+ -+ if (!progress) -+ continue; -+ any_progress = true; -+ -+ /* Allow panic_cpu to take over the consoles safely. */ -+ if (abandon_console_lock_in_panic()) -+ return false; -+ -+ if (do_cond_resched) -+ cond_resched(); -+ } -+ } while (any_progress); -+ -+ return any_usable; - } - - /** -@@ -2678,28 +2797,20 @@ static inline int can_use_console(void) - * by printk(). If this is the case, console_unlock(); emits - * the output prior to releasing the lock. - * -- * If there is output waiting, we wake /dev/kmsg and syslog() users. -- * - * console_unlock(); may be called from any context. - */ - void console_unlock(void) - { -- static char ext_text[CONSOLE_EXT_LOG_MAX]; -- static char text[CONSOLE_LOG_MAX]; -- static int panic_console_dropped; -- unsigned long flags; -- bool do_cond_resched, retry; -- struct printk_info info; -- struct printk_record r; -- u64 __maybe_unused next_seq; -+ bool do_cond_resched; -+ bool handover; -+ bool flushed; -+ u64 next_seq; - - if (console_suspended) { - up_console_sem(); - return; - } - -- prb_rec_init_rd(&r, &info, text, sizeof(text)); -- - /* - * Console drivers are called with interrupts disabled, so - * @console_may_schedule should be cleared before; however, we may -@@ -2708,125 +2819,34 @@ void console_unlock(void) - * between lines if allowable. Not doing so can cause a very long - * scheduling stall on a slow console leading to RCU stall and - * softlockup warnings which exacerbate the issue with more -- * messages practically incapacitating the system. -- * -- * console_trylock() is not able to detect the preemptive -- * context reliably. Therefore the value must be stored before -- * and cleared after the "again" goto label. -+ * messages practically incapacitating the system. Therefore, create -+ * a local to use for the printing loop. - */ - do_cond_resched = console_may_schedule; --again: -- console_may_schedule = 0; - -- /* -- * We released the console_sem lock, so we need to recheck if -- * cpu is online and (if not) is there at least one CON_ANYTIME -- * console. -- */ -- if (!can_use_console()) { -- console_locked = 0; -- up_console_sem(); -- return; -- } -- -- for (;;) { -- size_t ext_len = 0; -- int handover; -- size_t len; -- --skip: -- if (!prb_read_valid(prb, console_seq, &r)) -- break; -- -- if (console_seq != r.info->seq) { -- console_dropped += r.info->seq - console_seq; -- console_seq = r.info->seq; -- if (panic_in_progress() && panic_console_dropped++ > 10) { -- suppress_panic_printk = 1; -- pr_warn_once("Too many dropped messages. 
Suppress messages on non-panic CPUs to prevent livelock.\n"); -- } -- } -- -- if (suppress_message_printing(r.info->level)) { -- /* -- * Skip record we have buffered and already printed -- * directly to the console when we received it, and -- * record that has level above the console loglevel. -- */ -- console_seq++; -- goto skip; -- } -+ do { -+ console_may_schedule = 0; - -- /* Output to all consoles once old messages replayed. */ -- if (unlikely(exclusive_console && -- console_seq >= exclusive_console_stop_seq)) { -- exclusive_console = NULL; -- } -+ flushed = console_flush_all(do_cond_resched, &next_seq, &handover); -+ if (!handover) -+ __console_unlock(); - - /* -- * Handle extended console text first because later -- * record_print_text() will modify the record buffer in-place. -+ * Abort if there was a failure to flush all messages to all -+ * usable consoles. Either it is not possible to flush (in -+ * which case it would be an infinite loop of retrying) or -+ * another context has taken over printing. - */ -- if (nr_ext_console_drivers) { -- ext_len = info_print_ext_header(ext_text, -- sizeof(ext_text), -- r.info); -- ext_len += msg_print_ext_body(ext_text + ext_len, -- sizeof(ext_text) - ext_len, -- &r.text_buf[0], -- r.info->text_len, -- &r.info->dev_info); -- } -- len = record_print_text(&r, -- console_msg_format & MSG_FORMAT_SYSLOG, -- printk_time); -- console_seq++; -+ if (!flushed) -+ break; - - /* -- * While actively printing out messages, if another printk() -- * were to occur on another CPU, it may wait for this one to -- * finish. This task can not be preempted if there is a -- * waiter waiting to take over. -- * -- * Interrupts are disabled because the hand over to a waiter -- * must not be interrupted until the hand over is completed -- * (@console_waiter is cleared). -+ * Some context may have added new records after -+ * console_flush_all() but before unlocking the console. -+ * Re-check if there is a new record to flush. If the trylock -+ * fails, another context is already handling the printing. - */ -- printk_safe_enter_irqsave(flags); -- console_lock_spinning_enable(); -- -- stop_critical_timings(); /* don't trace print latency */ -- call_console_drivers(ext_text, ext_len, text, len); -- start_critical_timings(); -- -- handover = console_lock_spinning_disable_and_check(); -- printk_safe_exit_irqrestore(flags); -- if (handover) -- return; -- -- /* Allow panic_cpu to take over the consoles safely */ -- if (abandon_console_lock_in_panic()) -- break; -- -- if (do_cond_resched) -- cond_resched(); -- } -- -- /* Get consistent value of the next-to-be-used sequence number. */ -- next_seq = console_seq; -- -- console_locked = 0; -- up_console_sem(); -- -- /* -- * Someone could have filled up the buffer again, so re-check if there's -- * something to flush. In case we cannot trylock the console_sem again, -- * there's a new owner and the console_unlock() from them will do the -- * flush, no worries. 
-- */ -- retry = prb_read_valid(prb, next_seq, NULL); -- if (retry && !abandon_console_lock_in_panic() && console_trylock()) -- goto again; -+ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock()); - } - EXPORT_SYMBOL(console_unlock); - -@@ -2886,8 +2906,14 @@ void console_flush_on_panic(enum con_flu - console_trylock(); - console_may_schedule = 0; - -- if (mode == CONSOLE_REPLAY_ALL) -- console_seq = prb_first_valid_seq(prb); -+ if (mode == CONSOLE_REPLAY_ALL) { -+ struct console *c; -+ u64 seq; -+ -+ seq = prb_first_valid_seq(prb); -+ for_each_console(c) -+ c->seq = seq; -+ } - console_unlock(); - } - -@@ -3127,26 +3153,15 @@ void register_console(struct console *ne - if (newcon->flags & CON_EXTENDED) - nr_ext_console_drivers++; - -+ newcon->dropped = 0; - if (newcon->flags & CON_PRINTBUFFER) { -- /* -- * console_unlock(); will print out the buffered messages -- * for us. -- * -- * We're about to replay the log buffer. Only do this to the -- * just-registered console to avoid excessive message spam to -- * the already-registered consoles. -- * -- * Set exclusive_console with disabled interrupts to reduce -- * race window with eventual console_flush_on_panic() that -- * ignores console_lock. -- */ -- exclusive_console = newcon; -- exclusive_console_stop_seq = console_seq; -- - /* Get a consistent copy of @syslog_seq. */ - mutex_lock(&syslog_lock); -- console_seq = syslog_seq; -+ newcon->seq = syslog_seq; - mutex_unlock(&syslog_lock); -+ } else { -+ /* Begin with next message. */ -+ newcon->seq = prb_next_seq(prb); - } - console_unlock(); - console_sysfs_notify(); diff --git a/patches/0009-ptrace-Admit-ptrace_stop-can-generate-spuriuos-SIGTR.patch b/patches/0009-ptrace-Admit-ptrace_stop-can-generate-spuriuos-SIGTR.patch deleted file mode 100644 index 0d5055d2a5cb..000000000000 --- a/patches/0009-ptrace-Admit-ptrace_stop-can-generate-spuriuos-SIGTR.patch +++ /dev/null @@ -1,197 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:42 -0500 -Subject: [PATCH 09/12] ptrace: Admit ptrace_stop can generate spuriuos - SIGTRAPs - -Long ago and far away there was a BUG_ON at the start of ptrace_stop -that did "BUG_ON(!(current->ptrace & PT_PTRACED));" [1]. The BUG_ON -had never triggered but examination of the code showed that the BUG_ON -could actually trigger. To complement removing the BUG_ON an attempt -to better handle the race was added. - -The code detected the tracer had gone away and did not call -do_notify_parent_cldstop. The code also attempted to prevent -ptrace_report_syscall from sending spurious SIGTRAPs when the tracer -went away. - -The code to detect when the tracer had gone away before sending a -signal to tracer was a legitimate fix and continues to work to this -date. - -The code to prevent sending spurious SIGTRAPs is a failure. At the -time and until today the code only catches it when the tracer goes -away after siglock is dropped and before read_lock is acquired. If -the tracer goes away after read_lock is dropped a spurious SIGTRAP can -still be sent to the tracee. The tracer going away after read_lock -is dropped is the far likelier case as it is the bigger window. - -Given that the attempt to prevent the generation of a SIGTRAP was a -failure and continues to be a failure remove the code that attempts to -do that. This simplifies the code in ptrace_stop and makes -ptrace_stop much easier to reason about. 
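
For reference, the resulting entry check (condensed here from the kernel/signal.c hunk below, with the in-tree comment compressed) is simply:

	/*
	 * After this point ptrace_signal_wake_up() clears TASK_TRACED if
	 * ptrace_unlink() happens; a tracer that already went away is
	 * handled here, so ptrace_stop() cannot sleep in schedule()
	 * with nobody left to wake it.
	 */
	if (!current->ptrace)
		return exit_code;

	set_special_state(TASK_TRACED);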
- -To successfully deal with the tracer going away, all of the tracer's -instrumentation of the child would need to be removed, and reliably -detecting when the tracer has set a signal to continue with would need -to be implemented. - -[1] 66519f549ae5 ("[PATCH] fix ptracer death race yielding bogus BUG_ON") - -History-Tree: https://git.kernel.org/pub/scm/linux/kernel/git/tglx/history.git -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-9-ebiederm@xmission.com ---- - kernel/signal.c | 92 +++++++++++++++++++++++--------------------------------- - 1 file changed, 38 insertions(+), 54 deletions(-) - ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2187,13 +2187,12 @@ static void do_notify_parent_cldstop(str - * with. If the code did not stop because the tracer is gone, - * the stop signal remains unchanged unless clear_code. - */ --static int ptrace_stop(int exit_code, int why, int clear_code, -- unsigned long message, kernel_siginfo_t *info) -+static int ptrace_stop(int exit_code, int why, unsigned long message, -+ kernel_siginfo_t *info) - __releases(¤t->sighand->siglock) - __acquires(¤t->sighand->siglock) - { - bool gstop_done = false; -- bool read_code = true; - - if (arch_ptrace_stop_needed()) { - /* -@@ -2212,7 +2211,14 @@ static int ptrace_stop(int exit_code, in - /* - * schedule() will not sleep if there is a pending signal that - * can awaken the task. -+ * -+ * After this point ptrace_signal_wake_up will clear TASK_TRACED -+ * if ptrace_unlink happens. Handle previous ptrace_unlinks -+ * here to prevent ptrace_stop sleeping in schedule. - */ -+ if (!current->ptrace) -+ return exit_code; -+ - set_special_state(TASK_TRACED); - - /* -@@ -2259,54 +2265,33 @@ static int ptrace_stop(int exit_code, in - - spin_unlock_irq(¤t->sighand->siglock); - read_lock(&tasklist_lock); -- if (likely(current->ptrace)) { -- /* -- * Notify parents of the stop. -- * -- * While ptraced, there are two parents - the ptracer and -- * the real_parent of the group_leader. The ptracer should -- * know about every stop while the real parent is only -- * interested in the completion of group stop. The states -- * for the two don't interact with each other. Notify -- * separately unless they're gonna be duplicates. -- */ -+ /* -+ * Notify parents of the stop. -+ * -+ * While ptraced, there are two parents - the ptracer and -+ * the real_parent of the group_leader. The ptracer should -+ * know about every stop while the real parent is only -+ * interested in the completion of group stop. The states -+ * for the two don't interact with each other. Notify -+ * separately unless they're gonna be duplicates. -+ */ -+ if (current->ptrace) - do_notify_parent_cldstop(current, true, why); -- if (gstop_done && ptrace_reparented(current)) -- do_notify_parent_cldstop(current, false, why); -- -- /* -- * Don't want to allow preemption here, because -- * sys_ptrace() needs this task to be inactive. -- * -- * XXX: implement read_unlock_no_resched(). -- */ -- preempt_disable(); -- read_unlock(&tasklist_lock); -- cgroup_enter_frozen(); -- preempt_enable_no_resched(); -- freezable_schedule(); -- cgroup_leave_frozen(true); -- } else { -- /* -- * By the time we got the lock, our tracer went away. -- * Don't drop the lock yet, another tracer may come. -- * -- * If @gstop_done, the ptracer went away between group stop -- * completion and here. 
During detach, it would have set -- * JOBCTL_STOP_PENDING on us and we'll re-enter -- * TASK_STOPPED in do_signal_stop() on return, so notifying -- * the real parent of the group stop completion is enough. -- */ -- if (gstop_done) -- do_notify_parent_cldstop(current, false, why); -+ if (gstop_done && (!current->ptrace || ptrace_reparented(current))) -+ do_notify_parent_cldstop(current, false, why); - -- /* tasklist protects us from ptrace_freeze_traced() */ -- __set_current_state(TASK_RUNNING); -- read_code = false; -- if (clear_code) -- exit_code = 0; -- read_unlock(&tasklist_lock); -- } -+ /* -+ * Don't want to allow preemption here, because -+ * sys_ptrace() needs this task to be inactive. -+ * -+ * XXX: implement read_unlock_no_resched(). -+ */ -+ preempt_disable(); -+ read_unlock(&tasklist_lock); -+ cgroup_enter_frozen(); -+ preempt_enable_no_resched(); -+ freezable_schedule(); -+ cgroup_leave_frozen(true); - - /* - * We are back. Now reacquire the siglock before touching -@@ -2314,8 +2299,7 @@ static int ptrace_stop(int exit_code, in - * any signal-sending on another CPU that wants to examine it. - */ - spin_lock_irq(¤t->sighand->siglock); -- if (read_code) -- exit_code = current->exit_code; -+ exit_code = current->exit_code; - current->last_siginfo = NULL; - current->ptrace_message = 0; - current->exit_code = 0; -@@ -2343,7 +2327,7 @@ static int ptrace_do_notify(int signr, i - info.si_uid = from_kuid_munged(current_user_ns(), current_uid()); - - /* Let the debugger run. */ -- return ptrace_stop(exit_code, why, 1, message, &info); -+ return ptrace_stop(exit_code, why, message, &info); - } - - int ptrace_notify(int exit_code, unsigned long message) -@@ -2515,7 +2499,7 @@ static void do_jobctl_trap(void) - CLD_STOPPED, 0); - } else { - WARN_ON_ONCE(!signr); -- ptrace_stop(signr, CLD_STOPPED, 0, 0, NULL); -+ ptrace_stop(signr, CLD_STOPPED, 0, NULL); - } - } - -@@ -2568,7 +2552,7 @@ static int ptrace_signal(int signr, kern - * comment in dequeue_signal(). - */ - current->jobctl |= JOBCTL_STOP_DEQUEUED; -- signr = ptrace_stop(signr, CLD_TRAPPED, 0, 0, info); -+ signr = ptrace_stop(signr, CLD_TRAPPED, 0, info); - - /* We're back. Did the debugger cancel the sig? */ - if (signr == 0) diff --git a/patches/0010-printk-move-buffer-definitions-into-console_emit_nex.patch b/patches/0010-printk-move-buffer-definitions-into-console_emit_nex.patch deleted file mode 100644 index 272ef33df02c..000000000000 --- a/patches/0010-printk-move-buffer-definitions-into-console_emit_nex.patch +++ /dev/null @@ -1,183 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:32 +0206 -Subject: [PATCH 10/18] printk: move buffer definitions into - console_emit_next_record() caller - -Extended consoles print extended messages and do not print messages about -dropped records. - -Non-extended consoles print "normal" messages as well as extra messages -about dropped records. - -Currently the buffers for these various message types are defined within -the functions that might use them and their usage is based upon the -CON_EXTENDED flag. This will be a problem when moving to kthread printers -because each printer must be able to provide its own buffers. - -Move all the message buffer definitions outside of -console_emit_next_record(). The caller knows if extended or dropped -messages should be printed and can specify the appropriate buffers to -use. The console_emit_next_record() and call_console_driver() functions -can know what to print based on whether specified buffers are non-NULL. 
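
Condensed from the console_flush_all() hunk at the end of this patch, the call-site dispatch described above reads:

	if (con->flags & CON_EXTENDED) {
		/* Extended consoles do not print "dropped messages". */
		progress = console_emit_next_record(con, &text[0],
						    &ext_text[0], NULL,
						    handover);
	} else {
		progress = console_emit_next_record(con, &text[0],
						    NULL, &dropped_text[0],
						    handover);
	}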
- -With this change, buffer definition/allocation/specification is separated -from the code that does the various types of string printing. - -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-11-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 60 +++++++++++++++++++++++++++++++++++-------------- - 1 file changed, 43 insertions(+), 17 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -394,6 +394,9 @@ static struct latched_seq clear_seq = { - /* the maximum size of a formatted record (i.e. with prefix added per line) */ - #define CONSOLE_LOG_MAX 1024 - -+/* the maximum size for a dropped text message */ -+#define DROPPED_TEXT_MAX 64 -+ - /* the maximum size allowed to be reserved for a record */ - #define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX) - -@@ -1923,18 +1926,18 @@ static int console_trylock_spinning(void - - /* - * Call the specified console driver, asking it to write out the specified -- * text and length. For non-extended consoles, if any records have been -+ * text and length. If @dropped_text is non-NULL and any records have been - * dropped, a dropped message will be written out first. - */ --static void call_console_driver(struct console *con, const char *text, size_t len) -+static void call_console_driver(struct console *con, const char *text, size_t len, -+ char *dropped_text) - { -- static char dropped_text[64]; - size_t dropped_len; - - trace_console_rcuidle(text, len); - -- if (con->dropped && !(con->flags & CON_EXTENDED)) { -- dropped_len = snprintf(dropped_text, sizeof(dropped_text), -+ if (con->dropped && dropped_text) { -+ dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX, - "** %lu printk messages dropped **\n", - con->dropped); - con->dropped = 0; -@@ -2296,6 +2299,7 @@ EXPORT_SYMBOL(_printk); - #else /* CONFIG_PRINTK */ - - #define CONSOLE_LOG_MAX 0 -+#define DROPPED_TEXT_MAX 0 - #define printk_time false - - #define prb_read_valid(rb, seq, r) false -@@ -2319,7 +2323,10 @@ static ssize_t msg_print_ext_body(char * - struct dev_printk_info *dev_info) { return 0; } - static void console_lock_spinning_enable(void) { } - static int console_lock_spinning_disable_and_check(void) { return 0; } --static void call_console_driver(struct console *con, const char *text, size_t len) { } -+static void call_console_driver(struct console *con, const char *text, size_t len, -+ char *dropped_text) -+{ -+} - static bool suppress_message_printing(int level) { return false; } - - #endif /* CONFIG_PRINTK */ -@@ -2644,6 +2651,14 @@ static void __console_unlock(void) - * Print one record for the given console. The record printed is whatever - * record is the next available record for the given console. - * -+ * @text is a buffer of size CONSOLE_LOG_MAX. -+ * -+ * If extended messages should be printed, @ext_text is a buffer of size -+ * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL. -+ * -+ * If dropped messages should be printed, @dropped_text is a buffer of size -+ * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL. -+ * - * @handover will be set to true if a printk waiter has taken over the - * console_lock, in which case the caller is no longer holding the - * console_lock. Otherwise it is set to false. -@@ -2653,10 +2668,9 @@ static void __console_unlock(void) - * - * Requires the console_lock. 
- */ --static bool console_emit_next_record(struct console *con, bool *handover) -+static bool console_emit_next_record(struct console *con, char *text, char *ext_text, -+ char *dropped_text, bool *handover) - { -- static char ext_text[CONSOLE_EXT_LOG_MAX]; -- static char text[CONSOLE_LOG_MAX]; - static int panic_console_dropped; - struct printk_info info; - struct printk_record r; -@@ -2664,7 +2678,7 @@ static bool console_emit_next_record(str - char *write_text; - size_t len; - -- prb_rec_init_rd(&r, &info, text, sizeof(text)); -+ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX); - - *handover = false; - -@@ -2686,13 +2700,13 @@ static bool console_emit_next_record(str - goto skip; - } - -- if (con->flags & CON_EXTENDED) { -- write_text = &ext_text[0]; -- len = info_print_ext_header(ext_text, sizeof(ext_text), r.info); -- len += msg_print_ext_body(ext_text + len, sizeof(ext_text) - len, -+ if (ext_text) { -+ write_text = ext_text; -+ len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info); -+ len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len, - &r.text_buf[0], r.info->text_len, &r.info->dev_info); - } else { -- write_text = &text[0]; -+ write_text = text; - len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); - } - -@@ -2710,7 +2724,7 @@ static bool console_emit_next_record(str - console_lock_spinning_enable(); - - stop_critical_timings(); /* don't trace print latency */ -- call_console_driver(con, write_text, len); -+ call_console_driver(con, write_text, len, dropped_text); - start_critical_timings(); - - con->seq++; -@@ -2746,6 +2760,9 @@ static bool console_emit_next_record(str - */ - static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover) - { -+ static char dropped_text[DROPPED_TEXT_MAX]; -+ static char ext_text[CONSOLE_EXT_LOG_MAX]; -+ static char text[CONSOLE_LOG_MAX]; - bool any_usable = false; - struct console *con; - bool any_progress; -@@ -2763,7 +2780,16 @@ static bool console_flush_all(bool do_co - continue; - any_usable = true; - -- progress = console_emit_next_record(con, handover); -+ if (con->flags & CON_EXTENDED) { -+ /* Extended consoles do not print "dropped messages". */ -+ progress = console_emit_next_record(con, &text[0], -+ &ext_text[0], NULL, -+ handover); -+ } else { -+ progress = console_emit_next_record(con, &text[0], -+ NULL, &dropped_text[0], -+ handover); -+ } - if (*handover) - return false; - diff --git a/patches/0010-ptrace-Don-t-change-__state.patch b/patches/0010-ptrace-Don-t-change-__state.patch deleted file mode 100644 index 19123078a935..000000000000 --- a/patches/0010-ptrace-Don-t-change-__state.patch +++ /dev/null @@ -1,182 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:43 -0500 -Subject: [PATCH 10/12] ptrace: Don't change __state - -Stop playing with tsk->__state to remove TASK_WAKEKILL while a ptrace -command is executing. - -Instead remove TASK_WAKEKILL from the definition of TASK_TRACED, and -implement a new jobctl flag TASK_PTRACE_FROZEN. This new flag is set -in jobctl_freeze_task and cleared when ptrace_stop is awoken or in -jobctl_unfreeze_task (when ptrace_stop remains asleep). - -In signal_wake_up add __TASK_TRACED to state along with TASK_WAKEKILL -when the wake up is for a fatal signal. Skip adding __TASK_TRACED -when TASK_PTRACE_FROZEN is not set. This has the same effect as -changing TASK_TRACED to __TASK_TRACED as all of the wake_ups that use -TASK_KILLABLE go through signal_wake_up. 
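
Pieced together from the include/linux/sched/signal.h hunk below (the explanatory comment is added here, not part of the patch), the reworked helper amounts to:

	static inline void signal_wake_up(struct task_struct *t, bool fatal)
	{
		/* A tracee frozen for ptrace may only be woken by a fatal signal. */
		fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN);
		signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0);
	}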
- -Handle a ptrace_stop being called with a pending fatal signal. -Previously it would have been handled by schedule simply failing to -sleep. As TASK_WAKEKILL is no longer part of TASK_TRACED schedule -will sleep with a fatal_signal_pending. The code in signal_wake_up -guarantees that the code will be awaked by any fatal signal that -codes after TASK_TRACED is set. - -Previously the __state value of __TASK_TRACED was changed to -TASK_RUNNING when woken up or back to TASK_TRACED when the code was -left in ptrace_stop. Now when woken up ptrace_stop now clears -JOBCTL_PTRACE_FROZEN and when left sleeping ptrace_unfreezed_traced -clears JOBCTL_PTRACE_FROZEN. - -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-10-ebiederm@xmission.com ---- - include/linux/sched.h | 2 +- - include/linux/sched/jobctl.h | 2 ++ - include/linux/sched/signal.h | 5 +++-- - kernel/ptrace.c | 21 ++++++++------------- - kernel/sched/core.c | 5 +---- - kernel/signal.c | 14 ++++++-------- - 6 files changed, 21 insertions(+), 28 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -103,7 +103,7 @@ struct task_group; - /* Convenience macros for the sake of set_current_state: */ - #define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE) - #define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED) --#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED) -+#define TASK_TRACED __TASK_TRACED - - #define TASK_IDLE (TASK_UNINTERRUPTIBLE | TASK_NOLOAD) - ---- a/include/linux/sched/jobctl.h -+++ b/include/linux/sched/jobctl.h -@@ -19,6 +19,7 @@ struct task_struct; - #define JOBCTL_TRAPPING_BIT 21 /* switching to TRACED */ - #define JOBCTL_LISTENING_BIT 22 /* ptracer is listening for events */ - #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ -+#define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */ - - #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) - #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) -@@ -28,6 +29,7 @@ struct task_struct; - #define JOBCTL_TRAPPING (1UL << JOBCTL_TRAPPING_BIT) - #define JOBCTL_LISTENING (1UL << JOBCTL_LISTENING_BIT) - #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) -+#define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT) - - #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) - #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) ---- a/include/linux/sched/signal.h -+++ b/include/linux/sched/signal.h -@@ -435,9 +435,10 @@ extern void calculate_sigpending(void); - - extern void signal_wake_up_state(struct task_struct *t, unsigned int state); - --static inline void signal_wake_up(struct task_struct *t, bool resume) -+static inline void signal_wake_up(struct task_struct *t, bool fatal) - { -- signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0); -+ fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN); -+ signal_wake_up_state(t, fatal ? 
TASK_WAKEKILL | __TASK_TRACED : 0); - } - static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) - { ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -197,7 +197,7 @@ static bool ptrace_freeze_traced(struct - spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !looks_like_a_spurious_pid(task) && - !__fatal_signal_pending(task)) { -- WRITE_ONCE(task->__state, __TASK_TRACED); -+ task->jobctl |= JOBCTL_PTRACE_FROZEN; - ret = true; - } - spin_unlock_irq(&task->sighand->siglock); -@@ -207,23 +207,19 @@ static bool ptrace_freeze_traced(struct - - static void ptrace_unfreeze_traced(struct task_struct *task) - { -- if (READ_ONCE(task->__state) != __TASK_TRACED) -- return; -- -- WARN_ON(!task->ptrace || task->parent != current); -+ unsigned long flags; - - /* -- * PTRACE_LISTEN can allow ptrace_trap_notify to wake us up remotely. -- * Recheck state under the lock to close this race. -+ * The child may be awake and may have cleared -+ * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will -+ * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew. - */ -- spin_lock_irq(&task->sighand->siglock); -- if (READ_ONCE(task->__state) == __TASK_TRACED) { -+ if (lock_task_sighand(task, &flags)) { -+ task->jobctl &= ~JOBCTL_PTRACE_FROZEN; - if (__fatal_signal_pending(task)) - wake_up_state(task, __TASK_TRACED); -- else -- WRITE_ONCE(task->__state, TASK_TRACED); -+ unlock_task_sighand(task, &flags); - } -- spin_unlock_irq(&task->sighand->siglock); - } - - /** -@@ -256,7 +252,6 @@ static int ptrace_check_attach(struct ta - */ - read_lock(&tasklist_lock); - if (child->ptrace && child->parent == current) { -- WARN_ON(READ_ONCE(child->__state) == __TASK_TRACED); - /* - * child->sighand can't be NULL, release_task() - * does ptrace_unlink() before __exit_signal(). ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -6313,10 +6313,7 @@ static void __sched notrace __schedule(u - - /* - * We must load prev->state once (task_struct::state is volatile), such -- * that: -- * -- * - we form a control dependency vs deactivate_task() below. -- * - ptrace_{,un}freeze_traced() can change ->state underneath us. -+ * that we form a control dependency vs deactivate_task() below. - */ - prev_state = READ_ONCE(prev->__state); - if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -2209,14 +2209,12 @@ static int ptrace_stop(int exit_code, in - } - - /* -- * schedule() will not sleep if there is a pending signal that -- * can awaken the task. -- * -- * After this point ptrace_signal_wake_up will clear TASK_TRACED -- * if ptrace_unlink happens. Handle previous ptrace_unlinks -- * here to prevent ptrace_stop sleeping in schedule. -+ * After this point ptrace_signal_wake_up or signal_wake_up -+ * will clear TASK_TRACED if ptrace_unlink happens or a fatal -+ * signal comes in. Handle previous ptrace_unlinks and fatal -+ * signals here to prevent ptrace_stop sleeping in schedule. - */ -- if (!current->ptrace) -+ if (!current->ptrace || __fatal_signal_pending(current)) - return exit_code; - - set_special_state(TASK_TRACED); -@@ -2305,7 +2303,7 @@ static int ptrace_stop(int exit_code, in - current->exit_code = 0; - - /* LISTENING can be set only during STOP traps, clear it */ -- current->jobctl &= ~JOBCTL_LISTENING; -+ current->jobctl &= ~(JOBCTL_LISTENING | JOBCTL_PTRACE_FROZEN); - - /* - * Queued signals ignored us while we were stopped for tracing. 
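
Assembled from the kernel/ptrace.c hunk above (deleted lines dropped), the post-patch unfreeze path becomes:

	static void ptrace_unfreeze_traced(struct task_struct *task)
	{
		unsigned long flags;

		/*
		 * The child may be awake and may have cleared
		 * JOBCTL_PTRACE_FROZEN (see ptrace_resume). The child will
		 * not set JOBCTL_PTRACE_FROZEN or enter __TASK_TRACED anew.
		 */
		if (lock_task_sighand(task, &flags)) {
			task->jobctl &= ~JOBCTL_PTRACE_FROZEN;
			if (__fatal_signal_pending(task))
				wake_up_state(task, __TASK_TRACED);
			unlock_task_sighand(task, &flags);
		}
	}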
diff --git a/patches/0011-printk-add-pr_flush.patch b/patches/0011-printk-add-pr_flush.patch deleted file mode 100644 index f7ebf3bbc231..000000000000 --- a/patches/0011-printk-add-pr_flush.patch +++ /dev/null @@ -1,184 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:33 +0206 -Subject: [PATCH 11/18] printk: add pr_flush() - -Provide a might-sleep function to allow waiting for console printers -to catch up to the latest logged message. - -Use pr_flush() whenever it is desirable to get buffered messages -printed before continuing: suspend_console(), resume_console(), -console_stop(), console_start(), console_unblank(). - -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-12-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/printk.h | 7 ++++ - kernel/printk/printk.c | 83 +++++++++++++++++++++++++++++++++++++++++++++++++ - 2 files changed, 90 insertions(+) - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -170,6 +170,8 @@ extern void __printk_safe_exit(void); - #define printk_deferred_enter __printk_safe_enter - #define printk_deferred_exit __printk_safe_exit - -+extern bool pr_flush(int timeout_ms, bool reset_on_progress); -+ - /* - * Please don't use printk_ratelimit(), because it shares ratelimiting state - * with all other unrelated printk_ratelimit() callsites. Instead use -@@ -220,6 +222,11 @@ static inline void printk_deferred_exit( - { - } - -+static inline bool pr_flush(int timeout_ms, bool reset_on_progress) -+{ -+ return true; -+} -+ - static inline int printk_ratelimit(void) - { - return 0; ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -2296,6 +2296,8 @@ asmlinkage __visible int _printk(const c - } - EXPORT_SYMBOL(_printk); - -+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); -+ - #else /* CONFIG_PRINTK */ - - #define CONSOLE_LOG_MAX 0 -@@ -2328,6 +2330,7 @@ static void call_console_driver(struct c - { - } - static bool suppress_message_printing(int level) { return false; } -+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } - - #endif /* CONFIG_PRINTK */ - -@@ -2515,6 +2518,7 @@ void suspend_console(void) - if (!console_suspend_enabled) - return; - pr_info("Suspending console(s) (use no_console_suspend to debug)\n"); -+ pr_flush(1000, true); - console_lock(); - console_suspended = 1; - up_console_sem(); -@@ -2527,6 +2531,7 @@ void resume_console(void) - down_console_sem(); - console_suspended = 0; - console_unlock(); -+ pr_flush(1000, true); - } - - /** -@@ -2912,6 +2917,9 @@ void console_unblank(void) - if ((c->flags & CON_ENABLED) && c->unblank) - c->unblank(); - console_unlock(); -+ -+ if (!oops_in_progress) -+ pr_flush(1000, true); - } - - /** -@@ -2970,6 +2978,7 @@ struct tty_driver *console_device(int *i - */ - void console_stop(struct console *console) - { -+ __pr_flush(console, 1000, true); - console_lock(); - console->flags &= ~CON_ENABLED; - console_unlock(); -@@ -2981,6 +2990,7 @@ void console_start(struct console *conso - console_lock(); - console->flags |= CON_ENABLED; - console_unlock(); -+ __pr_flush(console, 1000, true); - } - EXPORT_SYMBOL(console_start); - -@@ -3352,6 +3362,79 @@ static int __init printk_late_init(void) - late_initcall(printk_late_init); - - #if defined CONFIG_PRINTK -+/* If @con is specified, only wait for that console. Otherwise wait for all. 
*/ -+static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) -+{ -+ int remaining = timeout_ms; -+ struct console *c; -+ u64 last_diff = 0; -+ u64 printk_seq; -+ u64 diff; -+ u64 seq; -+ -+ might_sleep(); -+ -+ seq = prb_next_seq(prb); -+ -+ for (;;) { -+ diff = 0; -+ -+ console_lock(); -+ for_each_console(c) { -+ if (con && con != c) -+ continue; -+ if (!console_is_usable(c)) -+ continue; -+ printk_seq = c->seq; -+ if (printk_seq < seq) -+ diff += seq - printk_seq; -+ } -+ console_unlock(); -+ -+ if (diff != last_diff && reset_on_progress) -+ remaining = timeout_ms; -+ -+ if (diff == 0 || remaining == 0) -+ break; -+ -+ if (remaining < 0) { -+ /* no timeout limit */ -+ msleep(100); -+ } else if (remaining < 100) { -+ msleep(remaining); -+ remaining = 0; -+ } else { -+ msleep(100); -+ remaining -= 100; -+ } -+ -+ last_diff = diff; -+ } -+ -+ return (diff == 0); -+} -+ -+/** -+ * pr_flush() - Wait for printing threads to catch up. -+ * -+ * @timeout_ms: The maximum time (in ms) to wait. -+ * @reset_on_progress: Reset the timeout if forward progress is seen. -+ * -+ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1 -+ * represents infinite waiting. -+ * -+ * If @reset_on_progress is true, the timeout will be reset whenever any -+ * printer has been seen to make some forward progress. -+ * -+ * Context: Process context. May sleep while acquiring console lock. -+ * Return: true if all enabled printers are caught up. -+ */ -+bool pr_flush(int timeout_ms, bool reset_on_progress) -+{ -+ return __pr_flush(NULL, timeout_ms, reset_on_progress); -+} -+EXPORT_SYMBOL(pr_flush); -+ - /* - * Delayed printk version, for scheduler-internal messages: - */ diff --git a/patches/0011-ptrace-Always-take-siglock-in-ptrace_resume.patch b/patches/0011-ptrace-Always-take-siglock-in-ptrace_resume.patch deleted file mode 100644 index 3d575e50489d..000000000000 --- a/patches/0011-ptrace-Always-take-siglock-in-ptrace_resume.patch +++ /dev/null @@ -1,46 +0,0 @@ -From: "Eric W. Biederman" -Date: Thu, 5 May 2022 13:26:44 -0500 -Subject: [PATCH 11/12] ptrace: Always take siglock in ptrace_resume - -Make code analysis simpler and future changes easier by -always taking siglock in ptrace_resume. - -Signed-off-by: "Eric W. Biederman" -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-11-ebiederm@xmission.com ---- - kernel/ptrace.c | 13 ++----------- - 1 file changed, 2 insertions(+), 11 deletions(-) - ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -837,8 +837,6 @@ static long ptrace_get_rseq_configuratio - static int ptrace_resume(struct task_struct *child, long request, - unsigned long data) - { -- bool need_siglock; -- - if (!valid_signal(data)) - return -EIO; - -@@ -874,18 +872,11 @@ static int ptrace_resume(struct task_str - * Note that we need siglock even if ->exit_code == data and/or this - * status was not reported yet, the new status must not be cleared by - * wait_task_stopped() after resume. -- * -- * If data == 0 we do not care if wait_task_stopped() reports the old -- * status and clears the code too; this can't race with the tracee, it -- * takes siglock after resume. 
- */ -- need_siglock = data && !thread_group_empty(current); -- if (need_siglock) -- spin_lock_irq(&child->sighand->siglock); -+ spin_lock_irq(&child->sighand->siglock); - child->exit_code = data; - wake_up_state(child, __TASK_TRACED); -- if (need_siglock) -- spin_unlock_irq(&child->sighand->siglock); -+ spin_unlock_irq(&child->sighand->siglock); - - return 0; - } diff --git a/patches/0012-printk-add-functions-to-prefer-direct-printing.patch b/patches/0012-printk-add-functions-to-prefer-direct-printing.patch deleted file mode 100644 index 73591bbdd0d2..000000000000 --- a/patches/0012-printk-add-functions-to-prefer-direct-printing.patch +++ /dev/null @@ -1,330 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:34 +0206 -Subject: [PATCH 12/18] printk: add functions to prefer direct printing - -Once kthread printing is available, console printing will no longer -occur in the context of the printk caller. However, there are some -special contexts where it is desirable for the printk caller to -directly print out kernel messages. Using pr_flush() to wait for -threaded printers is only possible if the caller is in a sleepable -context and the kthreads are active. That is not always the case. - -Introduce printk_prefer_direct_enter() and printk_prefer_direct_exit() -functions to explicitly (and globally) activate/deactivate preferred -direct console printing. The term "direct console printing" refers to -printing to all enabled consoles from the context of the printk -caller. The term "prefer" is used because this type of printing is -only best effort. If the console is currently locked or other -printers are already actively printing, the printk caller will need -to rely on the other contexts to handle the printing. - -This preferred direct printing is how all printing has been handled -until now (unless it was explicitly deferred). - -When kthread printing is introduced, there may be some unanticipated -problems due to kthreads being unable to flush important messages. -In order to minimize such risks, preferred direct printing is -activated for the primary important messages when the system -experiences general types of major errors. These are: - - - emergency reboot/shutdown - - cpu and rcu stalls - - hard and soft lockups - - hung tasks - - warn - - sysrq - -Note that since kthread printing does not yet exist, no behavior -changes result from this commit. This is only implementing the -counter and marking the various places where preferred direct -printing is active. - -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Acked-by: Paul E. McKenney # for RCU -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-13-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - drivers/tty/sysrq.c | 2 ++ - include/linux/printk.h | 11 +++++++++++ - kernel/hung_task.c | 11 ++++++++++- - kernel/panic.c | 4 ++++ - kernel/printk/printk.c | 28 ++++++++++++++++++++++++++++ - kernel/rcu/tree_stall.h | 2 ++ - kernel/reboot.c | 14 +++++++++++++- - kernel/watchdog.c | 4 ++++ - kernel/watchdog_hld.c | 4 ++++ - 9 files changed, 78 insertions(+), 2 deletions(-) - ---- a/drivers/tty/sysrq.c -+++ b/drivers/tty/sysrq.c -@@ -578,6 +578,7 @@ void __handle_sysrq(int key, bool check_ - - rcu_sysrq_start(); - rcu_read_lock(); -+ printk_prefer_direct_enter(); - /* - * Raise the apparent loglevel to maximum so that the sysrq header - * is shown to provide the user with positive feedback. 
We do not -@@ -619,6 +620,7 @@ void __handle_sysrq(int key, bool check_ - pr_cont("\n"); - console_loglevel = orig_log_level; - } -+ printk_prefer_direct_exit(); - rcu_read_unlock(); - rcu_sysrq_end(); - ---- a/include/linux/printk.h -+++ b/include/linux/printk.h -@@ -170,6 +170,9 @@ extern void __printk_safe_exit(void); - #define printk_deferred_enter __printk_safe_enter - #define printk_deferred_exit __printk_safe_exit - -+extern void printk_prefer_direct_enter(void); -+extern void printk_prefer_direct_exit(void); -+ - extern bool pr_flush(int timeout_ms, bool reset_on_progress); - - /* -@@ -222,6 +225,14 @@ static inline void printk_deferred_exit( - { - } - -+static inline void printk_prefer_direct_enter(void) -+{ -+} -+ -+static inline void printk_prefer_direct_exit(void) -+{ -+} -+ - static inline bool pr_flush(int timeout_ms, bool reset_on_progress) - { - return true; ---- a/kernel/hung_task.c -+++ b/kernel/hung_task.c -@@ -127,6 +127,8 @@ static void check_hung_task(struct task_ - * complain: - */ - if (sysctl_hung_task_warnings) { -+ printk_prefer_direct_enter(); -+ - if (sysctl_hung_task_warnings > 0) - sysctl_hung_task_warnings--; - pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n", -@@ -142,6 +144,8 @@ static void check_hung_task(struct task_ - - if (sysctl_hung_task_all_cpu_backtrace) - hung_task_show_all_bt = true; -+ -+ printk_prefer_direct_exit(); - } - - touch_nmi_watchdog(); -@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_t - } - unlock: - rcu_read_unlock(); -- if (hung_task_show_lock) -+ if (hung_task_show_lock) { -+ printk_prefer_direct_enter(); - debug_show_all_locks(); -+ printk_prefer_direct_exit(); -+ } - - if (hung_task_show_all_bt) { - hung_task_show_all_bt = false; -+ printk_prefer_direct_enter(); - trigger_all_cpu_backtrace(); -+ printk_prefer_direct_exit(); - } - - if (hung_task_call_panic) ---- a/kernel/panic.c -+++ b/kernel/panic.c -@@ -579,6 +579,8 @@ void __warn(const char *file, int line, - { - disable_trace_on_warning(); - -+ printk_prefer_direct_enter(); -+ - if (file) - pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n", - raw_smp_processor_id(), current->pid, file, line, -@@ -608,6 +610,8 @@ void __warn(const char *file, int line, - - /* Just a warning, don't kill lockdep. */ - add_taint(taint, LOCKDEP_STILL_OK); -+ -+ printk_prefer_direct_exit(); - } - - #ifndef __WARN_FLAGS ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -362,6 +362,34 @@ static int console_msg_format = MSG_FORM - static DEFINE_MUTEX(syslog_lock); - - #ifdef CONFIG_PRINTK -+static atomic_t printk_prefer_direct = ATOMIC_INIT(0); -+ -+/** -+ * printk_prefer_direct_enter - cause printk() calls to attempt direct -+ * printing to all enabled consoles -+ * -+ * Since it is not possible to call into the console printing code from any -+ * context, there is no guarantee that direct printing will occur. -+ * -+ * This globally effects all printk() callers. -+ * -+ * Context: Any context. -+ */ -+void printk_prefer_direct_enter(void) -+{ -+ atomic_inc(&printk_prefer_direct); -+} -+ -+/** -+ * printk_prefer_direct_exit - restore printk() behavior -+ * -+ * Context: Any context. -+ */ -+void printk_prefer_direct_exit(void) -+{ -+ WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0); -+} -+ - DECLARE_WAIT_QUEUE_HEAD(log_wait); - /* All 3 protected by @syslog_lock. 
*/ - /* the next printk record to read by syslog(READ) or /proc/kmsg */ ---- a/kernel/rcu/tree_stall.h -+++ b/kernel/rcu/tree_stall.h -@@ -619,6 +619,7 @@ static void print_cpu_stall(unsigned lon - * See Documentation/RCU/stallwarn.rst for info on how to debug - * RCU CPU stall warnings. - */ -+ printk_prefer_direct_enter(); - trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected")); - pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name); - raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags); -@@ -656,6 +657,7 @@ static void print_cpu_stall(unsigned lon - */ - set_tsk_need_resched(current); - set_preempt_need_resched(); -+ printk_prefer_direct_exit(); - } - - static void check_cpu_stall(struct rcu_data *rdp) ---- a/kernel/reboot.c -+++ b/kernel/reboot.c -@@ -447,9 +447,11 @@ static int __orderly_reboot(void) - ret = run_cmd(reboot_cmd); - - if (ret) { -+ printk_prefer_direct_enter(); - pr_warn("Failed to start orderly reboot: forcing the issue\n"); - emergency_sync(); - kernel_restart(NULL); -+ printk_prefer_direct_exit(); - } - - return ret; -@@ -462,6 +464,7 @@ static int __orderly_poweroff(bool force - ret = run_cmd(poweroff_cmd); - - if (ret && force) { -+ printk_prefer_direct_enter(); - pr_warn("Failed to start orderly shutdown: forcing the issue\n"); - - /* -@@ -471,6 +474,7 @@ static int __orderly_poweroff(bool force - */ - emergency_sync(); - kernel_power_off(); -+ printk_prefer_direct_exit(); - } - - return ret; -@@ -528,6 +532,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot); - */ - static void hw_failure_emergency_poweroff_func(struct work_struct *work) - { -+ printk_prefer_direct_enter(); -+ - /* - * We have reached here after the emergency shutdown waiting period has - * expired. This means orderly_poweroff has not been able to shut off -@@ -544,6 +550,8 @@ static void hw_failure_emergency_powerof - */ - pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n"); - emergency_restart(); -+ -+ printk_prefer_direct_exit(); - } - - static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work, -@@ -582,11 +590,13 @@ void hw_protection_shutdown(const char * - { - static atomic_t allow_proceed = ATOMIC_INIT(1); - -+ printk_prefer_direct_enter(); -+ - pr_emerg("HARDWARE PROTECTION shutdown (%s)\n", reason); - - /* Shutdown should be initiated only once. */ - if (!atomic_dec_and_test(&allow_proceed)) -- return; -+ goto out; - - /* - * Queue a backup emergency shutdown in the event of -@@ -594,6 +604,8 @@ void hw_protection_shutdown(const char * - */ - hw_failure_emergency_poweroff(ms_until_forced); - orderly_poweroff(true); -+out: -+ printk_prefer_direct_exit(); - } - EXPORT_SYMBOL_GPL(hw_protection_shutdown); - ---- a/kernel/watchdog.c -+++ b/kernel/watchdog.c -@@ -424,6 +424,8 @@ static enum hrtimer_restart watchdog_tim - /* Start period for the next softlockup warning. */ - update_report_ts(); - -+ printk_prefer_direct_enter(); -+ - pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! 
[%s:%d]\n", - smp_processor_id(), duration, - current->comm, task_pid_nr(current)); -@@ -442,6 +444,8 @@ static enum hrtimer_restart watchdog_tim - add_taint(TAINT_SOFTLOCKUP, LOCKDEP_STILL_OK); - if (softlockup_panic) - panic("softlockup: hung tasks"); -+ -+ printk_prefer_direct_exit(); - } - - return HRTIMER_RESTART; ---- a/kernel/watchdog_hld.c -+++ b/kernel/watchdog_hld.c -@@ -135,6 +135,8 @@ static void watchdog_overflow_callback(s - if (__this_cpu_read(hard_watchdog_warn) == true) - return; - -+ printk_prefer_direct_enter(); -+ - pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", - this_cpu); - print_modules(); -@@ -155,6 +157,8 @@ static void watchdog_overflow_callback(s - if (hardlockup_panic) - nmi_panic(regs, "Hard LOCKUP"); - -+ printk_prefer_direct_exit(); -+ - __this_cpu_write(hard_watchdog_warn, true); - return; - } diff --git a/patches/0012-sched-signal-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch b/patches/0012-sched-signal-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch deleted file mode 100644 index 94d2a619af0f..000000000000 --- a/patches/0012-sched-signal-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch +++ /dev/null @@ -1,212 +0,0 @@ -From: Peter Zijlstra -Date: Thu, 5 May 2022 13:26:45 -0500 -Subject: [PATCH 12/12] sched,signal,ptrace: Rework TASK_TRACED, TASK_STOPPED - state - -Currently ptrace_stop() / do_signal_stop() rely on the special states -TASK_TRACED and TASK_STOPPED resp. to keep unique state. That is, this -state exists only in task->__state and nowhere else. - -There's two spots of bother with this: - - - PREEMPT_RT has task->saved_state which complicates matters, - meaning task_is_{traced,stopped}() needs to check an additional - variable. - - - An alternative freezer implementation that itself relies on a - special TASK state would loose TASK_TRACED/TASK_STOPPED and will - result in misbehaviour. - -As such, add additional state to task->jobctl to track this state -outside of task->__state. - -NOTE: this doesn't actually fix anything yet, just adds extra state. - ---EWB - * didn't add a unnecessary newline in signal.h - * Update t->jobctl in signal_wake_up and ptrace_signal_wake_up - instead of in signal_wake_up_state. This prevents the clearing - of TASK_STOPPED and TASK_TRACED from getting lost. - * Added warnings if JOBCTL_STOPPED or JOBCTL_TRACED are not cleared - -Signed-off-by: Peter Zijlstra (Intel) -Link: https://lkml.kernel.org/r/20220421150654.757693825@infradead.org -Signed-off-by: Eric W. 
Biederman -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220505182645.497868-12-ebiederm@xmission.com ---- - include/linux/sched.h | 8 +++----- - include/linux/sched/jobctl.h | 6 ++++++ - include/linux/sched/signal.h | 19 +++++++++++++++---- - kernel/ptrace.c | 16 +++++++++++++--- - kernel/signal.c | 10 ++++++++-- - 5 files changed, 45 insertions(+), 14 deletions(-) - ---- a/include/linux/sched.h -+++ b/include/linux/sched.h -@@ -118,11 +118,9 @@ struct task_group; - - #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING) - --#define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0) -- --#define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0) -- --#define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0) -+#define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0) -+#define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0) -+#define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0) - - /* - * Special states are those that do not use the normal wait-loop pattern. See ---- a/include/linux/sched/jobctl.h -+++ b/include/linux/sched/jobctl.h -@@ -21,6 +21,9 @@ struct task_struct; - #define JOBCTL_TRAP_FREEZE_BIT 23 /* trap for cgroup freezer */ - #define JOBCTL_PTRACE_FROZEN_BIT 24 /* frozen for ptrace */ - -+#define JOBCTL_STOPPED_BIT 26 /* do_signal_stop() */ -+#define JOBCTL_TRACED_BIT 27 /* ptrace_stop() */ -+ - #define JOBCTL_STOP_DEQUEUED (1UL << JOBCTL_STOP_DEQUEUED_BIT) - #define JOBCTL_STOP_PENDING (1UL << JOBCTL_STOP_PENDING_BIT) - #define JOBCTL_STOP_CONSUME (1UL << JOBCTL_STOP_CONSUME_BIT) -@@ -31,6 +34,9 @@ struct task_struct; - #define JOBCTL_TRAP_FREEZE (1UL << JOBCTL_TRAP_FREEZE_BIT) - #define JOBCTL_PTRACE_FROZEN (1UL << JOBCTL_PTRACE_FROZEN_BIT) - -+#define JOBCTL_STOPPED (1UL << JOBCTL_STOPPED_BIT) -+#define JOBCTL_TRACED (1UL << JOBCTL_TRACED_BIT) -+ - #define JOBCTL_TRAP_MASK (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY) - #define JOBCTL_PENDING_MASK (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK) - ---- a/include/linux/sched/signal.h -+++ b/include/linux/sched/signal.h -@@ -294,8 +294,10 @@ static inline int kernel_dequeue_signal( - static inline void kernel_signal_stop(void) - { - spin_lock_irq(¤t->sighand->siglock); -- if (current->jobctl & JOBCTL_STOP_DEQUEUED) -+ if (current->jobctl & JOBCTL_STOP_DEQUEUED) { -+ current->jobctl |= JOBCTL_STOPPED; - set_special_state(TASK_STOPPED); -+ } - spin_unlock_irq(¤t->sighand->siglock); - - schedule(); -@@ -437,12 +439,21 @@ extern void signal_wake_up_state(struct - - static inline void signal_wake_up(struct task_struct *t, bool fatal) - { -- fatal = fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN); -- signal_wake_up_state(t, fatal ? TASK_WAKEKILL | __TASK_TRACED : 0); -+ unsigned int state = 0; -+ if (fatal && !(t->jobctl & JOBCTL_PTRACE_FROZEN)) { -+ t->jobctl &= ~(JOBCTL_STOPPED | JOBCTL_TRACED); -+ state = TASK_WAKEKILL | __TASK_TRACED; -+ } -+ signal_wake_up_state(t, state); - } - static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume) - { -- signal_wake_up_state(t, resume ? 
__TASK_TRACED : 0); -+ unsigned int state = 0; -+ if (resume) { -+ t->jobctl &= ~JOBCTL_TRACED; -+ state = __TASK_TRACED; -+ } -+ signal_wake_up_state(t, state); - } - - void task_join_group_stop(struct task_struct *task); ---- a/kernel/ptrace.c -+++ b/kernel/ptrace.c -@@ -185,7 +185,12 @@ static bool looks_like_a_spurious_pid(st - return true; - } - --/* Ensure that nothing can wake it up, even SIGKILL */ -+/* -+ * Ensure that nothing can wake it up, even SIGKILL -+ * -+ * A task is switched to this state while a ptrace operation is in progress; -+ * such that the ptrace operation is uninterruptible. -+ */ - static bool ptrace_freeze_traced(struct task_struct *task) - { - bool ret = false; -@@ -216,8 +221,10 @@ static void ptrace_unfreeze_traced(struc - */ - if (lock_task_sighand(task, &flags)) { - task->jobctl &= ~JOBCTL_PTRACE_FROZEN; -- if (__fatal_signal_pending(task)) -+ if (__fatal_signal_pending(task)) { -+ task->jobctl &= ~TASK_TRACED; - wake_up_state(task, __TASK_TRACED); -+ } - unlock_task_sighand(task, &flags); - } - } -@@ -462,8 +469,10 @@ static int ptrace_attach(struct task_str - * in and out of STOPPED are protected by siglock. - */ - if (task_is_stopped(task) && -- task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) -+ task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING)) { -+ task->jobctl &= ~JOBCTL_STOPPED; - signal_wake_up_state(task, __TASK_STOPPED); -+ } - - spin_unlock(&task->sighand->siglock); - -@@ -875,6 +884,7 @@ static int ptrace_resume(struct task_str - */ - spin_lock_irq(&child->sighand->siglock); - child->exit_code = data; -+ child->jobctl &= ~JOBCTL_TRACED; - wake_up_state(child, __TASK_TRACED); - spin_unlock_irq(&child->sighand->siglock); - ---- a/kernel/signal.c -+++ b/kernel/signal.c -@@ -762,7 +762,10 @@ static int dequeue_synchronous_signal(ke - */ - void signal_wake_up_state(struct task_struct *t, unsigned int state) - { -+ lockdep_assert_held(&t->sighand->siglock); -+ - set_tsk_thread_flag(t, TIF_SIGPENDING); -+ - /* - * TASK_WAKEKILL also means wake it up in the stopped/traced/killable - * case. We don't check t->state here because there is a race with it -@@ -930,9 +933,10 @@ static bool prepare_signal(int sig, stru - for_each_thread(p, t) { - flush_sigqueue_mask(&flush, &t->pending); - task_clear_jobctl_pending(t, JOBCTL_STOP_PENDING); -- if (likely(!(t->ptrace & PT_SEIZED))) -+ if (likely(!(t->ptrace & PT_SEIZED))) { -+ t->jobctl &= ~JOBCTL_STOPPED; - wake_up_state(t, __TASK_STOPPED); -- else -+ } else - ptrace_trap_notify(t); - } - -@@ -2218,6 +2222,7 @@ static int ptrace_stop(int exit_code, in - return exit_code; - - set_special_state(TASK_TRACED); -+ current->jobctl |= JOBCTL_TRACED; - - /* - * We're committing to trapping. TRACED should be visible before -@@ -2436,6 +2441,7 @@ static bool do_signal_stop(int signr) - if (task_participate_group_stop(current)) - notify = CLD_STOPPED; - -+ current->jobctl |= JOBCTL_STOPPED; - set_special_state(TASK_STOPPED); - spin_unlock_irq(¤t->sighand->siglock); - diff --git a/patches/0013-printk-add-kthread-console-printers.patch b/patches/0013-printk-add-kthread-console-printers.patch deleted file mode 100644 index e4972b11c211..000000000000 --- a/patches/0013-printk-add-kthread-console-printers.patch +++ /dev/null @@ -1,530 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:35 +0206 -Subject: [PATCH 13/18] printk: add kthread console printers - -Create a kthread for each console to perform console printing. 
During -normal operation (@system_state == SYSTEM_RUNNING), the kthread -printers are responsible for all printing on their respective -consoles. - -During non-normal operation, console printing is done as it has been: -within the context of the printk caller or within irqwork triggered -by the printk caller, referred to as direct printing. - -Since threaded console printers are responsible for all printing -during normal operation, this also includes messages generated via -deferred printk calls. If direct printing is in effect during a -deferred printk call, the queued irqwork will perform the direct -printing. To make it clear that this is the only time that the -irqwork will perform direct printing, rename the flag -PRINTK_PENDING_OUTPUT to PRINTK_PENDING_DIRECT_OUTPUT. - -Threaded console printers synchronize against each other and against -console lockers by taking the console lock for each message that is -printed. - -Note that the kthread printers do not care about direct printing. -They will always try to print if new records are available. They can -be blocked by direct printing, but will be woken again once direct -printing is finished. - -Console unregistration is a bit tricky because the associated -kthread printer cannot be stopped while the console lock is held. -A policy is implemented that states: whichever task clears -con->thread (under the console lock) is responsible for stopping -the kthread. unregister_console() will clear con->thread while -the console lock is held and then stop the kthread after releasing -the console lock. - -For consoles that have implemented the exit() callback, the kthread -is stopped before exit() is called. - -Signed-off-by: John Ogness -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-14-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/console.h | 2 - kernel/printk/printk.c | 303 ++++++++++++++++++++++++++++++++++++++++++++---- - 2 files changed, 283 insertions(+), 22 deletions(-) - ---- a/include/linux/console.h -+++ b/include/linux/console.h -@@ -153,6 +153,8 @@ struct console { - uint ospeed; - u64 seq; - unsigned long dropped; -+ struct task_struct *thread; -+ - void *data; - struct console *next; - }; ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -361,6 +361,13 @@ static int console_msg_format = MSG_FORM - /* syslog_lock protects syslog_* variables and write access to clear_seq. */ - static DEFINE_MUTEX(syslog_lock); - -+/* -+ * A flag to signify if printk_activate_kthreads() has already started the -+ * kthread printers. If true, any later registered consoles must start their -+ * own kthread directly. The flag is write protected by the console_lock. -+ */ -+static bool printk_kthreads_available; -+ - #ifdef CONFIG_PRINTK - static atomic_t printk_prefer_direct = ATOMIC_INIT(0); - -@@ -390,6 +397,14 @@ void printk_prefer_direct_exit(void) - WARN_ON(atomic_dec_if_positive(&printk_prefer_direct) < 0); - } - -+static inline bool allow_direct_printing(void) -+{ -+ return (!printk_kthreads_available || -+ system_state > SYSTEM_RUNNING || -+ oops_in_progress || -+ atomic_read(&printk_prefer_direct)); -+} -+ - DECLARE_WAIT_QUEUE_HEAD(log_wait); - /* All 3 protected by @syslog_lock. 
*/ - /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -2280,10 +2295,10 @@ asmlinkage int vprintk_emit(int facility - printed_len = vprintk_store(facility, level, dev_info, fmt, args); - - /* If called from the scheduler, we can not call up(). */ -- if (!in_sched) { -+ if (!in_sched && allow_direct_printing()) { - /* - * The caller may be holding system-critical or -- * timing-sensitive locks. Disable preemption during -+ * timing-sensitive locks. Disable preemption during direct - * printing of all remaining records to all consoles so that - * this context can return as soon as possible. Hopefully - * another printk() caller will take over the printing. -@@ -2326,6 +2341,8 @@ EXPORT_SYMBOL(_printk); - - static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); - -+static void printk_start_kthread(struct console *con); -+ - #else /* CONFIG_PRINTK */ - - #define CONSOLE_LOG_MAX 0 -@@ -2359,6 +2376,8 @@ static void call_console_driver(struct c - } - static bool suppress_message_printing(int level) { return false; } - static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; } -+static void printk_start_kthread(struct console *con) { } -+static bool allow_direct_printing(void) { return true; } - - #endif /* CONFIG_PRINTK */ - -@@ -2559,6 +2578,13 @@ void resume_console(void) - down_console_sem(); - console_suspended = 0; - console_unlock(); -+ -+ /* -+ * While suspended, new records may have been added to the -+ * ringbuffer. Wake up the kthread printers to print them. -+ */ -+ wake_up_klogd(); -+ - pr_flush(1000, true); - } - -@@ -2577,6 +2603,9 @@ static int console_cpu_notify(unsigned i - /* If trylock fails, someone else is doing the printing */ - if (console_trylock()) - console_unlock(); -+ -+ /* Wake kthread printers. Some may have become usable. */ -+ wake_up_klogd(); - } - return 0; - } -@@ -2648,18 +2677,9 @@ static bool abandon_console_lock_in_pani - return atomic_read(&panic_cpu) != raw_smp_processor_id(); - } - --/* -- * Check if the given console is currently capable and allowed to print -- * records. -- * -- * Requires the console_lock. -- */ --static inline bool console_is_usable(struct console *con) -+static inline bool __console_is_usable(short flags) - { -- if (!(con->flags & CON_ENABLED)) -- return false; -- -- if (!con->write) -+ if (!(flags & CON_ENABLED)) - return false; - - /* -@@ -2668,12 +2688,26 @@ static inline bool console_is_usable(str - * cope (CON_ANYTIME) don't call them until this CPU is officially up. - */ - if (!cpu_online(raw_smp_processor_id()) && -- !(con->flags & CON_ANYTIME)) -+ !(flags & CON_ANYTIME)) - return false; - - return true; - } - -+/* -+ * Check if the given console is currently capable and allowed to print -+ * records. -+ * -+ * Requires the console_lock. -+ */ -+static inline bool console_is_usable(struct console *con) -+{ -+ if (!con->write) -+ return false; -+ -+ return __console_is_usable(con->flags); -+} -+ - static void __console_unlock(void) - { - console_locked = 0; -@@ -2786,8 +2820,8 @@ static bool console_emit_next_record(str - * were flushed to all usable consoles. A returned false informs the caller - * that everything was not flushed (either there were no usable consoles or - * another context has taken over printing or it is a panic situation and this -- * is not the panic CPU). Regardless the reason, the caller should assume it -- * is not useful to immediately try again. -+ * is not the panic CPU or direct printing is not preferred). 
Regardless the -+ * reason, the caller should assume it is not useful to immediately try again. - * - * Requires the console_lock. - */ -@@ -2804,6 +2838,10 @@ static bool console_flush_all(bool do_co - *handover = false; - - do { -+ /* Let the kthread printers do the work if they can. */ -+ if (!allow_direct_printing()) -+ return false; -+ - any_progress = false; - - for_each_console(con) { -@@ -3018,6 +3056,10 @@ void console_start(struct console *conso - console_lock(); - console->flags |= CON_ENABLED; - console_unlock(); -+ -+ /* Wake the newly enabled kthread printer. */ -+ wake_up_klogd(); -+ - __pr_flush(console, 1000, true); - } - EXPORT_SYMBOL(console_start); -@@ -3218,6 +3260,8 @@ void register_console(struct console *ne - nr_ext_console_drivers++; - - newcon->dropped = 0; -+ newcon->thread = NULL; -+ - if (newcon->flags & CON_PRINTBUFFER) { - /* Get a consistent copy of @syslog_seq. */ - mutex_lock(&syslog_lock); -@@ -3227,6 +3271,10 @@ void register_console(struct console *ne - /* Begin with next message. */ - newcon->seq = prb_next_seq(prb); - } -+ -+ if (printk_kthreads_available) -+ printk_start_kthread(newcon); -+ - console_unlock(); - console_sysfs_notify(); - -@@ -3253,6 +3301,7 @@ EXPORT_SYMBOL(register_console); - - int unregister_console(struct console *console) - { -+ struct task_struct *thd; - struct console *con; - int res; - -@@ -3293,7 +3342,20 @@ int unregister_console(struct console *c - console_drivers->flags |= CON_CONSDEV; - - console->flags &= ~CON_ENABLED; -+ -+ /* -+ * console->thread can only be cleared under the console lock. But -+ * stopping the thread must be done without the console lock. The -+ * task that clears @thread is the task that stops the kthread. -+ */ -+ thd = console->thread; -+ console->thread = NULL; -+ - console_unlock(); -+ -+ if (thd) -+ kthread_stop(thd); -+ - console_sysfs_notify(); - - if (console->exit) -@@ -3389,6 +3451,20 @@ static int __init printk_late_init(void) - } - late_initcall(printk_late_init); - -+static int __init printk_activate_kthreads(void) -+{ -+ struct console *con; -+ -+ console_lock(); -+ printk_kthreads_available = true; -+ for_each_console(con) -+ printk_start_kthread(con); -+ console_unlock(); -+ -+ return 0; -+} -+early_initcall(printk_activate_kthreads); -+ - #if defined CONFIG_PRINTK - /* If @con is specified, only wait for that console. Otherwise wait for all. */ - static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) -@@ -3463,11 +3539,180 @@ bool pr_flush(int timeout_ms, bool reset - } - EXPORT_SYMBOL(pr_flush); - -+static void __printk_fallback_preferred_direct(void) -+{ -+ printk_prefer_direct_enter(); -+ pr_err("falling back to preferred direct printing\n"); -+ printk_kthreads_available = false; -+} -+ -+/* -+ * Enter preferred direct printing, but never exit. Mark console threads as -+ * unavailable. The system is then forever in preferred direct printing and -+ * any printing threads will exit. -+ * -+ * Must *not* be called under console_lock. Use -+ * __printk_fallback_preferred_direct() if already holding console_lock. 
-+ */ -+static void printk_fallback_preferred_direct(void) -+{ -+ console_lock(); -+ __printk_fallback_preferred_direct(); -+ console_unlock(); -+} -+ -+static bool printer_should_wake(struct console *con, u64 seq) -+{ -+ short flags; -+ -+ if (kthread_should_stop() || !printk_kthreads_available) -+ return true; -+ -+ if (console_suspended) -+ return false; -+ -+ /* -+ * This is an unsafe read from con->flags, but a false positive is -+ * not a problem. Worst case it would allow the printer to wake up -+ * although it is disabled. But the printer will notice that when -+ * attempting to print and instead go back to sleep. -+ */ -+ flags = data_race(READ_ONCE(con->flags)); -+ -+ if (!__console_is_usable(flags)) -+ return false; -+ -+ return prb_read_valid(prb, seq, NULL); -+} -+ -+static int printk_kthread_func(void *data) -+{ -+ struct console *con = data; -+ char *dropped_text = NULL; -+ char *ext_text = NULL; -+ bool handover; -+ u64 seq = 0; -+ char *text; -+ int error; -+ -+ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); -+ if (!text) { -+ con_printk(KERN_ERR, con, "failed to allocate text buffer\n"); -+ printk_fallback_preferred_direct(); -+ goto out; -+ } -+ -+ if (con->flags & CON_EXTENDED) { -+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL); -+ if (!ext_text) { -+ con_printk(KERN_ERR, con, "failed to allocate ext_text buffer\n"); -+ printk_fallback_preferred_direct(); -+ goto out; -+ } -+ } else { -+ dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL); -+ if (!dropped_text) { -+ con_printk(KERN_ERR, con, "failed to allocate dropped_text buffer\n"); -+ printk_fallback_preferred_direct(); -+ goto out; -+ } -+ } -+ -+ con_printk(KERN_INFO, con, "printing thread started\n"); -+ -+ for (;;) { -+ /* -+ * Guarantee this task is visible on the waitqueue before -+ * checking the wake condition. -+ * -+ * The full memory barrier within set_current_state() of -+ * prepare_to_wait_event() pairs with the full memory barrier -+ * within wq_has_sleeper(). -+ * -+ * This pairs with __wake_up_klogd:A. -+ */ -+ error = wait_event_interruptible(log_wait, -+ printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */ -+ -+ if (kthread_should_stop() || !printk_kthreads_available) -+ break; -+ -+ if (error) -+ continue; -+ -+ console_lock(); -+ -+ if (console_suspended) { -+ up_console_sem(); -+ continue; -+ } -+ -+ if (!console_is_usable(con)) { -+ __console_unlock(); -+ continue; -+ } -+ -+ /* -+ * Even though the printk kthread is always preemptible, it is -+ * still not allowed to call cond_resched() from within -+ * console drivers. The task may become non-preemptible in the -+ * console driver call chain. For example, vt_console_print() -+ * takes a spinlock and then can call into fbcon_redraw(), -+ * which can conditionally invoke cond_resched(). -+ */ -+ console_may_schedule = 0; -+ console_emit_next_record(con, text, ext_text, dropped_text, &handover); -+ if (handover) -+ continue; -+ -+ seq = con->seq; -+ -+ __console_unlock(); -+ } -+ -+ con_printk(KERN_INFO, con, "printing thread stopped\n"); -+out: -+ kfree(dropped_text); -+ kfree(ext_text); -+ kfree(text); -+ -+ console_lock(); -+ /* -+ * If this kthread is being stopped by another task, con->thread will -+ * already be NULL. That is fine. The important thing is that it is -+ * NULL after the kthread exits. -+ */ -+ con->thread = NULL; -+ console_unlock(); -+ -+ return 0; -+} -+ -+/* Must be called under console_lock. 
*/ -+static void printk_start_kthread(struct console *con) -+{ -+ /* -+ * Do not start a kthread if there is no write() callback. The -+ * kthreads assume the write() callback exists. -+ */ -+ if (!con->write) -+ return; -+ -+ con->thread = kthread_run(printk_kthread_func, con, -+ "pr/%s%d", con->name, con->index); -+ if (IS_ERR(con->thread)) { -+ con->thread = NULL; -+ con_printk(KERN_ERR, con, "unable to start printing thread\n"); -+ __printk_fallback_preferred_direct(); -+ return; -+ } -+} -+ - /* - * Delayed printk version, for scheduler-internal messages: - */ --#define PRINTK_PENDING_WAKEUP 0x01 --#define PRINTK_PENDING_OUTPUT 0x02 -+#define PRINTK_PENDING_WAKEUP 0x01 -+#define PRINTK_PENDING_DIRECT_OUTPUT 0x02 - - static DEFINE_PER_CPU(int, printk_pending); - -@@ -3475,10 +3720,14 @@ static void wake_up_klogd_work_func(stru - { - int pending = this_cpu_xchg(printk_pending, 0); - -- if (pending & PRINTK_PENDING_OUTPUT) { -+ if (pending & PRINTK_PENDING_DIRECT_OUTPUT) { -+ printk_prefer_direct_enter(); -+ - /* If trylock fails, someone else is doing the printing */ - if (console_trylock()) - console_unlock(); -+ -+ printk_prefer_direct_exit(); - } - - if (pending & PRINTK_PENDING_WAKEUP) -@@ -3503,10 +3752,11 @@ static void __wake_up_klogd(int val) - * prepare_to_wait_event(), which is called after ___wait_event() adds - * the waiter but before it has checked the wait condition. - * -- * This pairs with devkmsg_read:A and syslog_print:A. -+ * This pairs with devkmsg_read:A, syslog_print:A, and -+ * printk_kthread_func:A. - */ - if (wq_has_sleeper(&log_wait) || /* LMM(__wake_up_klogd:A) */ -- (val & PRINTK_PENDING_OUTPUT)) { -+ (val & PRINTK_PENDING_DIRECT_OUTPUT)) { - this_cpu_or(printk_pending, val); - irq_work_queue(this_cpu_ptr(&wake_up_klogd_work)); - } -@@ -3524,7 +3774,16 @@ void defer_console_output(void) - * New messages may have been added directly to the ringbuffer - * using vprintk_store(), so wake any waiters as well. - */ -- __wake_up_klogd(PRINTK_PENDING_WAKEUP | PRINTK_PENDING_OUTPUT); -+ int val = PRINTK_PENDING_WAKEUP; -+ -+ /* -+ * If console deferring was called with preferred direct printing, -+ * make the irqwork perform the direct printing. -+ */ -+ if (atomic_read(&printk_prefer_direct)) -+ val |= PRINTK_PENDING_DIRECT_OUTPUT; -+ -+ __wake_up_klogd(val); - } - - void printk_trigger_flush(void) diff --git a/patches/0014-printk-extend-console_lock-for-per-console-locking.patch b/patches/0014-printk-extend-console_lock-for-per-console-locking.patch deleted file mode 100644 index 13ba352f449e..000000000000 --- a/patches/0014-printk-extend-console_lock-for-per-console-locking.patch +++ /dev/null @@ -1,584 +0,0 @@ -From: John Ogness -Date: Mon, 25 Apr 2022 23:04:28 +0206 -Subject: [PATCH 14/18] printk: extend console_lock for per-console locking - -Currently threaded console printers synchronize against each -other using console_lock(). However, different console drivers -are unrelated and do not require any synchronization between -each other. Removing the synchronization between the threaded -console printers will allow each console to print at its own -speed. - -But the threaded consoles printers do still need to synchronize -against console_lock() callers. Introduce a per-console mutex -and a new console boolean field @blocked to provide this -synchronization. - -console_lock() is modified so that it must acquire the mutex -of each console in order to set the @blocked field. Console -printing threads will acquire their mutex while printing a -record. 
If @blocked was set, the thread will go back to sleep -instead of printing. - -The reason for the @blocked boolean field is so that -console_lock() callers do not need to acquire multiple console -mutexes simultaneously, which would introduce unnecessary -complexity due to nested mutex locking. Also, a new field -was chosen instead of adding a new @flags value so that the -blocked status could be checked without concern of reading -inconsistent values due to @flags updates from other contexts. - -Threaded console printers also need to synchronize against -console_trylock() callers. Since console_trylock() may be -called from any context, the per-console mutex cannot be used -for this synchronization. (mutex_trylock() cannot be called -from atomic contexts.) Introduce a global atomic counter to -identify if any threaded printers are active. The threaded -printers will also check the atomic counter to identify if the -console has been locked by another task via console_trylock(). - -Note that @console_sem is still used to provide synchronization -between console_lock() and console_trylock() callers. - -A locking overview for console_lock(), console_trylock(), and the -threaded printers is as follows (pseudo code): - -console_lock() -{ - down(&console_sem); - for_each_console(con) { - mutex_lock(&con->lock); - con->blocked = true; - mutex_unlock(&con->lock); - } - /* console_lock acquired */ -} - -console_trylock() -{ - if (down_trylock(&console_sem) == 0) { - if (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0) { - /* console_lock acquired */ - } - } -} - -threaded_printer() -{ - mutex_lock(&con->lock); - if (!con->blocked) { - /* console_lock() callers blocked */ - - if (atomic_inc_unless_negative(&console_kthreads_active)) { - /* console_trylock() callers blocked */ - - con->write(); - - atomic_dec(&console_lock_count); - } - } - mutex_unlock(&con->lock); -} - -The console owner and waiter logic now only applies between contexts -that have taken the console_lock via console_trylock(). Threaded -printers never take the console_lock, so they do not have a -console_lock to handover. Tasks that have used console_lock() will -block the threaded printers using a mutex and if the console_lock -is handed over to an atomic context, it would be unable to unblock -the threaded printers. However, the console_trylock() case is -really the only scenario that is interesting for handovers anyway. - -@panic_console_dropped must change to atomic_t since it is no longer -protected exclusively by the console_lock. - -Since threaded printers remain asleep if they see that the console -is locked, they now must be explicitly woken in __console_unlock(). -This means wake_up_klogd() calls following a console_unlock() are -no longer necessary and are removed. - -Also note that threaded printers no longer need to check -@console_suspended. The check for the @blocked field implicitly -covers the suspended console case. 
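The tri-state protocol above can be cross-checked with a small self-contained sketch in plain C11 atomics. This is only an illustration of the pseudo code (note that the atomic_dec() there operates on this same counter); the kernel patch itself uses the atomic_t helpers atomic_cmpxchg() and atomic_inc_unless_negative():

#include <stdatomic.h>
#include <stdbool.h>

/* -1 = blocked via console_trylock(), 0 = idle, >0 = printers active */
static atomic_int console_kthreads_active;

static bool console_kthreads_atomic_tryblock(void)
{
	int expected = 0;

	/* console_trylock() caller: succeeds only if no printer is mid-write */
	return atomic_compare_exchange_strong(&console_kthreads_active,
					      &expected, -1);
}

static bool console_kthread_printing_tryenter(void)
{
	int old = atomic_load(&console_kthreads_active);

	/* kthread printer: enter unless a trylock holder has blocked us */
	while (old >= 0) {
		if (atomic_compare_exchange_weak(&console_kthreads_active,
						 &old, old + 1))
			return true;
	}
	return false;
}

static void console_kthread_printing_exit(void)
{
	atomic_fetch_sub(&console_kthreads_active, 1);
}

A trylock holder unblocks with the mirror cmpxchg (-1 back to 0), so neither side ever sleeps on this counter; blocking sleeps happen only on the per-console mutex.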
- -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior ---- - include/linux/console.h | 15 ++ - kernel/printk/printk.c | 261 +++++++++++++++++++++++++++++++++++++----------- - 2 files changed, 220 insertions(+), 56 deletions(-) - ---- a/include/linux/console.h -+++ b/include/linux/console.h -@@ -16,6 +16,7 @@ - - #include - #include -+#include - - struct vc_data; - struct console_font_op; -@@ -154,6 +155,20 @@ struct console { - u64 seq; - unsigned long dropped; - struct task_struct *thread; -+ bool blocked; -+ -+ /* -+ * The per-console lock is used by printing kthreads to synchronize -+ * this console with callers of console_lock(). This is necessary in -+ * order to allow printing kthreads to run in parallel to each other, -+ * while each safely accessing the @blocked field and synchronizing -+ * against direct printing via console_lock/console_unlock. -+ * -+ * Note: For synchronizing against direct printing via -+ * console_trylock/console_unlock, see the static global -+ * variable @console_kthreads_active. -+ */ -+ struct mutex lock; - - void *data; - struct console *next; ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -224,6 +224,33 @@ int devkmsg_sysctl_set_loglvl(struct ctl - static int nr_ext_console_drivers; - - /* -+ * Used to synchronize printing kthreads against direct printing via -+ * console_trylock/console_unlock. -+ * -+ * Values: -+ * -1 = console kthreads atomically blocked (via global trylock) -+ * 0 = no kthread printing, console not locked (via trylock) -+ * >0 = kthread(s) actively printing -+ * -+ * Note: For synchronizing against direct printing via -+ * console_lock/console_unlock, see the @lock variable in -+ * struct console. -+ */ -+static atomic_t console_kthreads_active = ATOMIC_INIT(0); -+ -+#define console_kthreads_atomic_tryblock() \ -+ (atomic_cmpxchg(&console_kthreads_active, 0, -1) == 0) -+#define console_kthreads_atomic_unblock() \ -+ atomic_cmpxchg(&console_kthreads_active, -1, 0) -+#define console_kthreads_atomically_blocked() \ -+ (atomic_read(&console_kthreads_active) == -1) -+ -+#define console_kthread_printing_tryenter() \ -+ atomic_inc_unless_negative(&console_kthreads_active) -+#define console_kthread_printing_exit() \ -+ atomic_dec(&console_kthreads_active) -+ -+/* - * Helper macros to handle lockdep when locking/unlocking console_sem. We use - * macros instead of functions so that _RET_IP_ contains useful information. - */ -@@ -271,6 +298,49 @@ static bool panic_in_progress(void) - } - - /* -+ * Tracks whether kthread printers are all blocked. A value of true implies -+ * that the console is locked via console_lock() or the console is suspended. -+ * Writing to this variable requires holding @console_sem. -+ */ -+static bool console_kthreads_blocked; -+ -+/* -+ * Block all kthread printers from a schedulable context. -+ * -+ * Requires holding @console_sem. -+ */ -+static void console_kthreads_block(void) -+{ -+ struct console *con; -+ -+ for_each_console(con) { -+ mutex_lock(&con->lock); -+ con->blocked = true; -+ mutex_unlock(&con->lock); -+ } -+ -+ console_kthreads_blocked = true; -+} -+ -+/* -+ * Unblock all kthread printers from a schedulable context. -+ * -+ * Requires holding @console_sem. 
-+ */ -+static void console_kthreads_unblock(void) -+{ -+ struct console *con; -+ -+ for_each_console(con) { -+ mutex_lock(&con->lock); -+ con->blocked = false; -+ mutex_unlock(&con->lock); -+ } -+ -+ console_kthreads_blocked = false; -+} -+ -+/* - * This is used for debugging the mess that is the VT code by - * keeping track if we have the console semaphore held. It's - * definitely not the perfect debug tool (we don't know if _WE_ -@@ -2578,13 +2648,6 @@ void resume_console(void) - down_console_sem(); - console_suspended = 0; - console_unlock(); -- -- /* -- * While suspended, new records may have been added to the -- * ringbuffer. Wake up the kthread printers to print them. -- */ -- wake_up_klogd(); -- - pr_flush(1000, true); - } - -@@ -2603,9 +2666,14 @@ static int console_cpu_notify(unsigned i - /* If trylock fails, someone else is doing the printing */ - if (console_trylock()) - console_unlock(); -- -- /* Wake kthread printers. Some may have become usable. */ -- wake_up_klogd(); -+ else { -+ /* -+ * If a new CPU comes online, the conditions for -+ * printer_should_wake() may have changed for some -+ * kthread printer with !CON_ANYTIME. -+ */ -+ wake_up_klogd(); -+ } - } - return 0; - } -@@ -2625,6 +2693,7 @@ void console_lock(void) - down_console_sem(); - if (console_suspended) - return; -+ console_kthreads_block(); - console_locked = 1; - console_may_schedule = 1; - } -@@ -2646,6 +2715,10 @@ int console_trylock(void) - up_console_sem(); - return 0; - } -+ if (!console_kthreads_atomic_tryblock()) { -+ up_console_sem(); -+ return 0; -+ } - console_locked = 1; - console_may_schedule = 0; - return 1; -@@ -2654,7 +2727,7 @@ EXPORT_SYMBOL(console_trylock); - - int is_console_locked(void) - { -- return console_locked; -+ return (console_locked || atomic_read(&console_kthreads_active)); - } - EXPORT_SYMBOL(is_console_locked); - -@@ -2698,7 +2771,7 @@ static inline bool __console_is_usable(s - * Check if the given console is currently capable and allowed to print - * records. - * -- * Requires the console_lock. -+ * Requires holding the console_lock. - */ - static inline bool console_is_usable(struct console *con) - { -@@ -2711,6 +2784,22 @@ static inline bool console_is_usable(str - static void __console_unlock(void) - { - console_locked = 0; -+ -+ /* -+ * Depending on whether console_lock() or console_trylock() was used, -+ * appropriately allow the kthread printers to continue. -+ */ -+ if (console_kthreads_blocked) -+ console_kthreads_unblock(); -+ else -+ console_kthreads_atomic_unblock(); -+ -+ /* -+ * New records may have arrived while the console was locked. -+ * Wake the kthread printers to print them. -+ */ -+ wake_up_klogd(); -+ - up_console_sem(); - } - -@@ -2728,17 +2817,19 @@ static void __console_unlock(void) - * - * @handover will be set to true if a printk waiter has taken over the - * console_lock, in which case the caller is no longer holding the -- * console_lock. Otherwise it is set to false. -+ * console_lock. Otherwise it is set to false. A NULL pointer may be provided -+ * to disable allowing the console_lock to be taken over by a printk waiter. - * - * Returns false if the given console has no next record to print, otherwise - * true. - * -- * Requires the console_lock. -+ * Requires the console_lock if @handover is non-NULL. -+ * Requires con->lock otherwise. 
- */ --static bool console_emit_next_record(struct console *con, char *text, char *ext_text, -- char *dropped_text, bool *handover) -+static bool __console_emit_next_record(struct console *con, char *text, char *ext_text, -+ char *dropped_text, bool *handover) - { -- static int panic_console_dropped; -+ static atomic_t panic_console_dropped = ATOMIC_INIT(0); - struct printk_info info; - struct printk_record r; - unsigned long flags; -@@ -2747,7 +2838,8 @@ static bool console_emit_next_record(str - - prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX); - -- *handover = false; -+ if (handover) -+ *handover = false; - - if (!prb_read_valid(prb, con->seq, &r)) - return false; -@@ -2755,7 +2847,8 @@ static bool console_emit_next_record(str - if (con->seq != r.info->seq) { - con->dropped += r.info->seq - con->seq; - con->seq = r.info->seq; -- if (panic_in_progress() && panic_console_dropped++ > 10) { -+ if (panic_in_progress() && -+ atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) { - suppress_panic_printk = 1; - pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n"); - } -@@ -2777,32 +2870,62 @@ static bool console_emit_next_record(str - len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time); - } - -- /* -- * While actively printing out messages, if another printk() -- * were to occur on another CPU, it may wait for this one to -- * finish. This task can not be preempted if there is a -- * waiter waiting to take over. -- * -- * Interrupts are disabled because the hand over to a waiter -- * must not be interrupted until the hand over is completed -- * (@console_waiter is cleared). -- */ -- printk_safe_enter_irqsave(flags); -- console_lock_spinning_enable(); -+ if (handover) { -+ /* -+ * While actively printing out messages, if another printk() -+ * were to occur on another CPU, it may wait for this one to -+ * finish. This task can not be preempted if there is a -+ * waiter waiting to take over. -+ * -+ * Interrupts are disabled because the hand over to a waiter -+ * must not be interrupted until the hand over is completed -+ * (@console_waiter is cleared). -+ */ -+ printk_safe_enter_irqsave(flags); -+ console_lock_spinning_enable(); -+ -+ /* don't trace irqsoff print latency */ -+ stop_critical_timings(); -+ } - -- stop_critical_timings(); /* don't trace print latency */ - call_console_driver(con, write_text, len, dropped_text); -- start_critical_timings(); - - con->seq++; - -- *handover = console_lock_spinning_disable_and_check(); -- printk_safe_exit_irqrestore(flags); -+ if (handover) { -+ start_critical_timings(); -+ *handover = console_lock_spinning_disable_and_check(); -+ printk_safe_exit_irqrestore(flags); -+ } - skip: - return true; - } - - /* -+ * Print a record for a given console, but allow another printk() caller to -+ * take over the console_lock and continue printing. -+ * -+ * Requires the console_lock, but depending on @handover after the call, the -+ * caller may no longer have the console_lock. -+ * -+ * See __console_emit_next_record() for argument and return details. -+ */ -+static bool console_emit_next_record_transferable(struct console *con, char *text, char *ext_text, -+ char *dropped_text, bool *handover) -+{ -+ /* -+ * Handovers are only supported if threaded printers are atomically -+ * blocked. The context taking over the console_lock may be atomic. 
-+ */ -+ if (!console_kthreads_atomically_blocked()) { -+ *handover = false; -+ handover = NULL; -+ } -+ -+ return __console_emit_next_record(con, text, ext_text, dropped_text, handover); -+} -+ -+/* - * Print out all remaining records to all consoles. - * - * @do_cond_resched is set by the caller. It can be true only in schedulable -@@ -2853,13 +2976,11 @@ static bool console_flush_all(bool do_co - - if (con->flags & CON_EXTENDED) { - /* Extended consoles do not print "dropped messages". */ -- progress = console_emit_next_record(con, &text[0], -- &ext_text[0], NULL, -- handover); -+ progress = console_emit_next_record_transferable(con, &text[0], -+ &ext_text[0], NULL, handover); - } else { -- progress = console_emit_next_record(con, &text[0], -- NULL, &dropped_text[0], -- handover); -+ progress = console_emit_next_record_transferable(con, &text[0], -+ NULL, &dropped_text[0], handover); - } - if (*handover) - return false; -@@ -2974,6 +3095,10 @@ void console_unblank(void) - if (oops_in_progress) { - if (down_trylock_console_sem() != 0) - return; -+ if (!console_kthreads_atomic_tryblock()) { -+ up_console_sem(); -+ return; -+ } - } else - console_lock(); - -@@ -3056,10 +3181,6 @@ void console_start(struct console *conso - console_lock(); - console->flags |= CON_ENABLED; - console_unlock(); -- -- /* Wake the newly enabled kthread printer. */ -- wake_up_klogd(); -- - __pr_flush(console, 1000, true); - } - EXPORT_SYMBOL(console_start); -@@ -3261,6 +3382,8 @@ void register_console(struct console *ne - - newcon->dropped = 0; - newcon->thread = NULL; -+ newcon->blocked = true; -+ mutex_init(&newcon->lock); - - if (newcon->flags & CON_PRINTBUFFER) { - /* Get a consistent copy of @syslog_seq. */ -@@ -3561,6 +3684,19 @@ static void printk_fallback_preferred_di - console_unlock(); - } - -+/* -+ * Print a record for a given console, not allowing another printk() caller -+ * to take over. This is appropriate for contexts that do not have the -+ * console_lock. -+ * -+ * See __console_emit_next_record() for argument and return details. -+ */ -+static bool console_emit_next_record(struct console *con, char *text, char *ext_text, -+ char *dropped_text) -+{ -+ return __console_emit_next_record(con, text, ext_text, dropped_text, NULL); -+} -+ - static bool printer_should_wake(struct console *con, u64 seq) - { - short flags; -@@ -3568,8 +3704,10 @@ static bool printer_should_wake(struct c - if (kthread_should_stop() || !printk_kthreads_available) - return true; - -- if (console_suspended) -+ if (con->blocked || -+ console_kthreads_atomically_blocked()) { - return false; -+ } - - /* - * This is an unsafe read from con->flags, but a false positive is -@@ -3590,7 +3728,6 @@ static int printk_kthread_func(void *dat - struct console *con = data; - char *dropped_text = NULL; - char *ext_text = NULL; -- bool handover; - u64 seq = 0; - char *text; - int error; -@@ -3640,15 +3777,27 @@ static int printk_kthread_func(void *dat - if (error) - continue; - -- console_lock(); -+ error = mutex_lock_interruptible(&con->lock); -+ if (error) -+ continue; - -- if (console_suspended) { -- up_console_sem(); -+ if (con->blocked || -+ !console_kthread_printing_tryenter()) { -+ /* Another context has locked the console_lock. */ -+ mutex_unlock(&con->lock); - continue; - } - -- if (!console_is_usable(con)) { -- __console_unlock(); -+ /* -+ * Although this context has not locked the console_lock, it -+ * is known that the console_lock is not locked and it is not -+ * possible for any other context to lock the console_lock. 
-+ * Therefore it is safe to read con->flags. -+ */ -+ -+ if (!__console_is_usable(con->flags)) { -+ console_kthread_printing_exit(); -+ mutex_unlock(&con->lock); - continue; - } - -@@ -3661,13 +3810,13 @@ static int printk_kthread_func(void *dat - * which can conditionally invoke cond_resched(). - */ - console_may_schedule = 0; -- console_emit_next_record(con, text, ext_text, dropped_text, &handover); -- if (handover) -- continue; -+ console_emit_next_record(con, text, ext_text, dropped_text); - - seq = con->seq; - -- __console_unlock(); -+ console_kthread_printing_exit(); -+ -+ mutex_unlock(&con->lock); - } - - con_printk(KERN_INFO, con, "printing thread stopped\n"); diff --git a/patches/0015-printk-remove-console_locked.patch b/patches/0015-printk-remove-console_locked.patch deleted file mode 100644 index edf30819d3d1..000000000000 --- a/patches/0015-printk-remove-console_locked.patch +++ /dev/null @@ -1,96 +0,0 @@ -From: John Ogness -Date: Wed, 20 Apr 2022 01:52:37 +0206 -Subject: [PATCH 15/18] printk: remove @console_locked - -The static global variable @console_locked is used to help debug -VT code to make sure that certain code paths are running with -the console_lock held. However, this information is also available -with the static global variable @console_kthreads_blocked (for -locking via console_lock()), and the static global variable -@console_kthreads_active (for locking via console_trylock()). - -Remove @console_locked and update is_console_locked() to use the -alternative variables. - -Signed-off-by: John Ogness -Reviewed-by: Petr Mladek -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/20220419234637.357112-16-john.ogness@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/printk/printk.c | 29 ++++++++++++++--------------- - 1 file changed, 14 insertions(+), 15 deletions(-) - ---- a/kernel/printk/printk.c -+++ b/kernel/printk/printk.c -@@ -340,15 +340,7 @@ static void console_kthreads_unblock(voi - console_kthreads_blocked = false; - } - --/* -- * This is used for debugging the mess that is the VT code by -- * keeping track if we have the console semaphore held. It's -- * definitely not the perfect debug tool (we don't know if _WE_ -- * hold it and are racing, but it helps tracking those weird code -- * paths in the console code where we end up in places I want -- * locked without the console semaphore held). -- */ --static int console_locked, console_suspended; -+static int console_suspended; - - /* - * Array of consoles built from command line options (console=) -@@ -2694,7 +2686,6 @@ void console_lock(void) - if (console_suspended) - return; - console_kthreads_block(); -- console_locked = 1; - console_may_schedule = 1; - } - EXPORT_SYMBOL(console_lock); -@@ -2719,15 +2710,26 @@ int console_trylock(void) - up_console_sem(); - return 0; - } -- console_locked = 1; - console_may_schedule = 0; - return 1; - } - EXPORT_SYMBOL(console_trylock); - -+/* -+ * This is used to help to make sure that certain paths within the VT code are -+ * running with the console lock held. It is definitely not the perfect debug -+ * tool (it is not known if the VT code is the task holding the console lock), -+ * but it helps tracking those weird code paths in the console code such as -+ * when the console is suspended: where the console is not locked but no -+ * console printing may occur. -+ * -+ * Note: This returns true when the console is suspended but is not locked. 
-+ * This is intentional because the VT code must consider that situation -+ * the same as if the console was locked. -+ */ - int is_console_locked(void) - { -- return (console_locked || atomic_read(&console_kthreads_active)); -+ return (console_kthreads_blocked || atomic_read(&console_kthreads_active)); - } - EXPORT_SYMBOL(is_console_locked); - -@@ -2783,8 +2785,6 @@ static inline bool console_is_usable(str - - static void __console_unlock(void) - { -- console_locked = 0; -- - /* - * Depending on whether console_lock() or console_trylock() was used, - * appropriately allow the kthread printers to continue. -@@ -3102,7 +3102,6 @@ void console_unblank(void) - } else - console_lock(); - -- console_locked = 1; - console_may_schedule = 0; - for_each_console(c) - if ((c->flags & CON_ENABLED) && c->unblank) diff --git a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch index f61250c2c520..24b0406fb22d 100644 --- a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch +++ b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch @@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior }; --- a/init/Kconfig +++ b/init/Kconfig -@@ -1545,6 +1545,10 @@ config PRINTK +@@ -1565,6 +1565,10 @@ config PRINTK very difficult to diagnose system problems, saying N here is strongly discouraged. @@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior default y --- a/kernel/panic.c +++ b/kernel/panic.c -@@ -233,7 +233,6 @@ void panic(const char *fmt, ...) +@@ -257,7 +257,6 @@ void panic(const char *fmt, ...) panic_smp_self_stop(); console_verbose(); @@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior va_start(args, fmt); len = vscnprintf(buf, sizeof(buf), fmt, args); va_end(args); -@@ -250,6 +249,11 @@ void panic(const char *fmt, ...) +@@ -274,6 +273,11 @@ void panic(const char *fmt, ...) dump_stack(); #endif @@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior #include #include #include -@@ -2035,21 +2036,30 @@ static int console_trylock_spinning(void +@@ -2060,19 +2061,28 @@ static int console_trylock_spinning(void * dropped, a dropped message will be written out first. 
*/ static void call_console_driver(struct console *con, const char *text, size_t len, @@ -128,8 +128,6 @@ Signed-off-by: Sebastian Andrzej Siewior + unsigned long dropped = 0; size_t dropped_len; - trace_console_rcuidle(text, len); - - if (con->dropped && dropped_text) { + if (dropped_text) + dropped = atomic_long_xchg_relaxed(&con->dropped, 0); @@ -155,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -2401,6 +2411,76 @@ asmlinkage __visible int _printk(const c +@@ -2426,6 +2436,76 @@ asmlinkage __visible int _printk(const c } EXPORT_SYMBOL(_printk); @@ -232,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress); static void printk_start_kthread(struct console *con); -@@ -2415,6 +2495,8 @@ static void printk_start_kthread(struct +@@ -2440,6 +2520,8 @@ static void printk_start_kthread(struct #define prb_first_valid_seq(rb) 0 #define prb_next_seq(rb) 0 @@ -241,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior static u64 syslog_seq; static size_t record_print_text(const struct printk_record *r, -@@ -2433,7 +2515,7 @@ static ssize_t msg_print_ext_body(char * +@@ -2458,7 +2540,7 @@ static ssize_t msg_print_ext_body(char * static void console_lock_spinning_enable(void) { } static int console_lock_spinning_disable_and_check(void) { return 0; } static void call_console_driver(struct console *con, const char *text, size_t len, @@ -250,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior { } static bool suppress_message_printing(int level) { return false; } -@@ -2775,10 +2857,20 @@ static inline bool __console_is_usable(s +@@ -2800,10 +2882,20 @@ static inline bool __console_is_usable(s * * Requires holding the console_lock. */ @@ -273,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior return __console_is_usable(con->flags); } -@@ -2803,6 +2895,66 @@ static void __console_unlock(void) +@@ -2828,6 +2920,66 @@ static void __console_unlock(void) up_console_sem(); } @@ -340,7 +338,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Print one record for the given console. The record printed is whatever * record is the next available record for the given console. -@@ -2815,6 +2967,8 @@ static void __console_unlock(void) +@@ -2840,6 +2992,8 @@ static void __console_unlock(void) * If dropped messages should be printed, @dropped_text is a buffer of size * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL. * @@ -349,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior * @handover will be set to true if a printk waiter has taken over the * console_lock, in which case the caller is no longer holding the * console_lock. Otherwise it is set to false. A NULL pointer may be provided -@@ -2827,7 +2981,8 @@ static void __console_unlock(void) +@@ -2852,7 +3006,8 @@ static void __console_unlock(void) * Requires con->lock otherwise. 
*/ static bool __console_emit_next_record(struct console *con, char *text, char *ext_text, @@ -359,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior { static atomic_t panic_console_dropped = ATOMIC_INIT(0); struct printk_info info; -@@ -2835,18 +2990,22 @@ static bool __console_emit_next_record(s +@@ -2860,18 +3015,22 @@ static bool __console_emit_next_record(s unsigned long flags; char *write_text; size_t len; @@ -386,7 +384,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (panic_in_progress() && atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) { suppress_panic_printk = 1; -@@ -2856,7 +3015,7 @@ static bool __console_emit_next_record(s +@@ -2881,7 +3040,7 @@ static bool __console_emit_next_record(s /* Skip record that has level above the console loglevel. */ if (suppress_message_printing(r.info->level)) { @@ -395,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior goto skip; } -@@ -2888,9 +3047,9 @@ static bool __console_emit_next_record(s +@@ -2913,9 +3072,9 @@ static bool __console_emit_next_record(s stop_critical_timings(); } @@ -407,7 +405,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (handover) { start_critical_timings(); -@@ -2922,7 +3081,7 @@ static bool console_emit_next_record_tra +@@ -2947,7 +3106,7 @@ static bool console_emit_next_record_tra handover = NULL; } @@ -416,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior } /* -@@ -2970,7 +3129,7 @@ static bool console_flush_all(bool do_co +@@ -2995,7 +3154,7 @@ static bool console_flush_all(bool do_co for_each_console(con) { bool progress; @@ -425,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; any_usable = true; -@@ -3005,6 +3164,68 @@ static bool console_flush_all(bool do_co +@@ -3030,6 +3189,68 @@ static bool console_flush_all(bool do_co return any_usable; } @@ -494,7 +492,7 @@ Signed-off-by: Sebastian Andrzej Siewior /** * console_unlock - unlock the console system * -@@ -3120,6 +3341,11 @@ void console_unblank(void) +@@ -3145,6 +3366,11 @@ void console_unblank(void) */ void console_flush_on_panic(enum con_flush_mode mode) { @@ -506,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * If someone else is holding the console lock, trylock will fail * and may_schedule may be set. Ignore and proceed to unlock so -@@ -3136,7 +3362,7 @@ void console_flush_on_panic(enum con_flu +@@ -3161,7 +3387,7 @@ void console_flush_on_panic(enum con_flu seq = prb_first_valid_seq(prb); for_each_console(c) @@ -515,7 +513,7 @@ Signed-off-by: Sebastian Andrzej Siewior } console_unlock(); } -@@ -3379,19 +3605,22 @@ void register_console(struct console *ne +@@ -3404,19 +3630,22 @@ void register_console(struct console *ne if (newcon->flags & CON_EXTENDED) nr_ext_console_drivers++; @@ -541,7 +539,7 @@ Signed-off-by: Sebastian Andrzej Siewior } if (printk_kthreads_available) -@@ -3480,6 +3709,10 @@ int unregister_console(struct console *c +@@ -3505,6 +3734,10 @@ int unregister_console(struct console *c console_sysfs_notify(); @@ -552,7 +550,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (console->exit) res = console->exit(console); -@@ -3609,7 +3842,7 @@ static bool __pr_flush(struct console *c +@@ -3634,7 +3867,7 @@ static bool __pr_flush(struct console *c for_each_console(c) { if (con && con != c) continue; @@ -561,7 +559,7 @@ Signed-off-by: Sebastian Andrzej Siewior continue; printk_seq = c->seq; if (printk_seq < seq) -@@ -3691,9 +3924,10 @@ static void printk_fallback_preferred_di +@@ -3716,9 +3949,10 @@ static void printk_fallback_preferred_di * See __console_emit_next_record() for argument and return details. 
*/ static bool console_emit_next_record(struct console *con, char *text, char *ext_text, @@ -574,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static bool printer_should_wake(struct console *con, u64 seq) -@@ -3731,6 +3965,11 @@ static int printk_kthread_func(void *dat +@@ -3756,6 +3990,11 @@ static int printk_kthread_func(void *dat char *text; int error; @@ -586,7 +584,7 @@ Signed-off-by: Sebastian Andrzej Siewior text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL); if (!text) { con_printk(KERN_ERR, con, "failed to allocate text buffer\n"); -@@ -3809,7 +4048,7 @@ static int printk_kthread_func(void *dat +@@ -3834,7 +4073,7 @@ static int printk_kthread_func(void *dat * which can conditionally invoke cond_resched(). */ console_may_schedule = 0; diff --git a/patches/0017-serial-8250-implement-write_atomic.patch b/patches/0017-serial-8250-implement-write_atomic.patch index a64abb45042e..d26ba2b2d498 100644 --- a/patches/0017-serial-8250-implement-write_atomic.patch +++ b/patches/0017-serial-8250-implement-write_atomic.patch @@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/tty/serial/8250/8250.h +++ b/drivers/tty/serial/8250/8250.h -@@ -132,12 +132,74 @@ static inline void serial_dl_write(struc +@@ -157,12 +157,74 @@ static inline void serial_dl_write(struc up->dl_write(up, value); } @@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior return true; } -@@ -146,7 +208,7 @@ static inline bool serial8250_clear_THRI +@@ -171,7 +233,7 @@ static inline bool serial8250_clear_THRI if (!(up->ier & UART_IER_THRI)) return false; up->ier &= ~UART_IER_THRI; @@ -210,7 +210,7 @@ Signed-off-by: Sebastian Andrzej Siewior return HRTIMER_NORESTART; --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c -@@ -255,8 +255,11 @@ static void serial8250_timeout(struct ti +@@ -254,8 +254,11 @@ static void serial8250_timeout(struct ti static void serial8250_backup_timeout(struct timer_list *t) { struct uart_8250_port *up = from_timer(up, t, timer); @@ -222,7 +222,7 @@ Signed-off-by: Sebastian Andrzej Siewior spin_lock_irqsave(&up->port.lock, flags); -@@ -265,8 +268,16 @@ static void serial8250_backup_timeout(st +@@ -264,8 +267,16 @@ static void serial8250_backup_timeout(st * based handler. 
*/ if (up->port.irq) { @@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior } iir = serial_in(up, UART_IIR); -@@ -290,7 +301,7 @@ static void serial8250_backup_timeout(st +@@ -289,7 +300,7 @@ static void serial8250_backup_timeout(st serial8250_tx_chars(up); if (up->port.irq) @@ -248,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior spin_unlock_irqrestore(&up->port.lock, flags); -@@ -567,6 +578,14 @@ serial8250_register_ports(struct uart_dr +@@ -566,6 +577,14 @@ serial8250_register_ports(struct uart_dr #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -263,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior static void univ8250_console_write(struct console *co, const char *s, unsigned int count) { -@@ -660,6 +679,7 @@ static int univ8250_console_match(struct +@@ -659,6 +678,7 @@ static int univ8250_console_match(struct static struct console univ8250_console = { .name = "ttyS", @@ -271,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior .write = univ8250_console_write, .device = uart_console_device, .setup = univ8250_console_setup, -@@ -953,7 +973,7 @@ static void serial_8250_overrun_backoff_ +@@ -952,7 +972,7 @@ static void serial_8250_overrun_backoff_ spin_lock_irqsave(&port->lock, flags); up->ier |= UART_IER_RLSI | UART_IER_RDI; up->port.read_status_mask |= UART_LSR_DR; @@ -333,7 +333,7 @@ Signed-off-by: Sebastian Andrzej Siewior value |= UART_MCR_MDCE | UART_MCR_FCM; --- a/drivers/tty/serial/8250/8250_mtk.c +++ b/drivers/tty/serial/8250/8250_mtk.c -@@ -218,12 +218,40 @@ static void mtk8250_shutdown(struct uart +@@ -222,12 +222,40 @@ static void mtk8250_shutdown(struct uart static void mtk8250_disable_intrs(struct uart_8250_port *up, int mask) { @@ -467,7 +467,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c -@@ -770,7 +770,7 @@ static void serial8250_set_sleep(struct +@@ -749,7 +749,7 @@ static void serial8250_set_sleep(struct serial_out(p, UART_EFR, UART_EFR_ECB); serial_out(p, UART_LCR, 0); } @@ -476,7 +476,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (p->capabilities & UART_CAP_EFR) { serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B); serial_out(p, UART_EFR, efr); -@@ -1044,8 +1044,11 @@ static int broken_efr(struct uart_8250_p +@@ -1023,8 +1023,11 @@ static int broken_efr(struct uart_8250_p */ static void autoconfig_16550a(struct uart_8250_port *up) { @@ -488,7 +488,7 @@ Signed-off-by: Sebastian Andrzej Siewior up->port.type = PORT_16550A; up->capabilities |= UART_CAP_FIFO; -@@ -1156,6 +1159,11 @@ static void autoconfig_16550a(struct uar +@@ -1135,6 +1138,11 @@ static void autoconfig_16550a(struct uar return; } @@ -500,7 +500,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Try writing and reading the UART_IER_UUE bit (b6). * If it works, this is probably one of the Xscale platform's -@@ -1191,6 +1199,9 @@ static void autoconfig_16550a(struct uar +@@ -1170,6 +1178,9 @@ static void autoconfig_16550a(struct uar } serial_out(up, UART_IER, iersave); @@ -510,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * We distinguish between 16550A and U6 16550A by counting * how many bytes are in the FIFO. 
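A pattern repeated throughout the 8250 hunks here: every direct write of UART_IER is rerouted through serial8250_set_IER()/serial8250_clear_IER(), added to 8250.h by this patch, so that register accesses are serialized against an atomic console write that may arrive even with local interrupts disabled. The helpers are sketched below as an approximation; the is_console guard reflects the patch's intent, but the exact bodies (in particular the printk_cpu_sync calls) should be read as an assumption, not as the literal -rt code:

static inline void serial8250_set_IER(struct uart_8250_port *up,
				      unsigned char ier)
{
	struct uart_port *port = &up->port;
	unsigned long flags;
	bool is_console = uart_console(port);

	/* write_atomic() may interrupt; serialize console IER access */
	if (is_console)
		printk_cpu_sync_get_irqsave(flags);

	serial_out(up, UART_IER, ier);

	if (is_console)
		printk_cpu_sync_put_irqrestore(flags);
}

static inline unsigned char serial8250_clear_IER(struct uart_8250_port *up)
{
	struct uart_port *port = &up->port;
	unsigned char clearval = 0;
	unsigned char prior;
	unsigned long flags;
	bool is_console = uart_console(port);

	if (up->capabilities & UART_CAP_UUE)
		clearval = UART_IER_UUE;	/* Xscale: keep UUE set */

	if (is_console)
		printk_cpu_sync_get_irqsave(flags);

	prior = serial_port_in(port, UART_IER);
	serial_port_out(port, UART_IER, clearval);

	if (is_console)
		printk_cpu_sync_put_irqrestore(flags);

	return prior;
}

Returning the prior mask is what lets probe paths such as autoconfig() save IER, poke at the port, and restore the mask with a plain serial8250_set_IER() afterwards.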
-@@ -1213,8 +1224,10 @@ static void autoconfig(struct uart_8250_ +@@ -1192,8 +1203,10 @@ static void autoconfig(struct uart_8250_ unsigned char status1, scratch, scratch2, scratch3; unsigned char save_lcr, save_mcr; struct uart_port *port = &up->port; @@ -521,7 +521,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (!port->iobase && !port->mapbase && !port->membase) return; -@@ -1232,6 +1245,11 @@ static void autoconfig(struct uart_8250_ +@@ -1211,6 +1224,11 @@ static void autoconfig(struct uart_8250_ up->bugs = 0; if (!(port->flags & UPF_BUGGY_UART)) { @@ -533,7 +533,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Do a simple existence test first; if we fail this, * there's no point trying anything else. -@@ -1261,6 +1279,10 @@ static void autoconfig(struct uart_8250_ +@@ -1240,6 +1258,10 @@ static void autoconfig(struct uart_8250_ #endif scratch3 = serial_in(up, UART_IER) & 0x0f; serial_out(up, UART_IER, scratch); @@ -544,7 +544,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (scratch2 != 0 || scratch3 != 0x0F) { /* * We failed; there's nothing here -@@ -1358,10 +1380,7 @@ static void autoconfig(struct uart_8250_ +@@ -1337,10 +1359,7 @@ static void autoconfig(struct uart_8250_ serial8250_out_MCR(up, save_mcr); serial8250_clear_fifos(up); serial_in(up, UART_RX); @@ -556,7 +556,7 @@ Signed-off-by: Sebastian Andrzej Siewior out_unlock: spin_unlock_irqrestore(&port->lock, flags); -@@ -1387,7 +1406,9 @@ static void autoconfig_irq(struct uart_8 +@@ -1366,7 +1385,9 @@ static void autoconfig_irq(struct uart_8 unsigned char save_mcr, save_ier; unsigned char save_ICP = 0; unsigned int ICP = 0; @@ -566,7 +566,7 @@ Signed-off-by: Sebastian Andrzej Siewior int irq; if (port->flags & UPF_FOURPORT) { -@@ -1397,8 +1418,12 @@ static void autoconfig_irq(struct uart_8 +@@ -1376,8 +1397,12 @@ static void autoconfig_irq(struct uart_8 inb_p(ICP); } @@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* forget possible initially masked and pending IRQ */ probe_irq_off(probe_irq_on()); -@@ -1430,8 +1455,10 @@ static void autoconfig_irq(struct uart_8 +@@ -1409,8 +1434,10 @@ static void autoconfig_irq(struct uart_8 if (port->flags & UPF_FOURPORT) outb_p(save_ICP, ICP); @@ -592,7 +592,7 @@ Signed-off-by: Sebastian Andrzej Siewior port->irq = (irq > 0) ? 
irq : 0; } -@@ -1444,7 +1471,7 @@ static void serial8250_stop_rx(struct ua +@@ -1423,7 +1450,7 @@ static void serial8250_stop_rx(struct ua up->ier &= ~(UART_IER_RLSI | UART_IER_RDI); up->port.read_status_mask &= ~UART_LSR_DR; @@ -601,7 +601,7 @@ Signed-off-by: Sebastian Andrzej Siewior serial8250_rpm_put(up); } -@@ -1474,7 +1501,7 @@ void serial8250_em485_stop_tx(struct uar +@@ -1453,7 +1480,7 @@ void serial8250_em485_stop_tx(struct uar serial8250_clear_and_reinit_fifos(p); p->ier |= UART_IER_RLSI | UART_IER_RDI; @@ -610,7 +610,7 @@ Signed-off-by: Sebastian Andrzej Siewior } } EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx); -@@ -1711,7 +1738,7 @@ static void serial8250_disable_ms(struct +@@ -1707,7 +1734,7 @@ static void serial8250_disable_ms(struct mctrl_gpio_disable_ms(up->gpios); up->ier &= ~UART_IER_MSI; @@ -619,7 +619,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static void serial8250_enable_ms(struct uart_port *port) -@@ -1727,7 +1754,7 @@ static void serial8250_enable_ms(struct +@@ -1723,7 +1750,7 @@ static void serial8250_enable_ms(struct up->ier |= UART_IER_MSI; serial8250_rpm_get(up); @@ -628,7 +628,7 @@ Signed-off-by: Sebastian Andrzej Siewior serial8250_rpm_put(up); } -@@ -2146,14 +2173,7 @@ static void serial8250_put_poll_char(str +@@ -2152,14 +2179,7 @@ static void serial8250_put_poll_char(str struct uart_8250_port *up = up_to_u8250p(port); serial8250_rpm_get(up); @@ -644,7 +644,7 @@ Signed-off-by: Sebastian Andrzej Siewior wait_for_xmitr(up, BOTH_EMPTY); /* -@@ -2166,7 +2186,7 @@ static void serial8250_put_poll_char(str +@@ -2172,7 +2192,7 @@ static void serial8250_put_poll_char(str * and restore the IER */ wait_for_xmitr(up, BOTH_EMPTY); @@ -653,7 +653,7 @@ Signed-off-by: Sebastian Andrzej Siewior serial8250_rpm_put(up); } -@@ -2175,8 +2195,10 @@ static void serial8250_put_poll_char(str +@@ -2181,8 +2201,10 @@ static void serial8250_put_poll_char(str int serial8250_do_startup(struct uart_port *port) { struct uart_8250_port *up = up_to_u8250p(port); @@ -664,7 +664,7 @@ Signed-off-by: Sebastian Andrzej Siewior int retval; if (!port->fifosize) -@@ -2196,7 +2218,7 @@ int serial8250_do_startup(struct uart_po +@@ -2202,7 +2224,7 @@ int serial8250_do_startup(struct uart_po up->acr = 0; serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); serial_port_out(port, UART_EFR, UART_EFR_ECB); @@ -673,7 +673,7 @@ Signed-off-by: Sebastian Andrzej Siewior serial_port_out(port, UART_LCR, 0); serial_icr_write(up, UART_CSR, 0); /* Reset the UART */ serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B); -@@ -2206,7 +2228,7 @@ int serial8250_do_startup(struct uart_po +@@ -2212,7 +2234,7 @@ int serial8250_do_startup(struct uart_po if (port->type == PORT_DA830) { /* Reset the port */ @@ -682,7 +682,7 @@ Signed-off-by: Sebastian Andrzej Siewior serial_port_out(port, UART_DA830_PWREMU_MGMT, 0); mdelay(10); -@@ -2301,6 +2323,8 @@ int serial8250_do_startup(struct uart_po +@@ -2307,6 +2329,8 @@ int serial8250_do_startup(struct uart_po if (port->irq && (up->port.flags & UPF_SHARE_IRQ)) up->port.irqflags |= IRQF_SHARED; @@ -691,7 +691,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) { unsigned char iir1; -@@ -2317,6 +2341,9 @@ int serial8250_do_startup(struct uart_po +@@ -2323,6 +2347,9 @@ int serial8250_do_startup(struct uart_po */ spin_lock_irqsave(&port->lock, flags); @@ -701,7 +701,7 @@ Signed-off-by: Sebastian Andrzej Siewior wait_for_xmitr(up, UART_LSR_THRE); serial_port_out_sync(port, UART_IER, UART_IER_THRI); udelay(1); /* allow THRE to set */ 
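The additions around this THRE probe follow from the same hazard: serial8250_console_write_atomic() can run at any instant, even inside this irqsave region, and a console write rewriting IER mid-probe would make the sampled IIR meaningless. Condensed into one sketch (this mirrors the hunk's intent under the printk_cpu_sync assumption above; it is not the literal added code):

/* Probe whether asserting THRI raises an interrupt, shielded from an
 * atomic console write racing the IER/IIR accesses. Sketch only. */
static unsigned char serial8250_probe_thre_iir(struct uart_8250_port *up)
{
	struct uart_port *port = &up->port;
	bool is_console = uart_console(port);
	unsigned long flags;
	unsigned char iir;

	if (is_console)
		printk_cpu_sync_get_irqsave(flags);

	wait_for_xmitr(up, UART_LSR_THRE);
	serial_port_out_sync(port, UART_IER, UART_IER_THRI);
	udelay(1);			/* allow THRE to set */
	iir = serial_port_in(port, UART_IIR);
	serial_port_out(port, UART_IER, 0);

	if (is_console)
		printk_cpu_sync_put_irqrestore(flags);

	return iir;	/* caller checks UART_IIR_NO_INT to detect UART_BUG_THRE */
}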
-@@ -2327,6 +2354,9 @@ int serial8250_do_startup(struct uart_po +@@ -2333,6 +2360,9 @@ int serial8250_do_startup(struct uart_po iir = serial_port_in(port, UART_IIR); serial_port_out(port, UART_IER, 0); @@ -711,7 +711,7 @@ Signed-off-by: Sebastian Andrzej Siewior spin_unlock_irqrestore(&port->lock, flags); if (port->irqflags & IRQF_SHARED) -@@ -2383,10 +2413,14 @@ int serial8250_do_startup(struct uart_po +@@ -2389,10 +2419,14 @@ int serial8250_do_startup(struct uart_po * Do a quick test to see if we receive an interrupt when we enable * the TX irq. */ @@ -726,7 +726,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) { if (!(up->bugs & UART_BUG_TXEN)) { -@@ -2418,7 +2452,7 @@ int serial8250_do_startup(struct uart_po +@@ -2424,7 +2458,7 @@ int serial8250_do_startup(struct uart_po if (up->dma) { const char *msg = NULL; @@ -735,7 +735,7 @@ Signed-off-by: Sebastian Andrzej Siewior msg = "forbid DMA for kernel console"; else if (serial8250_request_dma(up)) msg = "failed to request DMA"; -@@ -2469,7 +2503,7 @@ void serial8250_do_shutdown(struct uart_ +@@ -2475,7 +2509,7 @@ void serial8250_do_shutdown(struct uart_ */ spin_lock_irqsave(&port->lock, flags); up->ier = 0; @@ -744,7 +744,7 @@ Signed-off-by: Sebastian Andrzej Siewior spin_unlock_irqrestore(&port->lock, flags); synchronize_irq(port->irq); -@@ -2837,7 +2871,7 @@ serial8250_do_set_termios(struct uart_po +@@ -2841,7 +2875,7 @@ serial8250_do_set_termios(struct uart_po if (up->capabilities & UART_CAP_RTOIE) up->ier |= UART_IER_RTOIE; @@ -753,7 +753,7 @@ Signed-off-by: Sebastian Andrzej Siewior if (up->capabilities & UART_CAP_EFR) { unsigned char efr = 0; -@@ -3303,7 +3337,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default +@@ -3307,7 +3341,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default #ifdef CONFIG_SERIAL_8250_CONSOLE @@ -762,7 +762,7 @@ Signed-off-by: Sebastian Andrzej Siewior { struct uart_8250_port *up = up_to_u8250p(port); -@@ -3311,6 +3345,18 @@ static void serial8250_console_putchar(s +@@ -3315,6 +3349,18 @@ static void serial8250_console_putchar(s serial_port_out(port, UART_TX, ch); } @@ -781,7 +781,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Restore serial console when h/w power-off detected */ -@@ -3332,6 +3378,32 @@ static void serial8250_console_restore(s +@@ -3336,6 +3382,32 @@ static void serial8250_console_restore(s serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS); } @@ -812,12 +812,12 @@ Signed-off-by: Sebastian Andrzej Siewior +} + /* - * Print a string to the serial port trying not to disturb - * any possible real use of the port... 
-@@ -3348,24 +3420,12 @@ void serial8250_console_write(struct uar + * Print a string to the serial port using the device FIFO + * +@@ -3381,24 +3453,12 @@ void serial8250_console_write(struct uar struct uart_port *port = &up->port; unsigned long flags; - unsigned int ier; + unsigned int ier, use_fifo; - int locked = 1; touch_nmi_watchdog(); @@ -841,17 +841,20 @@ Signed-off-by: Sebastian Andrzej Siewior /* check scratch reg to see if port powered off during system sleep */ if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) { -@@ -3379,7 +3439,9 @@ void serial8250_console_write(struct uar - mdelay(port->rs485.delay_rts_before_send); - } +@@ -3432,10 +3492,12 @@ void serial8250_console_write(struct uar + */ + !(up->port.flags & UPF_CONS_FLOW); + atomic_inc(&up->console_printing); - uart_console_write(port, s, count, serial8250_console_putchar); + if (likely(use_fifo)) + serial8250_console_fifo_write(up, s, count); + else + uart_console_write(port, s, count, serial8250_console_putchar); + atomic_dec(&up->console_printing); /* * Finally, wait for transmitter to become empty -@@ -3392,8 +3454,7 @@ void serial8250_console_write(struct uar +@@ -3448,8 +3510,7 @@ void serial8250_console_write(struct uar if (em485->tx_stopped) up->rs485_stop_tx(up); } @@ -861,7 +864,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * The receive handling will happen properly because the -@@ -3405,8 +3466,7 @@ void serial8250_console_write(struct uar +@@ -3461,8 +3522,7 @@ void serial8250_console_write(struct uar if (up->msr_saved_flags) serial8250_modem_status(up); @@ -871,7 +874,7 @@ Signed-off-by: Sebastian Andrzej Siewior } static unsigned int probe_baud(struct uart_port *port) -@@ -3426,6 +3486,7 @@ static unsigned int probe_baud(struct ua +@@ -3482,6 +3542,7 @@ static unsigned int probe_baud(struct ua int serial8250_console_setup(struct uart_port *port, char *options, bool probe) { @@ -879,7 +882,7 @@ Signed-off-by: Sebastian Andrzej Siewior int baud = 9600; int bits = 8; int parity = 'n'; -@@ -3435,6 +3496,8 @@ int serial8250_console_setup(struct uart +@@ -3491,6 +3552,8 @@ int serial8250_console_setup(struct uart if (!port->iobase && !port->membase) return -ENODEV; diff --git a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch index 196b31fdf845..662ff965dfb4 100644 --- a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch +++ b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1956,6 +1956,7 @@ static int console_lock_spinning_disable +@@ -1981,6 +1981,7 @@ static int console_lock_spinning_disable return 1; } @@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior /** * console_trylock_spinning - try to get console_lock by busy waiting * -@@ -2029,6 +2030,7 @@ static int console_trylock_spinning(void +@@ -2054,6 +2055,7 @@ static int console_trylock_spinning(void return 1; } @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * Call the specified console driver, asking it to write out the specified -@@ -2368,6 +2370,18 @@ asmlinkage int vprintk_emit(int facility +@@ -2393,6 +2395,18 @@ asmlinkage int vprintk_emit(int facility /* If called from the scheduler, we can not call up(). */ if (!in_sched && allow_direct_printing()) { @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior /* * The caller may be holding system-critical or * timing-sensitive locks. 
Disable preemption during direct -@@ -2385,6 +2399,7 @@ asmlinkage int vprintk_emit(int facility +@@ -2410,6 +2424,7 @@ asmlinkage int vprintk_emit(int facility if (console_trylock_spinning()) console_unlock(); preempt_enable(); @@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior } wake_up_klogd(); -@@ -3075,8 +3090,12 @@ static bool console_emit_next_record_tra +@@ -3100,8 +3115,12 @@ static bool console_emit_next_record_tra /* * Handovers are only supported if threaded printers are atomically * blocked. The context taking over the console_lock may be atomic. diff --git a/patches/ARM64__Allow_to_enable_RT.patch b/patches/ARM64__Allow_to_enable_RT.patch index fd03bece904c..5f2f3d405a5e 100644 --- a/patches/ARM64__Allow_to_enable_RT.patch +++ b/patches/ARM64__Allow_to_enable_RT.patch @@ -16,10 +16,10 @@ Signed-off-by: Thomas Gleixner --- --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -91,6 +91,7 @@ config ARM64 - select ARCH_SUPPORTS_ATOMIC_RMW +@@ -93,6 +93,7 @@ config ARM64 select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 select ARCH_SUPPORTS_NUMA_BALANCING + select ARCH_SUPPORTS_PAGE_TABLE_CHECK + select ARCH_SUPPORTS_RT select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT select ARCH_WANT_DEFAULT_BPF_JIT diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch index 34da917f8c9e..0e6dbb0e3c10 100644 --- a/patches/Add_localversion_for_-RT_release.patch +++ b/patches/Add_localversion_for_-RT_release.patch @@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt11 ++-rt1 diff --git a/patches/POWERPC__Allow_to_enable_RT.patch b/patches/POWERPC__Allow_to_enable_RT.patch index 5bedabc2150c..0c48effc78ab 100644 --- a/patches/POWERPC__Allow_to_enable_RT.patch +++ b/patches/POWERPC__Allow_to_enable_RT.patch @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner --- --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -149,6 +149,7 @@ config PPC +@@ -150,6 +150,7 @@ config PPC select ARCH_STACKWALK select ARCH_SUPPORTS_ATOMIC_RMW select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x @@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 select ARCH_USE_MEMTEST -@@ -220,6 +221,7 @@ config PPC +@@ -225,6 +226,7 @@ config PPC select HAVE_IOREMAP_PROT select HAVE_IRQ_EXIT_ON_IRQ_STACK select HAVE_IRQ_TIME_ACCOUNTING diff --git a/patches/Revert-drm-i915-Depend-on-PREEMPT_RT.patch b/patches/Revert-drm-i915-Depend-on-PREEMPT_RT.patch index 7869ab99cdfb..7766add9bfba 100644 --- a/patches/Revert-drm-i915-Depend-on-PREEMPT_RT.patch +++ b/patches/Revert-drm-i915-Depend-on-PREEMPT_RT.patch @@ -17,6 +17,6 @@ Signed-off-by: Sebastian Andrzej Siewior depends on DRM depends on X86 && PCI - depends on !PREEMPT_RT - select INTEL_GTT + select INTEL_GTT if X86 select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular diff --git a/patches/SUNRPC-Don-t-disable-preemption-while-calling-svc_po.patch b/patches/SUNRPC-Don-t-disable-preemption-while-calling-svc_po.patch deleted file mode 100644 index 32e08a35e22e..000000000000 --- a/patches/SUNRPC-Don-t-disable-preemption-while-calling-svc_po.patch +++ /dev/null @@ -1,55 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 4 May 2022 19:24:10 +0200 -Subject: [PATCH] SUNRPC: Don't disable preemption while calling - svc_pool_for_cpu(). 
- -svc_xprt_enqueue() disables preemption via get_cpu() and then asks for a -pool of a specific CPU (current) via svc_pool_for_cpu(). -With disabled preemption it acquires svc_pool::sp_lock, a spinlock_t, -which is a sleeping lock on PREEMPT_RT and can't be acquired with -disabled preemption. - -Disabling preemption is not required here. The pool is protected with a -lock so the following list access is safe even cross-CPU. The following -iteration through svc_pool::sp_all_threads is under RCU-readlock and -remaining operations within the loop are atomic and do not rely on -disabled-preemption. - -Use raw_smp_processor_id() as the argument for the requested CPU in -svc_pool_for_cpu(). - -Reported-by: Mike Galbraith -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/YnK2ujabd2+oCrT/@linutronix.de ---- - net/sunrpc/svc_xprt.c | 5 +---- - 1 file changed, 1 insertion(+), 4 deletions(-) - ---- a/net/sunrpc/svc_xprt.c -+++ b/net/sunrpc/svc_xprt.c -@@ -448,7 +448,6 @@ void svc_xprt_enqueue(struct svc_xprt *x - { - struct svc_pool *pool; - struct svc_rqst *rqstp = NULL; -- int cpu; - - if (!svc_xprt_ready(xprt)) - return; -@@ -461,8 +460,7 @@ void svc_xprt_enqueue(struct svc_xprt *x - if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) - return; - -- cpu = get_cpu(); -- pool = svc_pool_for_cpu(xprt->xpt_server, cpu); -+ pool = svc_pool_for_cpu(xprt->xpt_server, raw_smp_processor_id()); - - atomic_long_inc(&pool->sp_stats.packets); - -@@ -485,7 +483,6 @@ void svc_xprt_enqueue(struct svc_xprt *x - rqstp = NULL; - out_unlock: - rcu_read_unlock(); -- put_cpu(); - trace_svc_xprt_enqueue(xprt, rqstp); - } - EXPORT_SYMBOL_GPL(svc_xprt_enqueue); diff --git a/patches/arch_arm64__Add_lazy_preempt_support.patch b/patches/arch_arm64__Add_lazy_preempt_support.patch index 3339620a018a..643d302a1033 100644 --- a/patches/arch_arm64__Add_lazy_preempt_support.patch +++ b/patches/arch_arm64__Add_lazy_preempt_support.patch @@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner --- --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -194,6 +194,7 @@ config ARM64 +@@ -197,6 +197,7 @@ config ARM64 select HAVE_PERF_USER_STACK_DUMP select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_REGS_AND_STACK_ACCESS_API @@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner #define TIF_SYSCALL_TRACE 8 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ #define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ -@@ -98,8 +100,10 @@ int arch_dup_task_struct(struct task_str +@@ -100,8 +102,10 @@ int arch_dup_task_struct(struct task_str #define _TIF_SVE (1 << TIF_SVE) #define _TIF_MTE_ASYNC_FAULT (1 << TIF_MTE_ASYNC_FAULT) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) @@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ _TIF_UPROBE | _TIF_MTE_ASYNC_FAULT | \ _TIF_NOTIFY_SIGNAL) -@@ -108,6 +112,8 @@ int arch_dup_task_struct(struct task_str +@@ -110,6 +114,8 @@ int arch_dup_task_struct(struct task_str _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ _TIF_SYSCALL_EMU) @@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner #endif --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c -@@ -921,7 +921,7 @@ static void do_signal(struct pt_regs *re +@@ -1089,7 +1089,7 @@ static void do_signal(struct pt_regs *re void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags) { do { diff --git a/patches/arm64-mm-Make-arch_faults_on_old_pte-check-for-migra.patch b/patches/arm64-mm-Make-arch_faults_on_old_pte-check-for-migra.patch deleted file mode 
100644 index 22f2191261e1..000000000000 --- a/patches/arm64-mm-Make-arch_faults_on_old_pte-check-for-migra.patch +++ /dev/null @@ -1,36 +0,0 @@ -From: Valentin Schneider -Date: Thu, 27 Jan 2022 19:24:37 +0000 -Subject: [PATCH] arm64: mm: Make arch_faults_on_old_pte() check for - migratability - -arch_faults_on_old_pte() relies on the calling context being -non-preemptible. CONFIG_PREEMPT_RT turns the PTE lock into a sleepable -spinlock, which doesn't disable preemption once acquired, triggering the -warning in arch_faults_on_old_pte(). - -It does however disable migration, ensuring the task remains on the same -CPU during the entirety of the critical section, making the read of -cpu_has_hw_af() safe and stable. - -Make arch_faults_on_old_pte() check cant_migrate() instead of preemptible(). - -Suggested-by: Sebastian Andrzej Siewior -Signed-off-by: Valentin Schneider -Link: https://lore.kernel.org/r/20220127192437.1192957-1-valentin.schneider@arm.com -Acked-by: Catalin Marinas ---- - arch/arm64/include/asm/pgtable.h | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -1001,7 +1001,8 @@ static inline void update_mmu_cache(stru - */ - static inline bool arch_faults_on_old_pte(void) - { -- WARN_ON(preemptible()); -+ /* The register read below requires a stable CPU to make any sense */ -+ cant_migrate(); - - return !cpu_has_hw_af(); - } diff --git a/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch b/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch deleted file mode 100644 index 995147c9595e..000000000000 --- a/patches/arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch +++ /dev/null @@ -1,43 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Jul 2021 12:52:14 +0200 -Subject: [PATCH] arm64/sve: Delay freeing memory in fpsimd_flush_thread() - -fpsimd_flush_thread() invokes kfree() via sve_free() within a preempt disabled -section which is not working on -RT. - -Delay freeing of memory until preemption is enabled again. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm64/kernel/fpsimd.c | 9 ++++++++- - 1 file changed, 8 insertions(+), 1 deletion(-) - ---- a/arch/arm64/kernel/fpsimd.c -+++ b/arch/arm64/kernel/fpsimd.c -@@ -1141,6 +1141,8 @@ static void fpsimd_flush_thread_vl(enum - - void fpsimd_flush_thread(void) - { -+ void *sve_state = NULL; -+ - if (!system_supports_fpsimd()) - return; - -@@ -1152,11 +1154,16 @@ void fpsimd_flush_thread(void) - - if (system_supports_sve()) { - clear_thread_flag(TIF_SVE); -- sve_free(current); -+ -+ /* Defer kfree() while in atomic context */ -+ sve_state = current->thread.sve_state; -+ current->thread.sve_state = NULL; -+ - fpsimd_flush_thread_vl(ARM64_VEC_SVE); - } - - put_cpu_fpsimd_context(); -+ kfree(sve_state); - } - - /* diff --git a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch b/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch deleted file mode 100644 index 4fa8df86d47a..000000000000 --- a/patches/arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch +++ /dev/null @@ -1,56 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Thu, 29 Jul 2021 10:36:30 +0200 -Subject: [PATCH] arm64/sve: Make kernel FPU protection RT friendly - -Non RT kernels need to protect FPU against preemption and bottom half -processing. This is achieved by disabling bottom halves via -local_bh_disable() which implictly disables preemption. 
- -On RT kernels this protection mechanism is not sufficient because -local_bh_disable() does not disable preemption. It serializes bottom half -related processing via a CPU local lock. - -As bottom halves are running always in thread context on RT kernels -disabling preemption is the proper choice as it implicitly prevents bottom -half processing. - -Signed-off-by: Sebastian Andrzej Siewior ---- - arch/arm64/kernel/fpsimd.c | 16 ++++++++++++++-- - 1 file changed, 14 insertions(+), 2 deletions(-) - ---- a/arch/arm64/kernel/fpsimd.c -+++ b/arch/arm64/kernel/fpsimd.c -@@ -206,10 +206,19 @@ static void __get_cpu_fpsimd_context(voi - * - * The double-underscore version must only be called if you know the task - * can't be preempted. -+ * -+ * On RT kernels local_bh_disable() is not sufficient because it only -+ * serializes soft interrupt related sections via a local lock, but stays -+ * preemptible. Disabling preemption is the right choice here as bottom -+ * half processing is always in thread context on RT kernels so it -+ * implicitly prevents bottom half processing as well. - */ - static void get_cpu_fpsimd_context(void) - { -- local_bh_disable(); -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) -+ local_bh_disable(); -+ else -+ preempt_disable(); - __get_cpu_fpsimd_context(); - } - -@@ -230,7 +239,10 @@ static void __put_cpu_fpsimd_context(voi - static void put_cpu_fpsimd_context(void) - { - __put_cpu_fpsimd_context(); -- local_bh_enable(); -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) -+ local_bh_enable(); -+ else -+ preempt_enable(); - } - - static bool have_cpu_fpsimd_context(void) diff --git a/patches/arm__Add_support_for_lazy_preemption.patch b/patches/arm__Add_support_for_lazy_preemption.patch index 75c7f1f824e3..a0ed87066591 100644 --- a/patches/arm__Add_support_for_lazy_preemption.patch +++ b/patches/arm__Add_support_for_lazy_preemption.patch @@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner DEFINE(TI_CPU_SAVE, offsetof(struct thread_info, cpu_context)); --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S -@@ -224,11 +224,18 @@ ENDPROC(__dabt_svc) +@@ -222,11 +222,18 @@ ENDPROC(__dabt_svc) #ifdef CONFIG_PREEMPTION ldr r8, [tsk, #TI_PREEMPT] @ get preempt count @@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner #endif svc_exit r5, irq = 1 @ return from exception -@@ -243,8 +250,14 @@ ENDPROC(__irq_svc) +@@ -241,8 +248,14 @@ ENDPROC(__irq_svc) 1: bl preempt_schedule_irq @ irq en/disable is done inside ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS tst r0, #_TIF_NEED_RESCHED diff --git a/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch b/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch index 0c19a3d95631..0f0b6c10bc1e 100644 --- a/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch +++ b/patches/blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch @@ -26,7 +26,7 @@ Link: https://lore.kernel.org/r/YnQHqx/5+54jd+U+@linutronix.de --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -2046,14 +2046,14 @@ static void __blk_mq_delay_run_hw_queue( +@@ -2083,14 +2083,14 @@ static void __blk_mq_delay_run_hw_queue( return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { diff --git a/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch b/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch deleted file mode 100644 index cb8e8d588157..000000000000 --- a/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch +++ /dev/null @@ -1,89 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Tue, 3 May 2022 08:30:01 
+0200 -Subject: [PATCH] crypto: cryptd - Protect per-CPU resource by disabling BH. - -The access to cryptd_queue::cpu_queue is synchronized by disabling -preemption in cryptd_enqueue_request() and disabling BH in -cryptd_queue_worker(). This implies that access is allowed from BH. - -If cryptd_enqueue_request() is invoked from preemptible context _and_ -soft interrupt then this can lead to list corruption since -cryptd_enqueue_request() is not protected against access from -soft interrupt. - -Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable() -to ensure BH is always disabled. -Remove preempt_disable() from cryptd_queue_worker() since it is not -needed because local_bh_disable() ensures synchronisation. - -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lkml.kernel.org/r/YnKWuLQZdPwSdRTh@linutronix.de ---- - crypto/cryptd.c | 23 +++++++++++------------ - 1 file changed, 11 insertions(+), 12 deletions(-) - ---- a/crypto/cryptd.c -+++ b/crypto/cryptd.c -@@ -39,6 +39,10 @@ struct cryptd_cpu_queue { - }; - - struct cryptd_queue { -+ /* -+ * Protected by disabling BH to allow enqueueing from softinterrupt and -+ * dequeuing from kworker (cryptd_queue_worker()). -+ */ - struct cryptd_cpu_queue __percpu *cpu_queue; - }; - -@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cry - static int cryptd_enqueue_request(struct cryptd_queue *queue, - struct crypto_async_request *request) - { -- int cpu, err; -+ int err; - struct cryptd_cpu_queue *cpu_queue; - refcount_t *refcnt; - -- cpu = get_cpu(); -+ local_bh_disable(); - cpu_queue = this_cpu_ptr(queue->cpu_queue); - err = crypto_enqueue_request(&cpu_queue->queue, request); - - refcnt = crypto_tfm_ctx(request->tfm); - - if (err == -ENOSPC) -- goto out_put_cpu; -+ goto out; - -- queue_work_on(cpu, cryptd_wq, &cpu_queue->work); -+ queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work); - - if (!refcount_read(refcnt)) -- goto out_put_cpu; -+ goto out; - - refcount_inc(refcnt); - --out_put_cpu: -- put_cpu(); -+out: -+ local_bh_enable(); - - return err; - } -@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct w - cpu_queue = container_of(work, struct cryptd_cpu_queue, work); - /* - * Only handle one request at a time to avoid hogging crypto workqueue. -- * preempt_disable/enable is used to prevent being preempted by -- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent -- * cryptd_enqueue_request() being accessed from software interrupts. 
- */ - local_bh_disable(); -- preempt_disable(); - backlog = crypto_get_backlog(&cpu_queue->queue); - req = crypto_dequeue_request(&cpu_queue->queue); -- preempt_enable(); - local_bh_enable(); - - if (!req) diff --git a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch index 38d74ba73010..38b46754e2ce 100644 --- a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch +++ b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch @@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner static inline bool init_done(struct zram *zram) { -@@ -1194,6 +1229,7 @@ static bool zram_meta_alloc(struct zram +@@ -1195,6 +1230,7 @@ static bool zram_meta_alloc(struct zram if (!huge_class_size) huge_class_size = zs_huge_class_size(zram->mem_pool); diff --git a/patches/entry--Fix-the-preempt-lazy-fallout.patch b/patches/entry--Fix-the-preempt-lazy-fallout.patch index 6b35c197ea3a..1688d3f9bc2b 100644 --- a/patches/entry--Fix-the-preempt-lazy-fallout.patch +++ b/patches/entry--Fix-the-preempt-lazy-fallout.patch @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h -@@ -153,8 +153,6 @@ struct thread_info { +@@ -151,8 +151,6 @@ struct thread_info { #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) diff --git a/patches/genirq-Provide-generic_handle_domain_irq_safe.patch b/patches/genirq-Provide-generic_handle_domain_irq_safe.patch index 3deba9c63660..517b2adff54f 100644 --- a/patches/genirq-Provide-generic_handle_domain_irq_safe.patch +++ b/patches/genirq-Provide-generic_handle_domain_irq_safe.patch @@ -38,7 +38,7 @@ Link: https://lore.kernel.org/r/YnkfWFzvusFFktSt@linutronix.de --- a/drivers/bcma/driver_gpio.c +++ b/drivers/bcma/driver_gpio.c -@@ -113,7 +113,7 @@ static irqreturn_t bcma_gpio_irq_handler +@@ -115,7 +115,7 @@ static irqreturn_t bcma_gpio_irq_handler return IRQ_NONE; for_each_set_bit(gpio, &irqs, gc->ngpio) @@ -64,7 +64,7 @@ Link: https://lore.kernel.org/r/YnkfWFzvusFFktSt@linutronix.de } --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c -@@ -638,7 +638,7 @@ static bool do_amd_gpio_irq_handler(int +@@ -643,7 +643,7 @@ static bool do_amd_gpio_irq_handler(int if (!(regval & PIN_IRQ_PENDING) || !(regval & BIT(INTERRUPT_MASK_OFF))) continue; @@ -119,7 +119,7 @@ Link: https://lore.kernel.org/r/YnkfWFzvusFFktSt@linutronix.de --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c -@@ -706,6 +706,30 @@ int generic_handle_domain_irq(struct irq +@@ -705,6 +705,30 @@ int generic_handle_domain_irq(struct irq } EXPORT_SYMBOL_GPL(generic_handle_domain_irq); diff --git a/patches/genirq-irq_sim-Make-the-irq_work-always-run-in-hard-.patch b/patches/genirq-irq_sim-Make-the-irq_work-always-run-in-hard-.patch deleted file mode 100644 index d00e8b607af5..000000000000 --- a/patches/genirq-irq_sim-Make-the-irq_work-always-run-in-hard-.patch +++ /dev/null @@ -1,37 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 11 May 2022 13:07:50 +0200 -Subject: [PATCH] genirq/irq_sim: Make the irq_work always run in hard irq - context. -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -The IRQ simulator uses irq_work to trigger an interrupt. Without the -IRQ_WORK_HARD_IRQ flag the irq_work will be performed in thread context -on PREEMPT_RT. This causes locking errors later in handle_simple_irq() -which expects to be invoked with disabled interrupts. 
- -Triggering individual interrupts in hardirq context should not lead to -unexpected high latencies since this is also what the hardware -controller does. Also it is used as a simulator so… - -Use IRQ_WORK_INIT_HARD() to carry out the irq_work in hardirq context on -PREEMPT_RT. - -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/YnuZBoEVMGwKkLm+@linutronix.de ---- - kernel/irq/irq_sim.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/kernel/irq/irq_sim.c -+++ b/kernel/irq/irq_sim.c -@@ -181,7 +181,7 @@ struct irq_domain *irq_domain_create_sim - goto err_free_bitmap; - - work_ctx->irq_count = num_irqs; -- init_irq_work(&work_ctx->work, irq_sim_handle_irq); -+ work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq); - - return work_ctx->domain; - diff --git a/patches/lib-irq_poll-Prevent-softirq-pending-leak-in-irq_pol.patch b/patches/lib-irq_poll-Prevent-softirq-pending-leak-in-irq_pol.patch deleted file mode 100644 index 08bf96b04bb2..000000000000 --- a/patches/lib-irq_poll-Prevent-softirq-pending-leak-in-irq_pol.patch +++ /dev/null @@ -1,49 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Sun, 10 Apr 2022 14:49:36 +0200 -Subject: [PATCH] lib/irq_poll: Prevent softirq pending leak in - irq_poll_cpu_dead() - -irq_poll_cpu_dead() pulls the blk_cpu_iopoll backlog from the dead CPU and -raises the POLL softirq with __raise_softirq_irqoff() on the CPU it is -running on. That just sets the bit in the pending softirq mask. - -This means the handling of the softirq is delayed until the next interrupt -or a local_bh_disable/enable() pair. As a consequence the CPU on which this -code runs can reach idle with the POLL softirq pending, which triggers a -warning in the NOHZ idle code. - -Add a local_bh_disable/enable() pair around the interrupts disabled section -in irq_poll_cpu_dead(). local_bh_enable will handle the pending softirq. - -[tglx: Massaged changelog and comment] - -Signed-off-by: Sebastian Andrzej Siewior -Signed-off-by: Thomas Gleixner -Link: https://lore.kernel.org/r/87k0bxgl27.ffs@tglx ---- - lib/irq_poll.c | 8 ++++++-- - 1 file changed, 6 insertions(+), 2 deletions(-) - ---- a/lib/irq_poll.c -+++ b/lib/irq_poll.c -@@ -188,14 +188,18 @@ EXPORT_SYMBOL(irq_poll_init); - static int irq_poll_cpu_dead(unsigned int cpu) - { - /* -- * If a CPU goes away, splice its entries to the current CPU -- * and trigger a run of the softirq -+ * If a CPU goes away, splice its entries to the current CPU and -+ * set the POLL softirq bit. The local_bh_disable()/enable() pair -+ * ensures that it is handled. Otherwise the current CPU could -+ * reach idle with the POLL softirq pending. - */ -+ local_bh_disable(); - local_irq_disable(); - list_splice_init(&per_cpu(blk_cpu_iopoll, cpu), - this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); - local_irq_enable(); -+ local_bh_enable(); - - return 0; - } diff --git a/patches/locking-lockdep-Use-sched_clock-for-random-numbers.patch b/patches/locking-lockdep-Use-sched_clock-for-random-numbers.patch new file mode 100644 index 000000000000..d495c69340ad --- /dev/null +++ b/patches/locking-lockdep-Use-sched_clock-for-random-numbers.patch @@ -0,0 +1,35 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 17 May 2022 11:16:14 +0200 +Subject: [PATCH] locking/lockdep: Use sched_clock() for random numbers. + +Since the rewrite of prandom_u32() in the commit mentioned below, the +function uses sleeping locks while extracting random numbers and filling +the batch. 
+This breaks lockdep on PREEMPT_RT because lock_pin_lock() disables +interrupts while calling __lock_pin_lock(). This can't be moved earlier +because the main user of the function (rq_pin_lock()) invokes that +function after disabling interrupts in order to acquire the lock. + +The cookie does not require real random numbers; its goal is only to +provide a value random enough to notice unexpected "unlock + lock" sites. + +Use sched_clock() to provide random numbers. + +Fixes: a0103f4d86f88 ("random32: use real rng for non-deterministic randomness") +Signed-off-by: Sebastian Andrzej Siewior +Link: https://lore.kernel.org/r/YoNn3pTkm5+QzE5k@linutronix.de +--- + kernel/locking/lockdep.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/kernel/locking/lockdep.c ++++ b/kernel/locking/lockdep.c +@@ -5432,7 +5432,7 @@ static struct pin_cookie __lock_pin_lock + * be guessable and still allows some pin nesting in + * our u32 pin_count. + */ +- cookie.val = 1 + (prandom_u32() >> 16); ++ cookie.val = 1 + (sched_clock() & 0xffff); + hlock->pin_count += cookie.val; + return cookie; + } diff --git a/patches/mm-slub-Move-the-stackdepot-related-allocation-out-o.patch b/patches/mm-slub-Move-the-stackdepot-related-allocation-out-o.patch new file mode 100644 index 000000000000..b47e224fb07a --- /dev/null +++ b/patches/mm-slub-Move-the-stackdepot-related-allocation-out-o.patch @@ -0,0 +1,91 @@ +From: Sebastian Andrzej Siewior +Date: Tue, 7 Jun 2022 17:20:10 +0200 +Subject: [PATCH] mm/slub: Move the stackdepot related allocation out of + IRQ-off section. + +The set_track() call in free_debug_processing() happens with the +slab_lock() acquired. The lock disables interrupts on PREEMPT_RT and +this forbids allocating memory, which is done in stack_depot_save(). + +Split set_track() into two parts: set_track_prepare(), which allocates +memory, and set_track_update(), which only performs the assignment of +the trace data structure. Use set_track_prepare() before disabling +interrupts. 
+ +Fixes: 5cf909c553e9e ("mm/slub: use stackdepot to save stack trace in objects") +Signed-off-by: Sebastian Andrzej Siewior +Link: https://lore.kernel.org/r/Yp9sqoUi4fVa5ExF@linutronix.de +--- + mm/slub.c | 39 +++++++++++++++++++++++++++++++++------ + 1 file changed, 33 insertions(+), 6 deletions(-) + +--- a/mm/slub.c ++++ b/mm/slub.c +@@ -726,19 +726,42 @@ static struct track *get_track(struct km + return kasan_reset_tag(p + alloc); + } + +-static void noinline set_track(struct kmem_cache *s, void *object, +- enum track_item alloc, unsigned long addr) ++static noinline depot_stack_handle_t set_track_prepare(void) + { +- struct track *p = get_track(s, object, alloc); +- ++ depot_stack_handle_t handle = 0; + #ifdef CONFIG_STACKDEPOT + unsigned long entries[TRACK_ADDRS_COUNT]; + unsigned int nr_entries; + + nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3); +- p->handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); ++ handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT); + #endif ++ return handle; ++} ++ ++static void set_track_update(struct kmem_cache *s, void *object, ++ enum track_item alloc, unsigned long addr, ++ depot_stack_handle_t handle) ++{ ++ struct track *p = get_track(s, object, alloc); + ++#ifdef CONFIG_STACKDEPOT ++ p->handle = handle; ++#endif ++ p->addr = addr; ++ p->cpu = smp_processor_id(); ++ p->pid = current->pid; ++ p->when = jiffies; ++} ++ ++static __always_inline void set_track(struct kmem_cache *s, void *object, ++ enum track_item alloc, unsigned long addr) ++{ ++ struct track *p = get_track(s, object, alloc); ++ ++#ifdef CONFIG_STACKDEPOT ++ p->handle = set_track_prepare(); ++#endif + p->addr = addr; + p->cpu = smp_processor_id(); + p->pid = current->pid; +@@ -1373,6 +1396,10 @@ static noinline int free_debug_processin + int cnt = 0; + unsigned long flags, flags2; + int ret = 0; ++ depot_stack_handle_t handle = 0; ++ ++ if (s->flags & SLAB_STORE_USER) ++ handle = set_track_prepare(); + + spin_lock_irqsave(&n->list_lock, flags); + slab_lock(slab, &flags2); +@@ -1391,7 +1418,7 @@ static noinline int free_debug_processin + } + + if (s->flags & SLAB_STORE_USER) +- set_track(s, object, TRACK_FREE, addr); ++ set_track_update(s, object, TRACK_FREE, addr, handle); + trace(s, slab, object, 0); + /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */ + init_object(s, object, SLUB_RED_INACTIVE); diff --git a/patches/mm-vmalloc-Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch b/patches/mm-vmalloc-Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch deleted file mode 100644 index 66616f1b758b..000000000000 --- a/patches/mm-vmalloc-Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch +++ /dev/null @@ -1,58 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Wed, 4 May 2022 19:03:25 +0200 -Subject: [PATCH] mm/vmalloc: Use raw_cpu_ptr() for vmap_block_queue access. - -The per-CPU resource vmap_block_queue is accessed via the -get_cpu_var(). That macro disables preemption and then loads the pointer -from the current CPU. -This doesn't work on PREEMPT_RT because a spinlock_t is later accessed -within the preempt-disable section. - -There is no need to disable preemption while accessing the per-CPU -struct vmap_block_queue because the list is protected with a spinlock_t. -The per-CPU struct is also accessed cross-CPU in -purge_fragmented_blocks(). -It is possible that by using raw_cpu_ptr() the code migrates to another -CPU and uses struct from another CPU. This is fine because the list -locked and the locked section is very short. 
- -Use raw_cpu_ptr() to access vmap_block_queue. - -Signed-off-by: Sebastian Andrzej Siewior -Link: https://lore.kernel.org/r/YnKx3duAB53P7ojN@linutronix.de ---- - mm/vmalloc.c | 6 ++---- - 1 file changed, 2 insertions(+), 4 deletions(-) - ---- a/mm/vmalloc.c -+++ b/mm/vmalloc.c -@@ -1938,11 +1938,10 @@ static void *new_vmap_block(unsigned int - return ERR_PTR(err); - } - -- vbq = &get_cpu_var(vmap_block_queue); -+ vbq = raw_cpu_ptr(&vmap_block_queue); - spin_lock(&vbq->lock); - list_add_tail_rcu(&vb->free_list, &vbq->free); - spin_unlock(&vbq->lock); -- put_cpu_var(vmap_block_queue); - - return vaddr; - } -@@ -2021,7 +2020,7 @@ static void *vb_alloc(unsigned long size - order = get_order(size); - - rcu_read_lock(); -- vbq = &get_cpu_var(vmap_block_queue); -+ vbq = raw_cpu_ptr(&vmap_block_queue); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { - unsigned long pages_off; - -@@ -2044,7 +2043,6 @@ static void *vb_alloc(unsigned long size - break; - } - -- put_cpu_var(vmap_block_queue); - rcu_read_unlock(); - - /* Allocate new block if nothing was found */ diff --git a/patches/powerpc__Add_support_for_lazy_preemption.patch b/patches/powerpc__Add_support_for_lazy_preemption.patch index 50b7a014196f..46afa8bf6eb5 100644 --- a/patches/powerpc__Add_support_for_lazy_preemption.patch +++ b/patches/powerpc__Add_support_for_lazy_preemption.patch @@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner --- --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -236,6 +236,7 @@ config PPC +@@ -241,6 +241,7 @@ config PPC select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP diff --git a/patches/rcu-tasks-Use-rcuwait-for-the-rcu_tasks_kthread.patch b/patches/rcu-tasks-Use-rcuwait-for-the-rcu_tasks_kthread.patch deleted file mode 100644 index 54244c7beb14..000000000000 --- a/patches/rcu-tasks-Use-rcuwait-for-the-rcu_tasks_kthread.patch +++ /dev/null @@ -1,76 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Fri, 4 Mar 2022 09:22:46 +0100 -Subject: [PATCH] rcu-tasks: Use rcuwait for the rcu_tasks_kthread(). - -The waitqueue used by rcu_tasks_kthread() has always only one waiter. -With a guaranteed only one waiter, this can be replaced with rcuwait -which is smaller and simpler. With rcuwait based wake counterpart, the -irqwork function (call_rcu_tasks_iw_wakeup()) can be invoked hardirq -context because it is only a wake up and no sleeping locks are involved -(unlike the wait_queue_head). -As a side effect, this is also one piece of the puzzle to pass the RCU -selftest at early boot on PREEMPT_RT. - -Replace wait_queue_head with rcuwait and let the irqwork run in hardirq -context on PREEMPT_RT. - -Link: https://lkml.kernel.org/r/YiHy7Y5fTU3jRdMi@linutronix.de -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tasks.h | 14 ++++++++------ - 1 file changed, 8 insertions(+), 6 deletions(-) - ---- a/kernel/rcu/tasks.h -+++ b/kernel/rcu/tasks.h -@@ -46,7 +46,7 @@ struct rcu_tasks_percpu { - - /** - * struct rcu_tasks - Definition for a Tasks-RCU-like mechanism. -- * @cbs_wq: Wait queue allowing new callback to get kthread's attention. -+ * @cbs_wait: RCU wait allowing a new callback to get kthread's attention. - * @cbs_gbl_lock: Lock protecting callback list. - * @kthread_ptr: This flavor's grace-period/callback-invocation kthread. - * @gp_func: This flavor's grace-period-wait function. -@@ -77,7 +77,7 @@ struct rcu_tasks_percpu { - * @kname: This flavor's kthread name. 
- */ - struct rcu_tasks { -- struct wait_queue_head cbs_wq; -+ struct rcuwait cbs_wait; - raw_spinlock_t cbs_gbl_lock; - int gp_state; - int gp_sleep; -@@ -113,11 +113,11 @@ static void call_rcu_tasks_iw_wakeup(str - #define DEFINE_RCU_TASKS(rt_name, gp, call, n) \ - static DEFINE_PER_CPU(struct rcu_tasks_percpu, rt_name ## __percpu) = { \ - .lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name ## __percpu.cbs_pcpu_lock), \ -- .rtp_irq_work = IRQ_WORK_INIT(call_rcu_tasks_iw_wakeup), \ -+ .rtp_irq_work = IRQ_WORK_INIT_HARD(call_rcu_tasks_iw_wakeup), \ - }; \ - static struct rcu_tasks rt_name = \ - { \ -- .cbs_wq = __WAIT_QUEUE_HEAD_INITIALIZER(rt_name.cbs_wq), \ -+ .cbs_wait = __RCUWAIT_INITIALIZER(rt_name.wait), \ - .cbs_gbl_lock = __RAW_SPIN_LOCK_UNLOCKED(rt_name.cbs_gbl_lock), \ - .gp_func = gp, \ - .call_func = call, \ -@@ -261,7 +261,7 @@ static void call_rcu_tasks_iw_wakeup(str - struct rcu_tasks_percpu *rtpcp = container_of(iwp, struct rcu_tasks_percpu, rtp_irq_work); - - rtp = rtpcp->rtpp; -- wake_up(&rtp->cbs_wq); -+ rcuwait_wake_up(&rtp->cbs_wait); - } - - // Enqueue a callback for the specified flavor of Tasks RCU. -@@ -509,7 +509,9 @@ static int __noreturn rcu_tasks_kthread( - set_tasks_gp_state(rtp, RTGS_WAIT_CBS); - - /* If there were none, wait a bit and start over. */ -- wait_event_idle(rtp->cbs_wq, (needgpcb = rcu_tasks_need_gpcb(rtp))); -+ rcuwait_wait_event(&rtp->cbs_wait, -+ (needgpcb = rcu_tasks_need_gpcb(rtp)), -+ TASK_IDLE); - - if (needgpcb & 0x2) { - // Wait for one grace period. diff --git a/patches/rcu-tasks-Use-schedule_hrtimeout_range-while-waiting.patch b/patches/rcu-tasks-Use-schedule_hrtimeout_range-while-waiting.patch deleted file mode 100644 index df6bff82cc36..000000000000 --- a/patches/rcu-tasks-Use-schedule_hrtimeout_range-while-waiting.patch +++ /dev/null @@ -1,42 +0,0 @@ -From: Sebastian Andrzej Siewior -Date: Mon, 7 Mar 2022 17:08:23 +0100 -Subject: [PATCH] rcu-tasks: Use schedule_hrtimeout_range() while waiting for - the gp. - -The RCU selftest is using schedule_timeout_idle() which fails on -PREEMPT_RT because it is used early in boot-up phase an which point -ksoftirqd is not yet ready and is required for the timer to expire. - -To avoid this lockup, use schedule_hrtimeout() and let the timer expire -in hardirq context. This is ensures that the timer fires even on -PREEMPT_RT without any further requirement. - -The timer is set to expire between fract and fract + HZ / 2 jiffies in -order to minimize the amount of extra wake ups and to allign with -possible other timer which expire within this window. 
- -Signed-off-by: Sebastian Andrzej Siewior ---- - kernel/rcu/tasks.h | 7 +++++-- - 1 file changed, 5 insertions(+), 2 deletions(-) - ---- a/kernel/rcu/tasks.h -+++ b/kernel/rcu/tasks.h -@@ -630,12 +630,15 @@ static void rcu_tasks_wait_gp(struct rcu - while (!list_empty(&holdouts)) { - bool firstreport; - bool needreport; -+ ktime_t exp; - int rtst; - - /* Slowly back off waiting for holdouts */ - set_tasks_gp_state(rtp, RTGS_WAIT_SCAN_HOLDOUTS); -- schedule_timeout_idle(fract); -- -+ exp = jiffies_to_nsecs(fract); -+ __set_current_state(TASK_IDLE); -+ schedule_hrtimeout_range(&exp, jiffies_to_nsecs(HZ / 2), -+ HRTIMER_MODE_REL_HARD); - if (fract < HZ) - fract++; - diff --git a/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch index 81388d4b14c5..9a08f9d47bae 100644 --- a/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch +++ b/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch @@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -624,6 +624,7 @@ extern void raise_softirq_irqoff(unsigne +@@ -606,6 +606,7 @@ extern void raise_softirq_irqoff(unsigne extern void raise_softirq(unsigned int nr); #ifdef CONFIG_PREEMPT_RT @@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c -@@ -3294,6 +3294,12 @@ rcu_torture_init(void) +@@ -3342,6 +3342,12 @@ rcu_torture_init(void) WARN_ON_ONCE(!t); sp.sched_priority = 2; sched_setscheduler_nocheck(t, SCHED_FIFO, &sp); diff --git a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch index 03b5fc03d962..6a6ba6386881 100644 --- a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch +++ b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3285,6 +3285,8 @@ unsigned long wait_task_inactive(struct +@@ -3279,6 +3279,8 @@ unsigned long wait_task_inactive(struct struct rq_flags rf; unsigned long ncsw; struct rq *rq; @@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior for (;;) { /* -@@ -3307,8 +3309,22 @@ unsigned long wait_task_inactive(struct +@@ -3301,8 +3303,22 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! */ while (task_running(rq, p)) { @@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior cpu_relax(); } -@@ -3322,7 +3338,21 @@ unsigned long wait_task_inactive(struct +@@ -3316,7 +3332,21 @@ unsigned long wait_task_inactive(struct running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); -@@ -3352,7 +3382,7 @@ unsigned long wait_task_inactive(struct +@@ -3346,7 +3376,7 @@ unsigned long wait_task_inactive(struct * running right now), it's preempted, and we should * yield - it could be a while. 
*/ diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch index 9cd86c40c4a2..bb74b61e61df 100644 --- a/patches/sched__Add_support_for_lazy_preemption.patch +++ b/patches/sched__Add_support_for_lazy_preemption.patch @@ -106,7 +106,7 @@ Signed-off-by: Thomas Gleixner #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ -@@ -241,6 +261,18 @@ do { \ +@@ -235,6 +255,18 @@ do { \ __preempt_schedule(); \ } while (0) @@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner #else /* !CONFIG_PREEMPTION */ #define preempt_enable() \ do { \ -@@ -248,6 +280,12 @@ do { \ +@@ -242,6 +274,12 @@ do { \ preempt_count_dec(); \ } while (0) @@ -138,8 +138,8 @@ Signed-off-by: Thomas Gleixner #define preempt_enable_notrace() \ do { \ barrier(); \ -@@ -289,6 +327,9 @@ do { \ - #define preempt_check_resched_rt() barrier() +@@ -282,6 +320,9 @@ do { \ + #define preempt_enable_notrace() barrier() #define preemptible() 0 +#define preempt_lazy_disable() barrier() @@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner #endif /* CONFIG_PREEMPT_COUNT */ #ifdef MODULE -@@ -307,7 +348,7 @@ do { \ +@@ -300,7 +341,7 @@ do { \ } while (0) #define preempt_fold_need_resched() \ do { \ @@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner set_preempt_need_resched(); \ } while (0) -@@ -423,8 +464,15 @@ extern void migrate_enable(void); +@@ -416,8 +457,15 @@ extern void migrate_enable(void); #else @@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -2023,6 +2023,43 @@ static inline int test_tsk_need_resched( +@@ -2026,6 +2026,43 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1065,6 +1065,46 @@ void resched_curr(struct rq *rq) +@@ -1052,6 +1052,46 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } @@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2239,6 +2279,7 @@ void migrate_disable(void) +@@ -2233,6 +2273,7 @@ void migrate_disable(void) preempt_disable(); this_rq()->nr_pinned++; p->migration_disabled = 1; @@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner preempt_enable(); } EXPORT_SYMBOL_GPL(migrate_disable); -@@ -2270,6 +2311,7 @@ void migrate_enable(void) +@@ -2264,6 +2305,7 @@ void migrate_enable(void) barrier(); p->migration_disabled = 0; this_rq()->nr_pinned--; @@ -357,7 +357,7 @@ Signed-off-by: Thomas Gleixner preempt_enable(); } EXPORT_SYMBOL_GPL(migrate_enable); -@@ -4544,6 +4586,9 @@ int sched_fork(unsigned long clone_flags +@@ -4584,6 +4626,9 @@ int sched_fork(unsigned long clone_flags p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -6381,6 +6426,7 @@ static void __sched notrace __schedule(u +@@ -6421,6 +6466,7 @@ static void __sched notrace __schedule(u next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -375,7 +375,7 @@ Signed-off-by: Thomas Gleixner clear_preempt_need_resched(); #ifdef CONFIG_SCHED_DEBUG rq->last_seen_need_resched_ns = 0; -@@ -6591,6 +6637,30 @@ static void __sched notrace preempt_sche +@@ -6631,6 +6677,30 @@ static void __sched notrace preempt_sche } while (need_resched()); } @@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner #ifdef 
CONFIG_PREEMPTION /* * This is the entry point to schedule() from in-kernel preemption -@@ -6604,6 +6674,8 @@ asmlinkage __visible void __sched notrac +@@ -6644,6 +6714,8 @@ asmlinkage __visible void __sched notrac */ if (likely(!preemptible())) return; @@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); -@@ -6651,6 +6723,9 @@ asmlinkage __visible void __sched notrac +@@ -6691,6 +6763,9 @@ asmlinkage __visible void __sched notrac if (likely(!preemptible())) return; @@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner do { /* * Because the function tracer can trace preempt_count_sub() -@@ -8867,7 +8942,9 @@ void __init init_idle(struct task_struct +@@ -8919,7 +8994,9 @@ void __init init_idle(struct task_struct /* Set the preempt count _outside_ the spinlocks! */ init_idle_preempt_count(idle, cpu); @@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner */ --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4481,7 +4481,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4492,7 +4492,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4505,7 +4505,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4516,7 +4516,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq return; if (delta > ideal_runtime) @@ -456,7 +456,7 @@ Signed-off-by: Thomas Gleixner } static void -@@ -4651,7 +4651,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc +@@ -4662,7 +4662,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc * validating it and just reschedule. */ if (queued) { @@ -465,7 +465,7 @@ Signed-off-by: Thomas Gleixner return; } /* -@@ -4800,7 +4800,7 @@ static void __account_cfs_rq_runtime(str +@@ -4811,7 +4811,7 @@ static void __account_cfs_rq_runtime(str * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -474,7 +474,7 @@ Signed-off-by: Thomas Gleixner } static __always_inline -@@ -5563,7 +5563,7 @@ static void hrtick_start_fair(struct rq +@@ -5574,7 +5574,7 @@ static void hrtick_start_fair(struct rq if (delta < 0) { if (task_current(rq, p)) @@ -483,7 +483,7 @@ Signed-off-by: Thomas Gleixner return; } hrtick_start(rq, delta); -@@ -7213,7 +7213,7 @@ static void check_preempt_wakeup(struct +@@ -7181,7 +7181,7 @@ static void check_preempt_wakeup(struct return; preempt: @@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -11251,7 +11251,7 @@ static void task_fork_fair(struct task_s +@@ -11220,7 +11220,7 @@ static void task_fork_fair(struct task_s * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -501,7 +501,7 @@ Signed-off-by: Thomas Gleixner } se->vruntime -= cfs_rq->min_vruntime; -@@ -11278,7 +11278,7 @@ prio_changed_fair(struct rq *rq, struct +@@ -11247,7 +11247,7 @@ prio_changed_fair(struct rq *rq, struct */ if (task_current(rq, p)) { if (p->prio > oldprio) @@ -524,7 +524,7 @@ Signed-off-by: Thomas Gleixner /* --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -2302,6 +2302,15 @@ extern void reweight_task(struct task_st +@@ -2310,6 +2310,15 @@ extern void reweight_task(struct task_st extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -542,7 +542,7 @@ Signed-off-by: Thomas Gleixner extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2622,11 +2622,19 @@ unsigned int tracing_gen_ctx_irq_test(un +@@ -2625,11 +2625,19 @@ unsigned int tracing_gen_ctx_irq_test(un if (softirq_count() >> (SOFTIRQ_SHIFT + 1)) trace_flags |= TRACE_FLAG_BH_OFF; @@ -564,7 +564,7 @@ Signed-off-by: Thomas Gleixner (min_t(unsigned int, migration_disable_value(), 0xf)) << 4; } -@@ -4212,15 +4220,17 @@ unsigned long trace_total_entries(struct +@@ -4215,15 +4223,17 @@ unsigned long trace_total_entries(struct static void print_lat_help_header(struct seq_file *m) { @@ -591,7 +591,7 @@ Signed-off-by: Thomas Gleixner } static void print_event_info(struct array_buffer *buf, struct seq_file *m) -@@ -4254,14 +4264,16 @@ static void print_func_help_header_irq(s +@@ -4257,14 +4267,16 @@ static void print_func_help_header_irq(s print_event_info(buf, m); diff --git a/patches/series b/patches/series index f786def9cbd1..bfccae87d12c 100644 --- a/patches/series +++ b/patches/series @@ -3,21 +3,6 @@ ########################################################################### # John's printk queue ########################################################################### -0001-printk-rename-cpulock-functions.patch -0002-printk-cpu-sync-always-disable-interrupts.patch -0003-printk-add-missing-memory-barrier-to-wake_up_klogd.patch -0004-printk-wake-up-all-waiters.patch -0005-printk-wake-waiters-for-safe-and-NMI-contexts.patch -0006-printk-get-caller_id-timestamp-after-migration-disab.patch -0007-printk-call-boot_delay_msec-in-printk_delay.patch -0008-printk-add-con_printk-macro-for-console-details.patch -0009-printk-refactor-and-rework-printing-logic.patch -0010-printk-move-buffer-definitions-into-console_emit_nex.patch -0011-printk-add-pr_flush.patch -0012-printk-add-functions-to-prefer-direct-printing.patch -0013-printk-add-kthread-console-printers.patch -0014-printk-extend-console_lock-for-per-console-locking.patch -0015-printk-remove-console_locked.patch 0016-printk-add-infrastucture-for-atomic-consoles.patch 0017-serial-8250-implement-write_atomic.patch 0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch @@ -26,44 +11,17 @@ # Posted and applied ########################################################################### -# in -tip or traveling to -tip. 
-lib-irq_poll-Prevent-softirq-pending-leak-in-irq_pol.patch # signal_x86__Delay_calling_signals_in_atomic.patch -# v5, fixed 3/3 of smp: Make softirq handling RT friendly -0001-sched-Fix-missing-prototype-warnings.patch -0002-smp-Rename-flush_smp_call_function_from_idle.patch -0003-smp-Make-softirq-handling-RT-safe-in-flush_smp_call_.patch - ########################################################################### # Posted ########################################################################### -rcu-tasks-Use-rcuwait-for-the-rcu_tasks_kthread.patch -rcu-tasks-Use-schedule_hrtimeout_range-while-waiting.patch blk-mq-Don-t-disable-preemption-around-__blk_mq_run_.patch -mm-vmalloc-Use-raw_cpu_ptr-for-vmap_block_queue-acce.patch -SUNRPC-Don-t-disable-preemption-while-calling-svc_po.patch -0001-scsi-fcoe-Add-a-local_lock-to-fcoe_percpu.patch -0002-scsi-fcoe-Use-per-CPU-API-to-update-per-CPU-statisti.patch -0003-scsi-libfc-Remove-get_cpu-semantics-in-fc_exch_em_al.patch -0004-scsi-bnx2fc-Avoid-using-get_cpu-in-bnx2fc_cmd_alloc.patch -genirq-irq_sim-Make-the-irq_work-always-run-in-hard-.patch genirq-Provide-generic_handle_domain_irq_safe.patch +locking-lockdep-Use-sched_clock-for-random-numbers.patch +mm-slub-Move-the-stackdepot-related-allocation-out-o.patch -# Eric's ptrace, v4 -0001-signal-Rename-send_signal-send_signal_locked.patch -0002-signal-Replace-__group_send_sig_info-with-send_signa.patch -0003-ptrace-um-Replace-PT_DTRACE-with-TIF_SINGLESTEP.patch -0004-ptrace-xtensa-Replace-PT_SINGLESTEP-with-TIF_SINGLES.patch -0005-ptrace-Remove-arch_ptrace_attach.patch -0006-signal-Use-lockdep_assert_held-instead-of-assert_spi.patch -0007-ptrace-Reimplement-PTRACE_KILL-by-always-sending-SIG.patch -0008-ptrace-Document-that-wait_task_inactive-can-t-fail.patch -0009-ptrace-Admit-ptrace_stop-can-generate-spuriuos-SIGTR.patch -0010-ptrace-Don-t-change-__state.patch -0011-ptrace-Always-take-siglock-in-ptrace_resume.patch -0012-sched-signal-ptrace-Rework-TASK_TRACED-TASK_STOPPED-.patch -# Hacks to get it to work. +# Hacks to get ptrace to work. 
signal__Revert_ptrace_preempt_magic.patch sched-Consider-task_struct-saved_state-in-wait_task_.patch @@ -93,7 +51,6 @@ tpm_tis__fix_stall_after_iowrites.patch drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch generic-softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch -crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch iio-adc-stm32-adc-Use-generic_handle_domain_irq.patch locking-lockdep-Remove-lockdep_init_map_crosslock.patch @@ -126,9 +83,6 @@ arch_arm64__Add_lazy_preempt_support.patch ########################################################################### jump-label__disable_if_stop_machine_is_used.patch ARM__enable_irq_in_translation_section_permission_fault_handlers.patch -arm64-mm-Make-arch_faults_on_old_pte-check-for-migra.patch -arm64-sve-Delay-freeing-memory-in-fpsimd_flush_threa.patch -arm64-sve-Make-kernel-FPU-protection-RT-friendly.patch # arm64-signal-Use-ARCH_RT_DELAYS_SIGNAL_SEND.patch tty_serial_omap__Make_the_locking_RT_aware.patch tty_serial_pl011__Make_the_locking_work_on_RT.patch diff --git a/patches/signal__Revert_ptrace_preempt_magic.patch b/patches/signal__Revert_ptrace_preempt_magic.patch index 8d19e928f270..dbac2c73d5c7 100644 --- a/patches/signal__Revert_ptrace_preempt_magic.patch +++ b/patches/signal__Revert_ptrace_preempt_magic.patch @@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner --- --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -2283,16 +2283,8 @@ static int ptrace_stop(int exit_code, in +@@ -2294,16 +2294,8 @@ static int ptrace_stop(int exit_code, in if (gstop_done && (!current->ptrace || ptrace_reparented(current))) do_notify_parent_cldstop(current, false, why); diff --git a/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch b/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch index 4554b698dee9..2dde38b281c9 100644 --- a/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch +++ b/patches/softirq-Disable-softirq-stacks-on-PREEMPT_RT.patch @@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior int arch_show_interrupts(struct seq_file *p, int prec) --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c -@@ -689,6 +689,7 @@ static inline void check_stack_overflow( +@@ -611,6 +611,7 @@ static inline void check_stack_overflow( } } @@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior static __always_inline void call_do_softirq(const void *sp) { /* Temporarily switch r1 to sp, call __do_softirq() then restore r1. 
*/ -@@ -707,6 +708,7 @@ static __always_inline void call_do_soft +@@ -629,6 +630,7 @@ static __always_inline void call_do_soft "r11", "r12" ); } @@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior static __always_inline void call_do_irq(struct pt_regs *regs, void *sp) { -@@ -820,10 +822,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most +@@ -747,10 +749,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most void *softirq_ctx[NR_CPUS] __read_mostly; void *hardirq_ctx[NR_CPUS] __read_mostly; diff --git a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch index 8baa5030f95f..da3e3471e3cc 100644 --- a/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch +++ b/patches/softirq-Use-a-dedicated-thread-for-timer-wakeups.patch @@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -623,6 +623,22 @@ extern void __raise_softirq_irqoff(unsig +@@ -605,6 +605,22 @@ extern void __raise_softirq_irqoff(unsig extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); @@ -194,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD); --- a/kernel/time/timer.c +++ b/kernel/time/timer.c -@@ -1769,7 +1769,7 @@ static void run_local_timers(void) +@@ -1822,7 +1822,7 @@ static void run_local_timers(void) if (time_before(jiffies, base->next_expiry)) return; } diff --git a/patches/tick-Fix-timer-storm-since-introduction-of-timersd.patch b/patches/tick-Fix-timer-storm-since-introduction-of-timersd.patch index 3febe509a913..cc4520e9d754 100644 --- a/patches/tick-Fix-timer-storm-since-introduction-of-timersd.patch +++ b/patches/tick-Fix-timer-storm-since-introduction-of-timersd.patch @@ -47,7 +47,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h -@@ -625,9 +625,16 @@ extern void raise_softirq(unsigned int n +@@ -607,9 +607,16 @@ extern void raise_softirq(unsigned int n #ifdef CONFIG_PREEMPT_RT DECLARE_PER_CPU(struct task_struct *, timersd); @@ -64,7 +64,7 @@ Link: https://lkml.kernel.org/r/20220405010752.1347437-2-frederic@kernel.org #else static inline void raise_timer_softirq(void) { -@@ -638,6 +645,11 @@ static inline void raise_hrtimer_softirq +@@ -620,6 +627,11 @@ static inline void raise_hrtimer_softirq { raise_softirq_irqoff(HRTIMER_SOFTIRQ); } diff --git a/patches/tpm_tis__fix_stall_after_iowrites.patch b/patches/tpm_tis__fix_stall_after_iowrites.patch index 2725f2b0164b..d90a209e0391 100644 --- a/patches/tpm_tis__fix_stall_after_iowrites.patch +++ b/patches/tpm_tis__fix_stall_after_iowrites.patch @@ -62,21 +62,18 @@ Signed-off-by: Thomas Gleixner static int interrupts = -1; module_param(interrupts, int, 0444); MODULE_PARM_DESC(interrupts, "Enable interrupts"); -@@ -169,7 +194,7 @@ static int tpm_tcg_write_bytes(struct tp - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); +@@ -185,12 +210,12 @@ static int tpm_tcg_write_bytes(struct tp + switch (io_mode) { + case TPM_TIS_PHYS_8: + while (len--) +- iowrite8(*value++, phy->iobase + addr); ++ tpm_tis_iowrite8(*value++, phy->iobase, addr); + break; + case TPM_TIS_PHYS_16: + return -EINVAL; + case TPM_TIS_PHYS_32: +- iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase + addr); ++ tpm_tis_iowrite32(le32_to_cpu(*((__le32 *)value)), phy->iobase, addr); + break; + } - while (len--) -- iowrite8(*value++, phy->iobase + addr); 
-+ tpm_tis_iowrite8(*value++, phy->iobase, addr); - - return 0; - } -@@ -196,7 +221,7 @@ static int tpm_tcg_write32(struct tpm_ti - { - struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); - -- iowrite32(value, phy->iobase + addr); -+ tpm_tis_iowrite32(value, phy->iobase, addr); - - return 0; - } diff --git a/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch b/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch index f3fbad78d715..02be16f32a42 100644 --- a/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch +++ b/patches/tty_serial_pl011__Make_the_locking_work_on_RT.patch @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner --- --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c -@@ -2275,18 +2275,24 @@ pl011_console_write(struct console *co, +@@ -2289,18 +2289,24 @@ pl011_console_write(struct console *co, { struct uart_amba_port *uap = amba_ports[co->index]; unsigned int old_cr = 0, new_cr; @@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner /* * First save the CR then disable the interrupts -@@ -2312,8 +2318,7 @@ pl011_console_write(struct console *co, +@@ -2326,8 +2332,7 @@ pl011_console_write(struct console *co, pl011_write(old_cr, uap, REG_CR); if (locked) diff --git a/patches/x86__Support_for_lazy_preemption.patch b/patches/x86__Support_for_lazy_preemption.patch index 88944aef4263..bbfbc0248f01 100644 --- a/patches/x86__Support_for_lazy_preemption.patch +++ b/patches/x86__Support_for_lazy_preemption.patch @@ -19,12 +19,12 @@ Signed-off-by: Thomas Gleixner --- --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -240,6 +240,7 @@ config X86 +@@ -245,6 +245,7 @@ config X86 select HAVE_PCI select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY - select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT + select MMU_GATHER_RCU_TABLE_FREE if PARAVIRT select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_REGS_AND_STACK_ACCESS_API --- a/arch/x86/include/asm/preempt.h @@ -98,23 +98,23 @@ Signed-off-by: Thomas Gleixner } #else /* !__ASSEMBLY__ */ -@@ -93,6 +96,7 @@ struct thread_info { +@@ -92,6 +95,7 @@ struct thread_info { + #define TIF_NOCPUID 15 /* CPUID is not accessible in userland */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ #define TIF_NOTIFY_SIGNAL 17 /* signal notifications exist */ - #define TIF_SLD 18 /* Restore split lock detection on context switch */ +#define TIF_NEED_RESCHED_LAZY 19 /* lazy rescheduling necessary */ #define TIF_MEMDIE 20 /* is terminating due to OOM killer */ #define TIF_POLLING_NRFLAG 21 /* idle is polling for TIF_NEED_RESCHED */ #define TIF_IO_BITMAP 22 /* uses I/O bitmap */ -@@ -117,6 +121,7 @@ struct thread_info { +@@ -115,6 +119,7 @@ struct thread_info { + #define _TIF_NOCPUID (1 << TIF_NOCPUID) #define _TIF_NOTSC (1 << TIF_NOTSC) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) - #define _TIF_SLD (1 << TIF_SLD) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) #define _TIF_IO_BITMAP (1 << TIF_IO_BITMAP) #define _TIF_SPEC_FORCE_UPDATE (1 << TIF_SPEC_FORCE_UPDATE) -@@ -148,6 +153,8 @@ struct thread_info { +@@ -146,6 +151,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) -- cgit v1.2.1