author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2022-01-21 19:01:55 +0100
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2022-01-21 19:01:55 +0100
commit    b06657798e2003def567a08e959e461019f095bd (patch)
tree      e6c029ef7b02a7271ccb08dc65fabea4d4eb08ac
parent    f1173ecb74fdbfc9b81d7758dd4792801689c9bc (diff)
download  linux-rt-b06657798e2003def567a08e959e461019f095bd.tar.gz

[ANNOUNCE] v5.16.2-rt19 (tag: v5.16.2-rt19-patches)
Dear RT folks!

I'm pleased to announce the v5.16.2-rt19 patch set.

Changes since v5.16.2-rt18:

  - Update the printk series. Patches by John Ogness.

  - Redo the i2c patches introduced in the previous release to address
    other drivers which also look broken.

Known issues:

  - netconsole triggers WARN.

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com

The delta patch against v5.16.2-rt18 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/incr/patch-5.16.2-rt18-rt19.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.16.2-rt19

The RT patch against v5.16.2 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/older/patch-5.16.2-rt19.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.16/older/patches-5.16.2-rt19.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 patches/0001-printk-rename-cpulock-functions.patch                          | 276
 patches/0002-printk-cpu-sync-always-disable-interrupts.patch                |  49
 patches/0003-printk-use-percpu-flag-instead-of-cpu_online.patch             |  50
 patches/0004-printk-get-caller_id-timestamp-after-migration-disab.patch     |  62
 patches/0005-printk-refactor-and-rework-printing-logic.patch                | 536
 patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch     | 177
 patches/0007-printk-add-pr_flush.patch                                      | 159
 patches/0008-printk-add-kthread-console-printers.patch                      | 283
 patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch     | 431
 patches/0010-printk-remove-console_locked.patch                             |  93
 patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch                 |  54
 patches/0012-printk-add-infrastucture-for-atomic-consoles.patch             | 599
 patches/0013-serial-8250-implement-write_atomic.patch (renamed from patches/serial__8250__implement_write_atomic.patch) | 62
 patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch              | 103
 patches/Add_localversion_for_-RT_release.patch                              |   2
 patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch              | 131
 patches/console__add_write_atomic_interface.patch                           | 314
 patches/genirq-Provide-generic_handle_irq_safe.patch                        |  54
 patches/i2c-core-Let-i2c_handle_smbus_host_notify-use-handle.patch          |  40
 patches/i2c-rcar-Allow-interrupt-handler-to-be-threaded.patch               |  49
 patches/kdb__only_use_atomic_consoles_for_output_mirroring.patch            |  53
 patches/panic_remove_oops_id.patch                                          |   4
 patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch     |  40
 patches/printk__add_console_handover.patch                                  |  80
 patches/printk__add_pr_flush.patch                                          | 215
 patches/printk__call_boot_delay_msec_in_printk_delay.patch                  |  41
 patches/printk__introduce_kernel_sync_mode.patch                            | 374
 patches/printk__move_console_printing_to_kthreads.patch                     | 987
 patches/printk__relocate_printk_delay.patch                                 |  61
 patches/printk__remove_deferred_printing.patch                              | 823
 patches/printk__rename_printk_cpulock_API_and_always_disable_interrupts.patch | 117
 patches/printk__use_seqcount_latch_for_console_seq.patch                    | 186
 patches/series                                                              |  31
 33 files changed, 3111 insertions, 3425 deletions
diff --git a/patches/0001-printk-rename-cpulock-functions.patch b/patches/0001-printk-rename-cpulock-functions.patch
new file mode 100644
index 000000000000..0a276401a706
--- /dev/null
+++ b/patches/0001-printk-rename-cpulock-functions.patch
@@ -0,0 +1,276 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 28 Sep 2021 11:27:02 +0206
+Subject: [PATCH 01/14] printk: rename cpulock functions
+
+Since the printk cpulock is CPU-reentrant and since it is used
+in all contexts, its usage must be carefully considered and
+most likely will require programming locklessly. To avoid
+mistaking the printk cpulock as a typical lock, rename it to
+cpu_sync. The main functions then become:
+
+ printk_cpu_sync_get_irqsave(flags);
+ printk_cpu_sync_put_irqrestore(flags);
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 42 ++++++++++++++--------------
+ kernel/printk/printk.c | 71 ++++++++++++++++++++++++-------------------------
+ lib/dump_stack.c | 4 +-
+ lib/nmi_backtrace.c | 4 +-
+ 4 files changed, 61 insertions(+), 60 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -281,43 +281,43 @@ static inline void printk_trigger_flush(
+ #endif
+
+ #ifdef CONFIG_SMP
+-extern int __printk_cpu_trylock(void);
+-extern void __printk_wait_on_cpu_lock(void);
+-extern void __printk_cpu_unlock(void);
++extern int __printk_cpu_sync_try_get(void);
++extern void __printk_cpu_sync_wait(void);
++extern void __printk_cpu_sync_put(void);
+
+ /**
+- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
+- * lock and disable interrupts.
++ * printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning
++ * lock and disable interrupts.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+- * to be passed to printk_cpu_unlock_irqrestore().
++ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+ * If the lock is owned by another CPU, spin until it becomes available.
+ * Interrupts are restored while spinning.
+ */
+-#define printk_cpu_lock_irqsave(flags) \
+- for (;;) { \
+- local_irq_save(flags); \
+- if (__printk_cpu_trylock()) \
+- break; \
+- local_irq_restore(flags); \
+- __printk_wait_on_cpu_lock(); \
++#define printk_cpu_sync_get_irqsave(flags) \
++ for (;;) { \
++ local_irq_save(flags); \
++ if (__printk_cpu_sync_try_get()) \
++ break; \
++ local_irq_restore(flags); \
++ __printk_cpu_sync_wait(); \
+ }
+
+ /**
+- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
+- * lock and restore interrupts.
+- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
++ * printk_cpu_sync_put_irqrestore() - Release the printk cpu-reentrant spinning
++ * lock and restore interrupts.
++ * @flags: Caller's saved interrupt state, from printk_cpu_sync_get_irqsave().
+ */
+-#define printk_cpu_unlock_irqrestore(flags) \
++#define printk_cpu_sync_put_irqrestore(flags) \
+ do { \
+- __printk_cpu_unlock(); \
++ __printk_cpu_sync_put(); \
+ local_irq_restore(flags); \
+- } while (0) \
++ } while (0)
+
+ #else
+
+-#define printk_cpu_lock_irqsave(flags) ((void)flags)
+-#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
++#define printk_cpu_sync_get_irqsave(flags) ((void)flags)
++#define printk_cpu_sync_put_irqrestore(flags) ((void)flags)
+
+ #endif /* CONFIG_SMP */
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -3586,26 +3586,26 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
+ #endif
+
+ #ifdef CONFIG_SMP
+-static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
+-static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
++static atomic_t printk_cpu_sync_owner = ATOMIC_INIT(-1);
++static atomic_t printk_cpu_sync_nested = ATOMIC_INIT(0);
+
+ /**
+- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
+- * spinning lock is not owned by any CPU.
++ * __printk_cpu_sync_wait() - Busy wait until the printk cpu-reentrant
++ * spinning lock is not owned by any CPU.
+ *
+ * Context: Any context.
+ */
+-void __printk_wait_on_cpu_lock(void)
++void __printk_cpu_sync_wait(void)
+ {
+ do {
+ cpu_relax();
+- } while (atomic_read(&printk_cpulock_owner) != -1);
++ } while (atomic_read(&printk_cpu_sync_owner) != -1);
+ }
+-EXPORT_SYMBOL(__printk_wait_on_cpu_lock);
++EXPORT_SYMBOL(__printk_cpu_sync_wait);
+
+ /**
+- * __printk_cpu_trylock() - Try to acquire the printk cpu-reentrant
+- * spinning lock.
++ * __printk_cpu_sync_try_get() - Try to acquire the printk cpu-reentrant
++ * spinning lock.
+ *
+ * If no processor has the lock, the calling processor takes the lock and
+ * becomes the owner. If the calling processor is already the owner of the
+@@ -3614,7 +3614,7 @@ EXPORT_SYMBOL(__printk_wait_on_cpu_lock)
+ * Context: Any context. Expects interrupts to be disabled.
+ * Return: 1 on success, otherwise 0.
+ */
+-int __printk_cpu_trylock(void)
++int __printk_cpu_sync_try_get(void)
+ {
+ int cpu;
+ int old;
+@@ -3624,79 +3624,80 @@ int __printk_cpu_trylock(void)
+ /*
+ * Guarantee loads and stores from this CPU when it is the lock owner
+ * are _not_ visible to the previous lock owner. This pairs with
+- * __printk_cpu_unlock:B.
++ * __printk_cpu_sync_put:B.
+ *
+ * Memory barrier involvement:
+ *
+- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B, then
+- * __printk_cpu_unlock:A can never read from __printk_cpu_trylock:B.
++ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
++ * then __printk_cpu_sync_put:A can never read from
++ * __printk_cpu_sync_try_get:B.
+ *
+ * Relies on:
+ *
+- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
++ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
+ * of the previous CPU
+ * matching
+- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+- * of this CPU
++ * ACQUIRE from __printk_cpu_sync_try_get:A to
++ * __printk_cpu_sync_try_get:B of this CPU
+ */
+- old = atomic_cmpxchg_acquire(&printk_cpulock_owner, -1,
+- cpu); /* LMM(__printk_cpu_trylock:A) */
++ old = atomic_cmpxchg_acquire(&printk_cpu_sync_owner, -1,
++ cpu); /* LMM(__printk_cpu_sync_try_get:A) */
+ if (old == -1) {
+ /*
+ * This CPU is now the owner and begins loading/storing
+- * data: LMM(__printk_cpu_trylock:B)
++ * data: LMM(__printk_cpu_sync_try_get:B)
+ */
+ return 1;
+
+ } else if (old == cpu) {
+ /* This CPU is already the owner. */
+- atomic_inc(&printk_cpulock_nested);
++ atomic_inc(&printk_cpu_sync_nested);
+ return 1;
+ }
+
+ return 0;
+ }
+-EXPORT_SYMBOL(__printk_cpu_trylock);
++EXPORT_SYMBOL(__printk_cpu_sync_try_get);
+
+ /**
+- * __printk_cpu_unlock() - Release the printk cpu-reentrant spinning lock.
++ * __printk_cpu_sync_put() - Release the printk cpu-reentrant spinning lock.
+ *
+ * The calling processor must be the owner of the lock.
+ *
+ * Context: Any context. Expects interrupts to be disabled.
+ */
+-void __printk_cpu_unlock(void)
++void __printk_cpu_sync_put(void)
+ {
+- if (atomic_read(&printk_cpulock_nested)) {
+- atomic_dec(&printk_cpulock_nested);
++ if (atomic_read(&printk_cpu_sync_nested)) {
++ atomic_dec(&printk_cpu_sync_nested);
+ return;
+ }
+
+ /*
+ * This CPU is finished loading/storing data:
+- * LMM(__printk_cpu_unlock:A)
++ * LMM(__printk_cpu_sync_put:A)
+ */
+
+ /*
+ * Guarantee loads and stores from this CPU when it was the
+ * lock owner are visible to the next lock owner. This pairs
+- * with __printk_cpu_trylock:A.
++ * with __printk_cpu_sync_try_get:A.
+ *
+ * Memory barrier involvement:
+ *
+- * If __printk_cpu_trylock:A reads from __printk_cpu_unlock:B,
+- * then __printk_cpu_trylock:B reads from __printk_cpu_unlock:A.
++ * If __printk_cpu_sync_try_get:A reads from __printk_cpu_sync_put:B,
++ * then __printk_cpu_sync_try_get:B reads from __printk_cpu_sync_put:A.
+ *
+ * Relies on:
+ *
+- * RELEASE from __printk_cpu_unlock:A to __printk_cpu_unlock:B
++ * RELEASE from __printk_cpu_sync_put:A to __printk_cpu_sync_put:B
+ * of this CPU
+ * matching
+- * ACQUIRE from __printk_cpu_trylock:A to __printk_cpu_trylock:B
+- * of the next CPU
++ * ACQUIRE from __printk_cpu_sync_try_get:A to
++ * __printk_cpu_sync_try_get:B of the next CPU
+ */
+- atomic_set_release(&printk_cpulock_owner,
+- -1); /* LMM(__printk_cpu_unlock:B) */
++ atomic_set_release(&printk_cpu_sync_owner,
++ -1); /* LMM(__printk_cpu_sync_put:B) */
+ }
+-EXPORT_SYMBOL(__printk_cpu_unlock);
++EXPORT_SYMBOL(__printk_cpu_sync_put);
+ #endif /* CONFIG_SMP */
+--- a/lib/dump_stack.c
++++ b/lib/dump_stack.c
+@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl
+ * Permit this cpu to perform nested stack dumps while serialising
+ * against other CPUs
+ */
+- printk_cpu_lock_irqsave(flags);
++ printk_cpu_sync_get_irqsave(flags);
+ __dump_stack(log_lvl);
+- printk_cpu_unlock_irqrestore(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+ }
+ EXPORT_SYMBOL(dump_stack_lvl);
+
+--- a/lib/nmi_backtrace.c
++++ b/lib/nmi_backtrace.c
+@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
+ * Allow nested NMI backtraces while serializing
+ * against other CPUs.
+ */
+- printk_cpu_lock_irqsave(flags);
++ printk_cpu_sync_get_irqsave(flags);
+ if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
+ pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
+ cpu, (void *)instruction_pointer(regs));
+@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
+ else
+ dump_stack();
+ }
+- printk_cpu_unlock_irqrestore(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+ cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
+ return true;
+ }
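
A usage sketch (not part of the series): a caller that wants a multi-line
diagnostic to come out as one coherent block, without interleaving output
from other CPUs, wraps it in the renamed helpers, just as dump_stack_lvl()
does above. The helper names come from this patch; dump_foo_state() is a
hypothetical caller.

	/* Sketch only: serialize a multi-line dump against other CPUs. */
	static void dump_foo_state(void)
	{
		unsigned long flags;

		/*
		 * Spins until this CPU owns the cpu_sync; interrupts are
		 * disabled while owning it, restored while waiting.
		 */
		printk_cpu_sync_get_irqsave(flags);
		pr_err("foo state:\n");
		pr_err("  lines printed here are not interleaved\n");
		printk_cpu_sync_put_irqrestore(flags);
	}
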
diff --git a/patches/0002-printk-cpu-sync-always-disable-interrupts.patch b/patches/0002-printk-cpu-sync-always-disable-interrupts.patch
new file mode 100644
index 000000000000..0ddf311ee46a
--- /dev/null
+++ b/patches/0002-printk-cpu-sync-always-disable-interrupts.patch
@@ -0,0 +1,49 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 3 Aug 2021 13:00:00 +0206
+Subject: [PATCH 02/14] printk: cpu sync always disable interrupts
+
+The CPU sync functions are a NOP for !CONFIG_SMP. But for
+!CONFIG_SMP they still need to disable interrupts in order to
+preserve context within the CPU sync sections.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -285,9 +285,16 @@ extern int __printk_cpu_sync_try_get(voi
+ extern void __printk_cpu_sync_wait(void);
+ extern void __printk_cpu_sync_put(void);
+
++#else
++
++#define __printk_cpu_sync_try_get() true
++#define __printk_cpu_sync_wait()
++#define __printk_cpu_sync_put()
++#endif /* CONFIG_SMP */
++
+ /**
+- * printk_cpu_sync_get_irqsave() - Acquire the printk cpu-reentrant spinning
+- * lock and disable interrupts.
++ * printk_cpu_sync_get_irqsave() - Disable interrupts and acquire the printk
++ * cpu-reentrant spinning lock.
+ * @flags: Stack-allocated storage for saving local interrupt state,
+ * to be passed to printk_cpu_sync_put_irqrestore().
+ *
+@@ -314,13 +321,6 @@ extern void __printk_cpu_sync_put(void);
+ local_irq_restore(flags); \
+ } while (0)
+
+-#else
+-
+-#define printk_cpu_sync_get_irqsave(flags) ((void)flags)
+-#define printk_cpu_sync_put_irqrestore(flags) ((void)flags)
+-
+-#endif /* CONFIG_SMP */
+-
+ extern int kptr_restrict;
+
+ /**
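
After this change the *_irqsave()/*_irqrestore() macros are shared by both
configurations and only the low-level stubs differ. On !CONFIG_SMP,
printk_cpu_sync_get_irqsave(flags) therefore reduces to roughly the
following (a sketch of the macro expansion, not literal kernel code):

	/*
	 * !CONFIG_SMP: __printk_cpu_sync_try_get() is the constant true,
	 * so the loop body runs once and leaves interrupts disabled.
	 */
	for (;;) {
		local_irq_save(flags);
		if (true)	/* __printk_cpu_sync_try_get() stub */
			break;
		local_irq_restore(flags);	/* never reached on UP */
		/* __printk_cpu_sync_wait() stub: empty */
	}
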
diff --git a/patches/0003-printk-use-percpu-flag-instead-of-cpu_online.patch b/patches/0003-printk-use-percpu-flag-instead-of-cpu_online.patch
new file mode 100644
index 000000000000..98c318bca765
--- /dev/null
+++ b/patches/0003-printk-use-percpu-flag-instead-of-cpu_online.patch
@@ -0,0 +1,50 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 10 Nov 2021 17:19:25 +0106
+Subject: [PATCH 03/14] printk: use percpu flag instead of cpu_online()
+
+The CON_ANYTIME console flag is used to label consoles that will
+work correctly before percpu resources are allocated. To check
+the condition, cpu_online(raw_smp_processor_id()) was used.
+However, this is odd because CPUs can go offline at a later point.
+Also, the function is forced to use the raw_ variant because
+migration is not disabled.
+
+Since commit ab6f762f0f53 ("printk: queue wake_up_klogd irq_work
+only if per-CPU areas are ready") there is a variable to identify
+if percpu resources have been allocated. Use that variable instead
+of cpu_online(raw_smp_processor_id()).
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2 +-
+ kernel/printk/printk.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -133,7 +133,7 @@ static inline int con_debug_leave(void)
+ #define CON_CONSDEV (2) /* Preferred console, /dev/console */
+ #define CON_ENABLED (4)
+ #define CON_BOOT (8)
+-#define CON_ANYTIME (16) /* Safe to call when cpu is offline */
++#define CON_ANYTIME (16) /* Safe to call before per-cpu resources ready */
+ #define CON_BRL (32) /* Used for a braille device */
+ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2578,11 +2578,11 @@ static int have_callable_console(void)
+ *
+ * Console drivers may assume that per-cpu resources have been allocated. So
+ * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+- * call them until this CPU is officially up.
++ * call them until per-cpu resources have been allocated.
+ */
+ static inline int can_use_console(void)
+ {
+- return cpu_online(raw_smp_processor_id()) || have_callable_console();
++ return (printk_percpu_data_ready() || have_callable_console());
+ }
+
+ /**
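
For context, CON_ANYTIME is a registration-time opt-in. A console whose
write() callback makes no use of per-cpu resources could declare it as
below; early_foo_console and early_foo_write() are hypothetical, only the
flag semantics come from this patch:

	static struct console early_foo_console = {
		.name	= "earlyfoo",
		.write	= early_foo_write,	/* must not touch per-cpu data */
		.flags	= CON_PRINTBUFFER | CON_ANYTIME,
		.index	= -1,
	};
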
diff --git a/patches/0004-printk-get-caller_id-timestamp-after-migration-disab.patch b/patches/0004-printk-get-caller_id-timestamp-after-migration-disab.patch
new file mode 100644
index 000000000000..2e9db39cab76
--- /dev/null
+++ b/patches/0004-printk-get-caller_id-timestamp-after-migration-disab.patch
@@ -0,0 +1,62 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 10 Nov 2021 17:26:21 +0106
+Subject: [PATCH 04/14] printk: get caller_id/timestamp after migration disable
+
+Currently the local CPU timestamp and caller_id for the record are
+collected while migration is enabled. Since this information is
+CPU-specific, it should be collected with migration disabled.
+
+Migration is disabled immediately after collecting this information
+anyway, so just move the information collection to after the
+migration disabling.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2018,7 +2018,7 @@ static inline void printk_delay(void)
+ static inline u32 printk_caller_id(void)
+ {
+ return in_task() ? task_pid_nr(current) :
+- 0x80000000 + raw_smp_processor_id();
++ 0x80000000 + smp_processor_id();
+ }
+
+ /**
+@@ -2100,7 +2100,6 @@ int vprintk_store(int facility, int leve
+ const struct dev_printk_info *dev_info,
+ const char *fmt, va_list args)
+ {
+- const u32 caller_id = printk_caller_id();
+ struct prb_reserved_entry e;
+ enum printk_info_flags flags = 0;
+ struct printk_record r;
+@@ -2110,10 +2109,14 @@ int vprintk_store(int facility, int leve
+ u8 *recursion_ptr;
+ u16 reserve_size;
+ va_list args2;
++ u32 caller_id;
+ u16 text_len;
+ int ret = 0;
+ u64 ts_nsec;
+
++ if (!printk_enter_irqsave(recursion_ptr, irqflags))
++ return 0;
++
+ /*
+ * Since the duration of printk() can vary depending on the message
+ * and state of the ringbuffer, grab the timestamp now so that it is
+@@ -2122,8 +2125,7 @@ int vprintk_store(int facility, int leve
+ */
+ ts_nsec = local_clock();
+
+- if (!printk_enter_irqsave(recursion_ptr, irqflags))
+- return 0;
++ caller_id = printk_caller_id();
+
+ /*
+ * The sprintf needs to come first since the syslog prefix might be
diff --git a/patches/0005-printk-refactor-and-rework-printing-logic.patch b/patches/0005-printk-refactor-and-rework-printing-logic.patch
new file mode 100644
index 000000000000..97e3fbd0477c
--- /dev/null
+++ b/patches/0005-printk-refactor-and-rework-printing-logic.patch
@@ -0,0 +1,536 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Tue, 10 Aug 2021 16:32:52 +0206
+Subject: [PATCH 05/14] printk: refactor and rework printing logic
+
+Refactor/rework printing logic in order to prepare for moving to threaded
+console printing.
+
+- Move @console_seq into struct console so that the current "position" of
+ each console can be tracked individually.
+
+- Move @console_dropped into struct console so that the current drop count
+ of each console can be tracked individually.
+
+- Modify printing logic so that each console independently loads, prepares,
+ and prints its next record.
+
+- Remove exclusive_console logic. Since console positions are handled
+ independently, replaying past records occurs naturally.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2
+ kernel/printk/printk.c | 380 ++++++++++++++++++++++++------------------------
+ 2 files changed, 194 insertions(+), 188 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -151,6 +151,8 @@ struct console {
+ int cflag;
+ uint ispeed;
+ uint ospeed;
++ u64 seq;
++ unsigned long dropped;
+ void *data;
+ struct console *next;
+ };
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -267,11 +267,6 @@ static void __up_console_sem(unsigned lo
+ static int console_locked, console_suspended;
+
+ /*
+- * If exclusive_console is non-NULL then only this console is to be printed to.
+- */
+-static struct console *exclusive_console;
+-
+-/*
+ * Array of consoles built from command line options (console=)
+ */
+
+@@ -361,12 +356,6 @@ static u64 syslog_seq;
+ static size_t syslog_partial;
+ static bool syslog_time;
+
+-/* All 3 protected by @console_sem. */
+-/* the next printk record to write to the console */
+-static u64 console_seq;
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
+-
+ struct latched_seq {
+ seqcount_latch_t latch;
+ u64 val[2];
+@@ -1888,47 +1877,26 @@ static int console_trylock_spinning(void
+ }
+
+ /*
+- * Call the console drivers, asking them to write out
+- * log_buf[start] to log_buf[end - 1].
+- * The console_lock must be held.
++ * Call the specified console driver, asking it to write out the specified
++ * text and length. For non-extended consoles, if any records have been
++ * dropped, a dropped message will be written out first.
+ */
+-static void call_console_drivers(const char *ext_text, size_t ext_len,
+- const char *text, size_t len)
++static void call_console_driver(struct console *con, const char *text, size_t len)
+ {
+ static char dropped_text[64];
+- size_t dropped_len = 0;
+- struct console *con;
++ size_t dropped_len;
+
+ trace_console_rcuidle(text, len);
+
+- if (!console_drivers)
+- return;
+-
+- if (console_dropped) {
++ if (con->dropped && !(con->flags & CON_EXTENDED)) {
+ dropped_len = snprintf(dropped_text, sizeof(dropped_text),
+ "** %lu printk messages dropped **\n",
+- console_dropped);
+- console_dropped = 0;
++ con->dropped);
++ con->dropped = 0;
++ con->write(con, dropped_text, dropped_len);
+ }
+
+- for_each_console(con) {
+- if (exclusive_console && con != exclusive_console)
+- continue;
+- if (!(con->flags & CON_ENABLED))
+- continue;
+- if (!con->write)
+- continue;
+- if (!cpu_online(smp_processor_id()) &&
+- !(con->flags & CON_ANYTIME))
+- continue;
+- if (con->flags & CON_EXTENDED)
+- con->write(con, ext_text, ext_len);
+- else {
+- if (dropped_len)
+- con->write(con, dropped_text, dropped_len);
+- con->write(con, text, len);
+- }
+- }
++ con->write(con, text, len);
+ }
+
+ /*
+@@ -2225,9 +2193,6 @@ asmlinkage int vprintk_emit(int facility
+ in_sched = true;
+ }
+
+- boot_delay_msec(level);
+- printk_delay();
+-
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+ /* If called from the scheduler, we can not call up(). */
+@@ -2279,11 +2244,9 @@ EXPORT_SYMBOL(_printk);
+
+ #define prb_read_valid(rb, seq, r) false
+ #define prb_first_valid_seq(rb) 0
++#define prb_next_seq(rb) 0
+
+ static u64 syslog_seq;
+-static u64 console_seq;
+-static u64 exclusive_console_stop_seq;
+-static unsigned long console_dropped;
+
+ static size_t record_print_text(const struct printk_record *r,
+ bool syslog, bool time)
+@@ -2300,8 +2263,7 @@ static ssize_t msg_print_ext_body(char *
+ struct dev_printk_info *dev_info) { return 0; }
+ static void console_lock_spinning_enable(void) { }
+ static int console_lock_spinning_disable_and_check(void) { return 0; }
+-static void call_console_drivers(const char *ext_text, size_t ext_len,
+- const char *text, size_t len) {}
++static void call_console_driver(struct console *con, const char *text, size_t len) {}
+ static bool suppress_message_printing(int level) { return false; }
+
+ #endif /* CONFIG_PRINTK */
+@@ -2560,31 +2522,166 @@ int is_console_locked(void)
+ EXPORT_SYMBOL(is_console_locked);
+
+ /*
+- * Check if we have any console that is capable of printing while cpu is
+- * booting or shutting down. Requires console_sem.
++ * Check if the given console is currently capable and allowed to print
++ * records.
++ *
++ * Requires the console_lock.
+ */
+-static int have_callable_console(void)
++static inline bool console_is_usable(struct console *con)
+ {
+- struct console *con;
++ if (!(con->flags & CON_ENABLED))
++ return false;
+
+- for_each_console(con)
+- if ((con->flags & CON_ENABLED) &&
+- (con->flags & CON_ANYTIME))
+- return 1;
++ if (!con->write)
++ return false;
+
+- return 0;
++ /*
++ * Console drivers may assume that per-cpu resources have been
++ * allocated. So unless they're explicitly marked as being able to
++ * cope (CON_ANYTIME) don't call them until per-cpu resources have
++ * been allocated.
++ */
++ if (!printk_percpu_data_ready() &&
++ !(con->flags & CON_ANYTIME))
++ return false;
++
++ return true;
++}
++
++static void __console_unlock(void)
++{
++ console_locked = 0;
++ up_console_sem();
++}
++
++/*
++ * Print one record for the given console. The record printed is whatever
++ * record is the next available record for the given console.
++ *
++ * Requires the console_lock.
++ *
++ * Returns false if the given console has no next record to print, otherwise
++ * true.
++ *
++ * @handover will be set to true if a printk waiter has taken over the
++ * console_lock, in which case the caller is no longer holding the
++ * console_lock.
++ */
++static bool console_emit_next_record(struct console *con, bool *handover)
++{
++ static char ext_text[CONSOLE_EXT_LOG_MAX];
++ static char text[CONSOLE_LOG_MAX];
++ struct printk_info info;
++ struct printk_record r;
++ unsigned long flags;
++ char *write_text;
++ size_t len;
++
++ prb_rec_init_rd(&r, &info, text, sizeof(text));
++
++ if (!prb_read_valid(prb, con->seq, &r))
++ return false;
++
++ if (con->seq != r.info->seq) {
++ con->dropped += r.info->seq - con->seq;
++ con->seq = r.info->seq;
++ }
++
++ /* Skip record that has level above the console loglevel. */
++ if (suppress_message_printing(r.info->level)) {
++ con->seq++;
++ goto skip;
++ }
++
++ if (con->flags & CON_EXTENDED) {
++ write_text = &ext_text[0];
++ len = info_print_ext_header(ext_text, sizeof(ext_text), r.info);
++ len += msg_print_ext_body(ext_text + len, sizeof(ext_text) - len,
++ &r.text_buf[0], r.info->text_len, &r.info->dev_info);
++ } else {
++ write_text = &text[0];
++ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
++ }
++
++ /*
++ * While actively printing out messages, if another printk()
++ * were to occur on another CPU, it may wait for this one to
++ * finish. This task can not be preempted if there is a
++ * waiter waiting to take over.
++ *
++ * Interrupts are disabled because the hand over to a waiter
++ * must not be interrupted until the hand over is completed
++ * (@console_waiter is cleared).
++ */
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
++
++ stop_critical_timings(); /* don't trace print latency */
++ call_console_driver(con, write_text, len);
++ start_critical_timings();
++
++ con->seq++;
++
++ *handover = console_lock_spinning_disable_and_check();
++ printk_safe_exit_irqrestore(flags);
++
++ boot_delay_msec(r.info->level);
++ printk_delay();
++skip:
++ return true;
+ }
+
+ /*
+- * Can we actually use the console at this time on this cpu?
++ * Print out all remaining records to all consoles.
++ *
++ * Requires the console_lock.
++ *
++ * Returns true if a console was available for flushing, otherwise false.
+ *
+- * Console drivers may assume that per-cpu resources have been allocated. So
+- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
+- * call them until per-cpu resources have been allocated.
++ * @next_seq is set to the highest sequence number of all of the consoles that
++ * were flushed.
++ *
++ * @handover will be set to true if a printk waiter has taken over the
++ * console_lock, in which case the caller is no longer holding the
++ * console_lock.
+ */
+-static inline int can_use_console(void)
++static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+ {
+- return (printk_percpu_data_ready() || have_callable_console());
++ bool any_usable = false;
++ struct console *con;
++ bool any_progress;
++
++ *next_seq = 0;
++ *handover = false;
++
++ do {
++ any_progress = false;
++
++ for_each_console(con) {
++ bool progress;
++
++ if (!console_is_usable(con))
++ continue;
++ any_usable = true;
++
++ progress = console_emit_next_record(con, handover);
++ if (*handover)
++ return true;
++
++ /* Track the highest seq flushed. */
++ if (con->seq > *next_seq)
++ *next_seq = con->seq;
++
++ if (!progress)
++ continue;
++ any_progress = true;
++
++ if (do_cond_resched)
++ cond_resched();
++ }
++ } while (any_progress);
++
++ return any_usable;
+ }
+
+ /**
+@@ -2603,21 +2700,16 @@ static inline int can_use_console(void)
+ */
+ void console_unlock(void)
+ {
+- static char ext_text[CONSOLE_EXT_LOG_MAX];
+- static char text[CONSOLE_LOG_MAX];
+- unsigned long flags;
+- bool do_cond_resched, retry;
+- struct printk_info info;
+- struct printk_record r;
+- u64 __maybe_unused next_seq;
++ bool do_cond_resched;
++ bool handover;
++ bool flushed;
++ u64 next_seq;
+
+ if (console_suspended) {
+ up_console_sem();
+ return;
+ }
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text));
+-
+ /*
+ * Console drivers are called with interrupts disabled, so
+ * @console_may_schedule should be cleared before; however, we may
+@@ -2633,110 +2725,27 @@ void console_unlock(void)
+ * and cleared after the "again" goto label.
+ */
+ do_cond_resched = console_may_schedule;
+-again:
+- console_may_schedule = 0;
+-
+- /*
+- * We released the console_sem lock, so we need to recheck if
+- * cpu is online and (if not) is there at least one CON_ANYTIME
+- * console.
+- */
+- if (!can_use_console()) {
+- console_locked = 0;
+- up_console_sem();
+- return;
+- }
+
+- for (;;) {
+- size_t ext_len = 0;
+- int handover;
+- size_t len;
++ do {
++ console_may_schedule = 0;
+
+-skip:
+- if (!prb_read_valid(prb, console_seq, &r))
++ flushed = console_flush_all(do_cond_resched, &next_seq, &handover);
++ if (handover)
+ break;
+
+- if (console_seq != r.info->seq) {
+- console_dropped += r.info->seq - console_seq;
+- console_seq = r.info->seq;
+- }
+-
+- if (suppress_message_printing(r.info->level)) {
+- /*
+- * Skip record we have buffered and already printed
+- * directly to the console when we received it, and
+- * record that has level above the console loglevel.
+- */
+- console_seq++;
+- goto skip;
+- }
+-
+- /* Output to all consoles once old messages replayed. */
+- if (unlikely(exclusive_console &&
+- console_seq >= exclusive_console_stop_seq)) {
+- exclusive_console = NULL;
+- }
++ __console_unlock();
+
+- /*
+- * Handle extended console text first because later
+- * record_print_text() will modify the record buffer in-place.
+- */
+- if (nr_ext_console_drivers) {
+- ext_len = info_print_ext_header(ext_text,
+- sizeof(ext_text),
+- r.info);
+- ext_len += msg_print_ext_body(ext_text + ext_len,
+- sizeof(ext_text) - ext_len,
+- &r.text_buf[0],
+- r.info->text_len,
+- &r.info->dev_info);
+- }
+- len = record_print_text(&r,
+- console_msg_format & MSG_FORMAT_SYSLOG,
+- printk_time);
+- console_seq++;
++ /* Were there any consoles available for flushing? */
++ if (!flushed)
++ break;
+
+ /*
+- * While actively printing out messages, if another printk()
+- * were to occur on another CPU, it may wait for this one to
+- * finish. This task can not be preempted if there is a
+- * waiter waiting to take over.
+- *
+- * Interrupts are disabled because the hand over to a waiter
+- * must not be interrupted until the hand over is completed
+- * (@console_waiter is cleared).
++ * Some context may have added new records after
++ * console_flush_all() but before unlocking the console.
++ * Re-check if there is a new record to flush. If the trylock
++ * fails, another context is already handling the printing.
+ */
+- printk_safe_enter_irqsave(flags);
+- console_lock_spinning_enable();
+-
+- stop_critical_timings(); /* don't trace print latency */
+- call_console_drivers(ext_text, ext_len, text, len);
+- start_critical_timings();
+-
+- handover = console_lock_spinning_disable_and_check();
+- printk_safe_exit_irqrestore(flags);
+- if (handover)
+- return;
+-
+- if (do_cond_resched)
+- cond_resched();
+- }
+-
+- /* Get consistent value of the next-to-be-used sequence number. */
+- next_seq = console_seq;
+-
+- console_locked = 0;
+- up_console_sem();
+-
+- /*
+- * Someone could have filled up the buffer again, so re-check if there's
+- * something to flush. In case we cannot trylock the console_sem again,
+- * there's a new owner and the console_unlock() from them will do the
+- * flush, no worries.
+- */
+- retry = prb_read_valid(prb, next_seq, NULL);
+- if (retry && console_trylock())
+- goto again;
++ } while (prb_read_valid(prb, next_seq, NULL) && console_trylock());
+ }
+ EXPORT_SYMBOL(console_unlock);
+
+@@ -2796,8 +2805,14 @@ void console_flush_on_panic(enum con_flu
+ console_trylock();
+ console_may_schedule = 0;
+
+- if (mode == CONSOLE_REPLAY_ALL)
+- console_seq = prb_first_valid_seq(prb);
++ if (mode == CONSOLE_REPLAY_ALL) {
++ struct console *c;
++ u64 seq;
++
++ seq = prb_first_valid_seq(prb);
++ for_each_console(c)
++ c->seq = seq;
++ }
+ console_unlock();
+ }
+
+@@ -3019,26 +3034,15 @@ void register_console(struct console *ne
+ if (newcon->flags & CON_EXTENDED)
+ nr_ext_console_drivers++;
+
++ newcon->dropped = 0;
+ if (newcon->flags & CON_PRINTBUFFER) {
+- /*
+- * console_unlock(); will print out the buffered messages
+- * for us.
+- *
+- * We're about to replay the log buffer. Only do this to the
+- * just-registered console to avoid excessive message spam to
+- * the already-registered consoles.
+- *
+- * Set exclusive_console with disabled interrupts to reduce
+- * race window with eventual console_flush_on_panic() that
+- * ignores console_lock.
+- */
+- exclusive_console = newcon;
+- exclusive_console_stop_seq = console_seq;
+-
+ /* Get a consistent copy of @syslog_seq. */
+ mutex_lock(&syslog_lock);
+- console_seq = syslog_seq;
++ newcon->seq = syslog_seq;
+ mutex_unlock(&syslog_lock);
++ } else {
++ /* Begin with next message. */
++ newcon->seq = prb_next_seq(prb);
+ }
+ console_unlock();
+ console_sysfs_notify();
diff --git a/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch b/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch
new file mode 100644
index 000000000000..98afc516a82d
--- /dev/null
+++ b/patches/0006-printk-move-buffer-definitions-into-console_emit_nex.patch
@@ -0,0 +1,177 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 22 Nov 2021 17:04:02 +0106
+Subject: [PATCH 06/14] printk: move buffer definitions into
+ console_emit_next_record() caller
+
+Extended consoles print extended messages and do not print messages about
+dropped records.
+
+Non-extended consoles print "normal" messages as well as extra messages
+about dropped records.
+
+Currently the buffers for these various message types are defined within
+the functions that might use them and their usage is based upon the
+CON_EXTENDED flag. This will be a problem when moving to kthread printers
+because each printer must be able to provide its own buffers.
+
+Move all the message buffer definitions outside of
+console_emit_next_record(). The caller knows if extended or dropped
+messages should be printed and can specify the appropriate buffers to
+use. The console_emit_next_record() and call_console_driver() functions
+can know what to print based on whether specified buffers are non-NULL.
+
+With this change, buffer definition/allocation/specification is separated
+from the code that does the various types of string printing.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 58 ++++++++++++++++++++++++++++++++++---------------
+ 1 file changed, 41 insertions(+), 17 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -381,6 +381,9 @@ static struct latched_seq clear_seq = {
+ /* the maximum size of a formatted record (i.e. with prefix added per line) */
+ #define CONSOLE_LOG_MAX 1024
+
++/* the maximum size for a dropped text message */
++#define DROPPED_TEXT_MAX 64
++
+ /* the maximum size allowed to be reserved for a record */
+ #define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
+
+@@ -1878,18 +1881,18 @@ static int console_trylock_spinning(void
+
+ /*
+ * Call the specified console driver, asking it to write out the specified
+- * text and length. For non-extended consoles, if any records have been
++ * text and length. If @dropped_text is non-NULL and any records have been
+ * dropped, a dropped message will be written out first.
+ */
+-static void call_console_driver(struct console *con, const char *text, size_t len)
++static void call_console_driver(struct console *con, const char *text, size_t len,
++ char *dropped_text)
+ {
+- static char dropped_text[64];
+ size_t dropped_len;
+
+ trace_console_rcuidle(text, len);
+
+- if (con->dropped && !(con->flags & CON_EXTENDED)) {
+- dropped_len = snprintf(dropped_text, sizeof(dropped_text),
++ if (con->dropped && dropped_text) {
++ dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
+ "** %lu printk messages dropped **\n",
+ con->dropped);
+ con->dropped = 0;
+@@ -2240,6 +2243,7 @@ EXPORT_SYMBOL(_printk);
+ #else /* CONFIG_PRINTK */
+
+ #define CONSOLE_LOG_MAX 0
++#define DROPPED_TEXT_MAX 0
+ #define printk_time false
+
+ #define prb_read_valid(rb, seq, r) false
+@@ -2263,7 +2267,8 @@ static ssize_t msg_print_ext_body(char *
+ struct dev_printk_info *dev_info) { return 0; }
+ static void console_lock_spinning_enable(void) { }
+ static int console_lock_spinning_disable_and_check(void) { return 0; }
+-static void call_console_driver(struct console *con, const char *text, size_t len) {}
++static void call_console_driver(struct console *con, const char *text, size_t len,
++ char *dropped_text) {}
+ static bool suppress_message_printing(int level) { return false; }
+
+ #endif /* CONFIG_PRINTK */
+@@ -2558,6 +2563,14 @@ static void __console_unlock(void)
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
+ *
++ * @text is a buffer of size CONSOLE_LOG_MAX.
++ *
++ * If extended messages should be printed, @ext_text is a buffer of size
++ * CONSOLE_EXT_LOG_MAX. Otherwise @ext_text must be NULL.
++ *
++ * If dropped messages should be printed, @dropped_text is a buffer of size
++ * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
++ *
+ * Requires the console_lock.
+ *
+ * Returns false if the given console has no next record to print, otherwise
+@@ -2567,17 +2580,16 @@ static void __console_unlock(void)
+ * console_lock, in which case the caller is no longer holding the
+ * console_lock.
+ */
+-static bool console_emit_next_record(struct console *con, bool *handover)
++static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
++ char *dropped_text, bool *handover)
+ {
+- static char ext_text[CONSOLE_EXT_LOG_MAX];
+- static char text[CONSOLE_LOG_MAX];
+ struct printk_info info;
+ struct printk_record r;
+ unsigned long flags;
+ char *write_text;
+ size_t len;
+
+- prb_rec_init_rd(&r, &info, text, sizeof(text));
++ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+ if (!prb_read_valid(prb, con->seq, &r))
+ return false;
+@@ -2593,13 +2605,13 @@ static bool console_emit_next_record(str
+ goto skip;
+ }
+
+- if (con->flags & CON_EXTENDED) {
+- write_text = &ext_text[0];
+- len = info_print_ext_header(ext_text, sizeof(ext_text), r.info);
+- len += msg_print_ext_body(ext_text + len, sizeof(ext_text) - len,
++ if (ext_text) {
++ write_text = ext_text;
++ len = info_print_ext_header(ext_text, CONSOLE_EXT_LOG_MAX, r.info);
++ len += msg_print_ext_body(ext_text + len, CONSOLE_EXT_LOG_MAX - len,
+ &r.text_buf[0], r.info->text_len, &r.info->dev_info);
+ } else {
+- write_text = &text[0];
++ write_text = text;
+ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
+ }
+
+@@ -2617,7 +2629,7 @@ static bool console_emit_next_record(str
+ console_lock_spinning_enable();
+
+ stop_critical_timings(); /* don't trace print latency */
+- call_console_driver(con, write_text, len);
++ call_console_driver(con, write_text, len, dropped_text);
+ start_critical_timings();
+
+ con->seq++;
+@@ -2647,6 +2659,9 @@ static bool console_emit_next_record(str
+ */
+ static bool console_flush_all(bool do_cond_resched, u64 *next_seq, bool *handover)
+ {
++ static char dropped_text[DROPPED_TEXT_MAX];
++ static char ext_text[CONSOLE_EXT_LOG_MAX];
++ static char text[CONSOLE_LOG_MAX];
+ bool any_usable = false;
+ struct console *con;
+ bool any_progress;
+@@ -2664,7 +2679,16 @@ static bool console_flush_all(bool do_co
+ continue;
+ any_usable = true;
+
+- progress = console_emit_next_record(con, handover);
++ if (con->flags & CON_EXTENDED) {
++ /* Extended consoles do not print "dropped messages". */
++ progress = console_emit_next_record(con, &text[0],
++ &ext_text[0], NULL,
++ handover);
++ } else {
++ progress = console_emit_next_record(con, &text[0],
++ NULL, &dropped_text[0],
++ handover);
++ }
+ if (*handover)
+ return true;
+
diff --git a/patches/0007-printk-add-pr_flush.patch b/patches/0007-printk-add-pr_flush.patch
new file mode 100644
index 000000000000..34ecf392006f
--- /dev/null
+++ b/patches/0007-printk-add-pr_flush.patch
@@ -0,0 +1,159 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 15 Dec 2021 18:44:59 +0106
+Subject: [PATCH 07/14] printk: add pr_flush()
+
+Provide a might-sleep function to allow waiting for threaded console
+printers to catch up to the latest logged message.
+
+Use pr_flush() whenever it is desirable to get buffered messages
+printed before continuing: suspend_console(), resume_console(),
+console_stop(), console_start(), console_unblank().
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/printk.h | 7 ++++
+ kernel/printk/printk.c | 73 ++++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 79 insertions(+), 1 deletion(-)
+
+--- a/include/linux/printk.h
++++ b/include/linux/printk.h
+@@ -170,6 +170,8 @@ extern void __printk_safe_exit(void);
+ #define printk_deferred_enter __printk_safe_enter
+ #define printk_deferred_exit __printk_safe_exit
+
++extern bool pr_flush(int timeout_ms, bool reset_on_progress);
++
+ /*
+ * Please don't use printk_ratelimit(), because it shares ratelimiting state
+ * with all other unrelated printk_ratelimit() callsites. Instead use
+@@ -224,6 +226,11 @@ static inline void printk_deferred_exit(
+ {
+ }
+
++static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
++{
++ return true;
++}
++
+ static inline int printk_ratelimit(void)
+ {
+ return 0;
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2445,6 +2445,7 @@ void suspend_console(void)
+ if (!console_suspend_enabled)
+ return;
+ pr_info("Suspending console(s) (use no_console_suspend to debug)\n");
++ pr_flush(1000, true);
+ console_lock();
+ console_suspended = 1;
+ up_console_sem();
+@@ -2457,6 +2458,7 @@ void resume_console(void)
+ down_console_sem();
+ console_suspended = 0;
+ console_unlock();
++ pr_flush(1000, true);
+ }
+
+ /**
+@@ -2800,8 +2802,10 @@ void console_unblank(void)
+ if (oops_in_progress) {
+ if (down_trylock_console_sem() != 0)
+ return;
+- } else
++ } else {
++ pr_flush(1000, true);
+ console_lock();
++ }
+
+ console_locked = 1;
+ console_may_schedule = 0;
+@@ -2867,6 +2871,7 @@ struct tty_driver *console_device(int *i
+ */
+ void console_stop(struct console *console)
+ {
++ pr_flush(1000, true);
+ console_lock();
+ console->flags &= ~CON_ENABLED;
+ console_unlock();
+@@ -2878,6 +2883,7 @@ void console_start(struct console *conso
+ console_lock();
+ console->flags |= CON_ENABLED;
+ console_unlock();
++ pr_flush(1000, true);
+ }
+ EXPORT_SYMBOL(console_start);
+
+@@ -3234,6 +3240,71 @@ static int __init printk_late_init(void)
+ late_initcall(printk_late_init);
+
+ #if defined CONFIG_PRINTK
++/**
++ * pr_flush() - Wait for printing threads to catch up.
++ *
++ * @timeout_ms: The maximum time (in ms) to wait.
++ * @reset_on_progress: Reset the timeout if forward progress is seen.
++ *
++ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
++ * represents infinite waiting.
++ *
++ * If @reset_on_progress is true, the timeout will be reset whenever any
++ * printer has been seen to make some forward progress.
++ *
++ * Context: Process context. May sleep while acquiring console lock.
++ * Return: true if all enabled printers are caught up.
++ */
++bool pr_flush(int timeout_ms, bool reset_on_progress)
++{
++ int remaining = timeout_ms;
++ struct console *con;
++ u64 last_diff = 0;
++ u64 printk_seq;
++ u64 diff;
++ u64 seq;
++
++ might_sleep();
++
++ seq = prb_next_seq(prb);
++
++ for (;;) {
++ diff = 0;
++
++ console_lock();
++ for_each_console(con) {
++ if (!console_is_usable(con))
++ continue;
++ printk_seq = con->seq;
++ if (printk_seq < seq)
++ diff += seq - printk_seq;
++ }
++ console_unlock();
++
++ if (diff != last_diff && reset_on_progress)
++ remaining = timeout_ms;
++
++ if (diff == 0 || remaining == 0)
++ break;
++
++ if (remaining < 0) {
++ /* no timeout limit */
++ msleep(100);
++ } else if (remaining < 100) {
++ msleep(remaining);
++ remaining = 0;
++ } else {
++ msleep(100);
++ remaining -= 100;
++ }
++
++ last_diff = diff;
++ }
++
++ return (diff == 0);
++}
++EXPORT_SYMBOL(pr_flush);
++
+ /*
+ * Delayed printk version, for scheduler-internal messages:
+ */
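
A usage sketch, mirroring the suspend_console() and console_stop() callers
added by this patch: wait up to one second for the printers, extending the
timeout as long as any console makes forward progress.

	pr_info("preparing to power down\n");
	/* Block up to 1000 ms; restart the timeout on forward progress. */
	if (!pr_flush(1000, true))
		pr_warn("some messages may still be buffered\n");
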
diff --git a/patches/0008-printk-add-kthread-console-printers.patch b/patches/0008-printk-add-kthread-console-printers.patch
new file mode 100644
index 000000000000..bdd807cee942
--- /dev/null
+++ b/patches/0008-printk-add-kthread-console-printers.patch
@@ -0,0 +1,283 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 13 Dec 2021 21:22:17 +0106
+Subject: [PATCH 08/14] printk: add kthread console printers
+
+Create a kthread for each console to perform console printing. During
+normal operation (@system_state == SYSTEM_RUNNING), the kthread
+printers are responsible for all printing on their respective
+consoles.
+
+During non-normal operation, console printing is done as it has been:
+within the context of the printk caller or within irq work triggered
+by the printk caller.
+
+Console printers synchronize against each other and against console
+lockers by taking the console lock for each message that is printed.
+
+NOTE: The kthread printers do not disable preemption while holding
+ the console lock. This introduces a regression that was
+ previously fixed with commit fd5f7cde1b85 ("printk: Never set
+ console_may_schedule in console_trylock()"). A follow-up
+ commit will correct this.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 2
+ kernel/printk/printk.c | 157 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 157 insertions(+), 2 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -153,6 +153,8 @@ struct console {
+ uint ospeed;
+ u64 seq;
+ unsigned long dropped;
++ struct task_struct *thread;
++
+ void *data;
+ struct console *next;
+ };
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -348,6 +348,20 @@ static int console_msg_format = MSG_FORM
+ /* syslog_lock protects syslog_* variables and write access to clear_seq. */
+ static DEFINE_MUTEX(syslog_lock);
+
++/*
++ * A flag to signify if printk_late_init() has already started the kthread
++ * printers. If true, any later registered consoles must start their own
++ * kthread directly. The flag is write protected by the console_lock.
++ */
++static bool kthreads_started;
++
++static inline bool kthread_printers_active(void)
++{
++ return (kthreads_started &&
++ system_state == SYSTEM_RUNNING &&
++ !oops_in_progress);
++}
++
+ #ifdef CONFIG_PRINTK
+ DECLARE_WAIT_QUEUE_HEAD(log_wait);
+ /* All 3 protected by @syslog_lock. */
+@@ -2199,7 +2213,7 @@ asmlinkage int vprintk_emit(int facility
+ printed_len = vprintk_store(facility, level, dev_info, fmt, args);
+
+ /* If called from the scheduler, we can not call up(). */
+- if (!in_sched) {
++ if (!in_sched && !kthread_printers_active()) {
+ /*
+ * Disable preemption to avoid being preempted while holding
+ * console_sem which would prevent anyone from printing to
+@@ -2240,6 +2254,8 @@ asmlinkage __visible int _printk(const c
+ }
+ EXPORT_SYMBOL(_printk);
+
++static void start_printk_kthread(struct console *con);
++
+ #else /* CONFIG_PRINTK */
+
+ #define CONSOLE_LOG_MAX 0
+@@ -2270,6 +2286,7 @@ static int console_lock_spinning_disable
+ static void call_console_driver(struct console *con, const char *text, size_t len,
+ char *dropped_text) {}
+ static bool suppress_message_printing(int level) { return false; }
++static void start_printk_kthread(struct console *con) {}
+
+ #endif /* CONFIG_PRINTK */
+
+@@ -2458,6 +2475,10 @@ void resume_console(void)
+ down_console_sem();
+ console_suspended = 0;
+ console_unlock();
++
++ /* Wake the kthread printers. */
++ wake_up_klogd();
++
+ pr_flush(1000, true);
+ }
+
+@@ -2672,6 +2693,10 @@ static bool console_flush_all(bool do_co
+ *handover = false;
+
+ do {
++ /* Let the kthread printers do the work if they can. */
++ if (kthread_printers_active())
++ return false;
++
+ any_progress = false;
+
+ for_each_console(con) {
+@@ -2883,6 +2908,10 @@ void console_start(struct console *conso
+ console_lock();
+ console->flags |= CON_ENABLED;
+ console_unlock();
++
++ /* Wake the kthread printers. */
++ wake_up_klogd();
++
+ pr_flush(1000, true);
+ }
+ EXPORT_SYMBOL(console_start);
+@@ -3074,6 +3103,8 @@ void register_console(struct console *ne
+ /* Begin with next message. */
+ newcon->seq = prb_next_seq(prb);
+ }
++ if (kthreads_started)
++ start_printk_kthread(newcon);
+ console_unlock();
+ console_sysfs_notify();
+
+@@ -3130,6 +3161,11 @@ int unregister_console(struct console *c
+ }
+ }
+
++ if (console->thread) {
++ kthread_stop(console->thread);
++ console->thread = NULL;
++ }
++
+ if (res)
+ goto out_disable_unlock;
+
+@@ -3235,6 +3271,13 @@ static int __init printk_late_init(void)
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "printk:online",
+ console_cpu_notify, NULL);
+ WARN_ON(ret < 0);
++
++ console_lock();
++ for_each_console(con)
++ start_printk_kthread(con);
++ kthreads_started = true;
++ console_unlock();
++
+ return 0;
+ }
+ late_initcall(printk_late_init);
+@@ -3305,6 +3348,116 @@ bool pr_flush(int timeout_ms, bool reset
+ }
+ EXPORT_SYMBOL(pr_flush);
+
++static bool printer_should_wake(struct console *con, u64 seq)
++{
++ short flags;
++
++ if (kthread_should_stop())
++ return true;
++
++ if (console_suspended)
++ return false;
++
++ /*
++ * This is an unsafe read to con->flags, but false positives
++ * are not an issue as long as they are rare.
++ */
++ flags = data_race(READ_ONCE(con->flags));
++ if (!(flags & CON_ENABLED))
++ return false;
++
++ return prb_read_valid(prb, seq, NULL);
++}
++
++static int printk_kthread_func(void *data)
++{
++ struct console *con = data;
++ char *dropped_text = NULL;
++ char *ext_text = NULL;
++ bool progress;
++ bool handover;
++ u64 seq = 0;
++ char *text;
++ int error;
++
++ pr_info("%sconsole [%s%d]: printing thread started\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
++
++ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
++ if (!text)
++ goto out;
++
++ if (con->flags & CON_EXTENDED) {
++ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
++ if (!ext_text)
++ goto out;
++ } else {
++ dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
++ if (!dropped_text)
++ goto out;
++ }
++
++ for (;;) {
++ error = wait_event_interruptible(log_wait, printer_should_wake(con, seq));
++
++ if (kthread_should_stop())
++ break;
++
++ if (error)
++ continue;
++
++ do {
++ console_lock();
++ if (console_suspended) {
++ console_unlock();
++ break;
++ }
++
++ /*
++ * Even though the printk kthread is always preemptible, it is
++ * still not allowed to call cond_resched() from within
++ * console drivers. The task may become non-preemptible in the
++ * console driver call chain. For example, vt_console_print()
++ * takes a spinlock and then can call into fbcon_redraw(),
++ * which can conditionally invoke cond_resched().
++ */
++ console_may_schedule = 0;
++ progress = console_emit_next_record(con, text, ext_text,
++ dropped_text, &handover);
++ if (handover)
++ break;
++
++ seq = con->seq;
++
++ /* Unlock console without invoking direct printing. */
++ __console_unlock();
++ } while (progress);
++ }
++out:
++ kfree(dropped_text);
++ kfree(ext_text);
++ kfree(text);
++ pr_info("%sconsole [%s%d]: printing thread stopped\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
++ return 0;
++}
++
++/* Must be called within console_lock(). */
++static void start_printk_kthread(struct console *con)
++{
++ con->thread = kthread_run(printk_kthread_func, con,
++ "pr/%s%d", con->name, con->index);
++ if (IS_ERR(con->thread)) {
++ con->thread = NULL;
++ pr_err("%sconsole [%s%d]: unable to start printing thread\n",
++ (con->flags & CON_BOOT) ? "boot" : "",
++ con->name, con->index);
++ return;
++ }
++}
++
+ /*
+ * Delayed printk version, for scheduler-internal messages:
+ */
+@@ -3324,7 +3477,7 @@ static void wake_up_klogd_work_func(stru
+ }
+
+ if (pending & PRINTK_PENDING_WAKEUP)
+- wake_up_interruptible(&log_wait);
++ wake_up_interruptible_all(&log_wait);
+ }
+
+ static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
diff --git a/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch b/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
new file mode 100644
index 000000000000..a1e887b01a78
--- /dev/null
+++ b/patches/0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
@@ -0,0 +1,431 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 13 Dec 2021 21:24:23 +0106
+Subject: [PATCH 09/14] printk: reimplement console_lock for proper kthread
+ support
+
+With non-threaded console printers preemption is disabled while
+holding the console lock in order to avoid the situation where the
+console printer is scheduled away and no other task can lock the
+console (for printing or otherwise). Disabling preemption is
+necessary because the console lock is implemented purely as a
+semaphore, which has no owner.
+
+Like non-threaded console printers, kthread printers use the
+console lock to synchronize during printing. However, since they
+use console_lock() instead of a best-effort console_trylock(), it
+is not possible to disable preemption upon locking. Therefore an
+alternative for synchronizing and avoiding the above mentioned
+situation is needed.
+
+The kthread printers do not need to synchronize against each other,
+but they do need to synchronize against console_lock() callers. To
+provide this synchronization, introduce a per-console mutex. The
+mutex is taken by the kthread printer during printing and is also
+taken by console_lock() callers. Since mutexes have owners, when
+calling console_lock(), the scheduler is able to schedule any
+kthread printers that may have been preempted while printing.
+
+Rather than console_lock() callers holding the per-console mutex
+for the duration of the console lock, the per-console mutex is only
+taken in order to set a new CON_PAUSED flag, which is checked by
+the kthread printers. This avoids any issues due to nested locking
+between the various per-console mutexes.
+
+The kthread printers must also synchronize against console_trylock()
+callers. Since console_trylock() is non-blocking, a global atomic
+counter will be used to identify if any kthread printers are active.
+The kthread printers will also check the atomic counter to identify
+if the console has been locked by another task via
+console_trylock().
+
+A locking overview for console_lock(), console_trylock(), and the
+kthread printers is as follows (pseudo code):
+
+console_lock()
+{
+ down(&console_sem);
+ for_each_console(con) {
+ mutex_lock(&con->lock);
+ con->flags |= CON_PAUSED;
+ mutex_unlock(&con->lock);
+ }
+}
+
+console_trylock()
+{
+ assert(down_trylock(&console_sem));
+ assert(atomic_cmpxchg(&console_lock_count, 0, -1));
+}
+
+kthread_printer()
+{
+ mutex_lock(&con->lock);
+ assert(con->flags & CON_PAUSED);
+ assert(atomic_inc_unless_negative(&console_lock_count));
+ con->write();
+ atomic_dec(&console_lock_count);
+ mutex_unlock(&con->lock);
+}
+
+Also note that the console owner and waiter logic now only applies
+between contexts that have both taken the console lock via
+console_trylock(). This is for 2 reasons:
+
+1. Contexts that have taken the console lock via console_lock()
+ require a sleepable context when unlocking to unpause the kthread
+ printers. But a waiter context has used console_trylock() and
+ may not be sleepable.
+
+2. The kthread printers no longer acquire the console lock, so it is
+   not possible to hand over the console lock.
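+
+As a rough sketch (in the same pseudo code style, using the atomic
+counter introduced by this patch), the handover gate in the emit
+path then becomes:
+
+console_emit_next_record(con, ..., handover)
+{
+	/* handovers may only happen between trylock contexts */
+	allow = (handover && atomic_read(&console_lock_count) == -1);
+	if (allow)
+		console_lock_spinning_enable();
+	con->write(...);
+	if (allow)
+		*handover = console_lock_spinning_disable_and_check();
+}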
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 15 ++++
+ kernel/printk/printk.c | 162 +++++++++++++++++++++++++++++++++++++-----------
+ 2 files changed, 140 insertions(+), 37 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -16,6 +16,7 @@
+
+ #include <linux/atomic.h>
+ #include <linux/types.h>
++#include <linux/mutex.h>
+
+ struct vc_data;
+ struct console_font_op;
+@@ -136,6 +137,7 @@ static inline int con_debug_leave(void)
+ #define CON_ANYTIME (16) /* Safe to call before per-cpu resources ready */
+ #define CON_BRL (32) /* Used for a braille device */
+ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
++#define CON_PAUSED (128) /* Sleep while console is locked */
+
+ struct console {
+ char name[16];
+@@ -155,6 +157,19 @@ struct console {
+ unsigned long dropped;
+ struct task_struct *thread;
+
++ /*
++ * The per-console lock is used by printing kthreads to synchronize
++ * this console with callers of console_lock(). This is necessary in
++ * order to allow printing kthreads to run in parallel to each other,
++ * while each safely accesses its own @flags and synchronizes
++ * against direct printing via console_lock/console_unlock.
++ *
++ * Note: For synchronizing against direct printing via
++ * console_trylock/console_unlock, see the static global
++ * variable @console_lock_count.
++ */
++ struct mutex lock;
++
+ void *data;
+ struct console *next;
+ };
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -215,6 +215,26 @@ int devkmsg_sysctl_set_loglvl(struct ctl
+ static int nr_ext_console_drivers;
+
+ /*
++ * Used to synchronize printing kthreads against direct printing via
++ * console_trylock/console_unlock.
++ *
++ * Values:
++ * -1 = console locked (via trylock), kthreads will not print
++ * 0 = no kthread printing, console not locked (via trylock)
++ * >0 = kthread(s) actively printing
++ *
++ * Note: For synchronizing against direct printing via
++ * console_lock/console_unlock, see the @lock variable in
++ * struct console.
++ */
++static atomic_t console_lock_count = ATOMIC_INIT(0);
++
++#define console_excl_trylock() (atomic_cmpxchg(&console_lock_count, 0, -1) == 0)
++#define console_excl_unlock() atomic_cmpxchg(&console_lock_count, -1, 0)
++#define console_printer_tryenter() atomic_inc_unless_negative(&console_lock_count)
++#define console_printer_exit() atomic_dec(&console_lock_count)
++
++/*
+ * Helper macros to handle lockdep when locking/unlocking console_sem. We use
+ * macros instead of functions so that _RET_IP_ contains useful information.
+ */
+@@ -257,6 +277,37 @@ static void __up_console_sem(unsigned lo
+ #define up_console_sem() __up_console_sem(_RET_IP_)
+
+ /*
++ * Tracks whether kthread printers are all paused. A value of true implies
++ * that the console is locked via console_lock() or the console is suspended.
++ * Reading and writing to this variable requires holding @console_sem.
++ */
++static bool consoles_paused;
++
++/*
++ * Pause or unpause all kthread printers.
++ *
++ * Requires the console_lock.
++ */
++static void __pause_all_consoles(bool do_pause)
++{
++ struct console *con;
++
++ for_each_console(con) {
++ mutex_lock(&con->lock);
++ if (do_pause)
++ con->flags |= CON_PAUSED;
++ else
++ con->flags &= ~CON_PAUSED;
++ mutex_unlock(&con->lock);
++ }
++
++ consoles_paused = do_pause;
++}
++
++#define pause_all_consoles() __pause_all_consoles(true)
++#define unpause_all_consoles() __pause_all_consoles(false)
++
++/*
+ * This is used for debugging the mess that is the VT code by
+ * keeping track if we have the console semaphore held. It's
+ * definitely not the perfect debug tool (we don't know if _WE_
+@@ -2475,10 +2526,6 @@ void resume_console(void)
+ down_console_sem();
+ console_suspended = 0;
+ console_unlock();
+-
+- /* Wake the kthread printers. */
+- wake_up_klogd();
+-
+ pr_flush(1000, true);
+ }
+
+@@ -2516,6 +2563,7 @@ void console_lock(void)
+ down_console_sem();
+ if (console_suspended)
+ return;
++ pause_all_consoles();
+ console_locked = 1;
+ console_may_schedule = 1;
+ }
+@@ -2537,6 +2585,10 @@ int console_trylock(void)
+ up_console_sem();
+ return 0;
+ }
++ if (!console_excl_trylock()) {
++ up_console_sem();
++ return 0;
++ }
+ console_locked = 1;
+ console_may_schedule = 0;
+ return 1;
+@@ -2545,7 +2597,7 @@ EXPORT_SYMBOL(console_trylock);
+
+ int is_console_locked(void)
+ {
+- return console_locked;
++ return (console_locked || atomic_read(&console_lock_count));
+ }
+ EXPORT_SYMBOL(is_console_locked);
+
+@@ -2579,6 +2631,19 @@ static inline bool console_is_usable(str
+ static void __console_unlock(void)
+ {
+ console_locked = 0;
++
++ /*
++ * Depending on whether console_lock() or console_trylock() was used,
++ * appropriately allow the kthread printers to continue.
++ */
++ if (consoles_paused)
++ unpause_all_consoles();
++ else
++ console_excl_unlock();
++
++ /* Wake the kthread printers. */
++ wake_up_klogd();
++
+ up_console_sem();
+ }
+
+@@ -2601,7 +2666,8 @@ static void __console_unlock(void)
+ *
+ * @handover will be set to true if a printk waiter has taken over the
+ * console_lock, in which case the caller is no longer holding the
+- * console_lock.
++ * console_lock. A NULL pointer may be provided to prevent the
++ * console_lock from being taken over by a printk waiter.
+ */
+ static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
+ char *dropped_text, bool *handover)
+@@ -2609,11 +2675,15 @@ static bool console_emit_next_record(str
+ struct printk_info info;
+ struct printk_record r;
+ unsigned long flags;
++ bool allow_handover;
+ char *write_text;
+ size_t len;
+
+ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
++ if (handover)
++ *handover = false;
++
+ if (!prb_read_valid(prb, con->seq, &r))
+ return false;
+
+@@ -2638,18 +2708,23 @@ static bool console_emit_next_record(str
+ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
+ }
+
+- /*
+- * While actively printing out messages, if another printk()
+- * were to occur on another CPU, it may wait for this one to
+- * finish. This task can not be preempted if there is a
+- * waiter waiting to take over.
+- *
+- * Interrupts are disabled because the hand over to a waiter
+- * must not be interrupted until the hand over is completed
+- * (@console_waiter is cleared).
+- */
+- printk_safe_enter_irqsave(flags);
+- console_lock_spinning_enable();
++ /* Handovers may only happen between trylock contexts. */
++ allow_handover = (handover && atomic_read(&console_lock_count) == -1);
++
++ if (allow_handover) {
++ /*
++ * While actively printing out messages, if another printk()
++ * were to occur on another CPU, it may wait for this one to
++ * finish. This task can not be preempted if there is a
++ * waiter waiting to take over.
++ *
++ * Interrupts are disabled because the hand over to a waiter
++ * must not be interrupted until the hand over is completed
++ * (@console_waiter is cleared).
++ */
++ printk_safe_enter_irqsave(flags);
++ console_lock_spinning_enable();
++ }
+
+ stop_critical_timings(); /* don't trace print latency */
+ call_console_driver(con, write_text, len, dropped_text);
+@@ -2657,8 +2732,10 @@ static bool console_emit_next_record(str
+
+ con->seq++;
+
+- *handover = console_lock_spinning_disable_and_check();
+- printk_safe_exit_irqrestore(flags);
++ if (allow_handover) {
++ *handover = console_lock_spinning_disable_and_check();
++ printk_safe_exit_irqrestore(flags);
++ }
+
+ boot_delay_msec(r.info->level);
+ printk_delay();
+@@ -2827,6 +2904,10 @@ void console_unblank(void)
+ if (oops_in_progress) {
+ if (down_trylock_console_sem() != 0)
+ return;
++ if (!console_excl_trylock()) {
++ up_console_sem();
++ return;
++ }
+ } else {
+ pr_flush(1000, true);
+ console_lock();
+@@ -2908,10 +2989,6 @@ void console_start(struct console *conso
+ console_lock();
+ console->flags |= CON_ENABLED;
+ console_unlock();
+-
+- /* Wake the kthread printers. */
+- wake_up_klogd();
+-
+ pr_flush(1000, true);
+ }
+ EXPORT_SYMBOL(console_start);
+@@ -3093,7 +3170,11 @@ void register_console(struct console *ne
+ if (newcon->flags & CON_EXTENDED)
+ nr_ext_console_drivers++;
+
++ if (consoles_paused)
++ newcon->flags |= CON_PAUSED;
++
+ newcon->dropped = 0;
++ mutex_init(&newcon->lock);
+ if (newcon->flags & CON_PRINTBUFFER) {
+ /* Get a consistent copy of @syslog_seq. */
+ mutex_lock(&syslog_lock);
+@@ -3355,16 +3436,17 @@ static bool printer_should_wake(struct c
+ if (kthread_should_stop())
+ return true;
+
+- if (console_suspended)
+- return false;
+-
+ /*
+ * This is an unsafe read to con->flags, but false positives
+ * are not an issue as long as they are rare.
+ */
+ flags = data_race(READ_ONCE(con->flags));
+- if (!(flags & CON_ENABLED))
++
++ if (!(flags & CON_ENABLED) ||
++ (flags & CON_PAUSED) ||
++ atomic_read(&console_lock_count) == -1) {
+ return false;
++ }
+
+ return prb_read_valid(prb, seq, NULL);
+ }
+@@ -3375,7 +3457,6 @@ static int printk_kthread_func(void *dat
+ char *dropped_text = NULL;
+ char *ext_text = NULL;
+ bool progress;
+- bool handover;
+ u64 seq = 0;
+ char *text;
+ int error;
+@@ -3408,9 +3489,17 @@ static int printk_kthread_func(void *dat
+ continue;
+
+ do {
+- console_lock();
+- if (console_suspended) {
+- console_unlock();
++ error = mutex_lock_interruptible(&con->lock);
++ if (error)
++ break;
++
++ if (!console_is_usable(con)) {
++ mutex_unlock(&con->lock);
++ break;
++ }
++
++ if ((con->flags & CON_PAUSED) || !console_printer_tryenter()) {
++ mutex_unlock(&con->lock);
+ break;
+ }
+
+@@ -3424,14 +3513,13 @@ static int printk_kthread_func(void *dat
+ */
+ console_may_schedule = 0;
+ progress = console_emit_next_record(con, text, ext_text,
+- dropped_text, &handover);
+- if (handover)
+- break;
++ dropped_text, NULL);
+
+ seq = con->seq;
+
+- /* Unlock console without invoking direct printing. */
+- __console_unlock();
++ console_printer_exit();
++
++ mutex_unlock(&con->lock);
+ } while (progress);
+ }
+ out:
diff --git a/patches/0010-printk-remove-console_locked.patch b/patches/0010-printk-remove-console_locked.patch
new file mode 100644
index 000000000000..e4f2bf7e8f4c
--- /dev/null
+++ b/patches/0010-printk-remove-console_locked.patch
@@ -0,0 +1,93 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Fri, 17 Dec 2021 12:29:13 +0106
+Subject: [PATCH 10/14] printk: remove @console_locked
+
+The static global variable @console_locked is used to help debug VT
+code to make sure that certain code paths are running with the
+console_lock held. However, this information is also available with
+the static global variable @consoles_paused (for locking via
+console_lock()), and the static global variable @console_lock_count
+(for locking via console_trylock()).
+
+Remove @console_locked and update is_console_locked() to use the
+alternative variables.
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 29 ++++++++++++++---------------
+ 1 file changed, 14 insertions(+), 15 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -307,15 +307,7 @@ static void __pause_all_consoles(bool do
+ #define pause_all_consoles() __pause_all_consoles(true)
+ #define unpause_all_consoles() __pause_all_consoles(false)
+
+-/*
+- * This is used for debugging the mess that is the VT code by
+- * keeping track if we have the console semaphore held. It's
+- * definitely not the perfect debug tool (we don't know if _WE_
+- * hold it and are racing, but it helps tracking those weird code
+- * paths in the console code where we end up in places I want
+- * locked without the console semaphore held).
+- */
+-static int console_locked, console_suspended;
++static int console_suspended;
+
+ /*
+ * Array of consoles built from command line options (console=)
+@@ -2564,7 +2556,6 @@ void console_lock(void)
+ if (console_suspended)
+ return;
+ pause_all_consoles();
+- console_locked = 1;
+ console_may_schedule = 1;
+ }
+ EXPORT_SYMBOL(console_lock);
+@@ -2589,15 +2580,26 @@ int console_trylock(void)
+ up_console_sem();
+ return 0;
+ }
+- console_locked = 1;
+ console_may_schedule = 0;
+ return 1;
+ }
+ EXPORT_SYMBOL(console_trylock);
+
++/*
++ * This is used to help make sure that certain paths within the VT code are
++ * running with the console lock held. It is definitely not the perfect debug
++ * tool (it is not known if the VT code is the task holding the console lock),
++ * but it helps tracking those weird code paths in the console code such as
++ * when the console is suspended: where the console is not locked but no
++ * console printing may occur.
++ *
++ * Note: This returns true when the console is suspended but is not locked.
++ * This is intentional because the VT code must consider that situation
++ * the same as if the console was locked.
++ */
+ int is_console_locked(void)
+ {
+- return (console_locked || atomic_read(&console_lock_count));
++ return (consoles_paused || atomic_read(&console_lock_count));
+ }
+ EXPORT_SYMBOL(is_console_locked);
+
+@@ -2630,8 +2632,6 @@ static inline bool console_is_usable(str
+
+ static void __console_unlock(void)
+ {
+- console_locked = 0;
+-
+ /*
+ * Depending on whether console_lock() or console_trylock() was used,
+ * appropriately allow the kthread printers to continue.
+@@ -2913,7 +2913,6 @@ void console_unblank(void)
+ console_lock();
+ }
+
+- console_locked = 1;
+ console_may_schedule = 0;
+ for_each_console(c)
+ if ((c->flags & CON_ENABLED) && c->unblank)
diff --git a/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch b/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
new file mode 100644
index 000000000000..08610894babf
--- /dev/null
+++ b/patches/0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
@@ -0,0 +1,54 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 16 Dec 2021 16:06:29 +0106
+Subject: [PATCH 11/14] console: introduce CON_MIGHT_SLEEP for vt
+
+Deadlocks involving the framebuffer console have been a recurring
+issue and are getting worse. Daniel Vetter suggested [0] that
+fbcon->write() should no longer be called from an atomic context.
+
+Introduce a new console flag CON_MIGHT_SLEEP for a console driver to
+specify that it is only called from sleepable contexts. Set the
+fbcon to use this new flag.
+
+[0] https://lore.kernel.org/all/YYuS1uNhxWOEX1Ci@phenom.ffwll.local
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/tty/vt/vt.c | 2 +-
+ include/linux/console.h | 1 +
+ kernel/printk/printk.c | 2 ++
+ 3 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/tty/vt/vt.c
++++ b/drivers/tty/vt/vt.c
+@@ -3161,7 +3161,7 @@ static struct console vt_console_driver
+ .write = vt_console_print,
+ .device = vt_console_device,
+ .unblank = unblank_screen,
+- .flags = CON_PRINTBUFFER,
++ .flags = CON_PRINTBUFFER|CON_MIGHT_SLEEP,
+ .index = -1,
+ };
+ #endif
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -138,6 +138,7 @@ static inline int con_debug_leave(void)
+ #define CON_BRL (32) /* Used for a braille device */
+ #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
+ #define CON_PAUSED (128) /* Sleep while console is locked */
++#define CON_MIGHT_SLEEP (256) /* Can only be called from sleepable context */
+
+ struct console {
+ char name[16];
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2781,6 +2781,8 @@ static bool console_flush_all(bool do_co
+
+ if (!console_is_usable(con))
+ continue;
++ if ((con->flags & CON_MIGHT_SLEEP) && !do_cond_resched)
++ continue;
+ any_usable = true;
+
+ if (con->flags & CON_EXTENDED) {
diff --git a/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch b/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch
new file mode 100644
index 000000000000..445085f828c8
--- /dev/null
+++ b/patches/0012-printk-add-infrastucture-for-atomic-consoles.patch
@@ -0,0 +1,599 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Wed, 22 Dec 2021 13:44:40 +0106
+Subject: [PATCH 12/14] printk: add infrastucture for atomic consoles
+
+Many times it is not possible to see the console output on
+panic because printing threads cannot be scheduled and/or the
+console is already taken, and forcibly overtaking/busting the
+locks does not provide the hoped-for results.
+
+Introduce a new infrastructure to support "atomic consoles".
+A new optional callback in struct console, write_atomic(), is
+available for consoles to provide an implementation for writing
+console messages. The implementation must be NMI safe if it
+can run on an architecture where NMIs exist.
+
+Console drivers implementing the write_atomic() callback must
+also select CONFIG_HAVE_ATOMIC_CONSOLE in order to enable the
+atomic console code within the printk subsystem.
+
+If atomic consoles are available, panic() will flush the kernel
+log only to the atomic consoles (after kgdb but before busting
+spinlocks). Afterwards, panic() will continue as before,
+including attempting to flush the other (non-atomic) consoles.
+
+Note that busting spinlocks is now done after kgdb. Since kgdb
+fakes oops_in_progress anyway, there should be no need to bust
+spinlocks before.
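+
+As an illustration only (a hypothetical "foo" driver, not part of
+this series), a console providing the new callback would look
+roughly like:
+
+	static void foo_console_write_atomic(struct console *co,
+					     const char *s, unsigned int count)
+	{
+		/* must be NMI safe: poll the hardware, take no sleeping locks */
+		while (count--)
+			foo_hw_put_char_polled(co, *s++);
+	}
+
+	static struct console foo_console = {
+		.name		= "foo",
+		.write		= foo_console_write,
+		.write_atomic	= foo_console_write_atomic,
+		.flags		= CON_PRINTBUFFER,
+		.index		= -1,
+	};
+
+with the driver's Kconfig entry additionally doing
+"select HAVE_ATOMIC_CONSOLE" (foo_console_write and
+foo_hw_put_char_polled are assumed helpers, not part of this patch).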
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/console.h | 16 ++
+ init/Kconfig | 4
+ kernel/panic.c | 6
+ kernel/printk/printk.c | 291 +++++++++++++++++++++++++++++++++++++++++++-----
+ 4 files changed, 288 insertions(+), 29 deletions(-)
+
+--- a/include/linux/console.h
++++ b/include/linux/console.h
+@@ -140,9 +140,19 @@ static inline int con_debug_leave(void)
+ #define CON_PAUSED (128) /* Sleep while console is locked */
+ #define CON_MIGHT_SLEEP (256) /* Can only be called from sleepable context */
+
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++struct console_atomic_data {
++ u64 seq;
++ char *text;
++ char *ext_text;
++ char *dropped_text;
++};
++#endif
++
+ struct console {
+ char name[16];
+ void (*write)(struct console *, const char *, unsigned);
++ void (*write_atomic)(struct console *, const char *, unsigned);
+ int (*read)(struct console *, char *, unsigned);
+ struct tty_driver *(*device)(struct console *, int *);
+ void (*unblank)(void);
+@@ -155,7 +165,10 @@ struct console {
+ uint ispeed;
+ uint ospeed;
+ u64 seq;
+- unsigned long dropped;
++ atomic_long_t dropped;
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ struct console_atomic_data *atomic_data;
++#endif
+ struct task_struct *thread;
+
+ /*
+@@ -185,6 +198,7 @@ extern int console_set_on_cmdline;
+ extern struct console *early_console;
+
+ enum con_flush_mode {
++ CONSOLE_ATOMIC_FLUSH_PENDING,
+ CONSOLE_FLUSH_PENDING,
+ CONSOLE_REPLAY_ALL,
+ };
+--- a/init/Kconfig
++++ b/init/Kconfig
+@@ -1542,6 +1542,10 @@ config PRINTK
+ very difficult to diagnose system problems, saying N here is
+ strongly discouraged.
+
++config HAVE_ATOMIC_CONSOLE
++ bool
++ default n
++
+ config BUG
+ bool "BUG() support" if EXPERT
+ default y
+--- a/kernel/panic.c
++++ b/kernel/panic.c
+@@ -215,7 +215,6 @@ void panic(const char *fmt, ...)
+ panic_smp_self_stop();
+
+ console_verbose();
+- bust_spinlocks(1);
+ va_start(args, fmt);
+ len = vscnprintf(buf, sizeof(buf), fmt, args);
+ va_end(args);
+@@ -239,6 +238,11 @@ void panic(const char *fmt, ...)
+ */
+ kgdb_panic(buf);
+
++ /* Use atomic consoles to dump the kernel log. */
++ console_flush_on_panic(CONSOLE_ATOMIC_FLUSH_PENDING);
++
++ bust_spinlocks(1);
++
+ /*
+ * If we have crashed and we have a crash kernel loaded let it handle
+ * everything else.
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -44,6 +44,7 @@
+ #include <linux/irq_work.h>
+ #include <linux/ctype.h>
+ #include <linux/uio.h>
++#include <linux/clocksource.h>
+ #include <linux/sched/clock.h>
+ #include <linux/sched/debug.h>
+ #include <linux/sched/task_stack.h>
+@@ -1942,21 +1943,30 @@ static int console_trylock_spinning(void
+ * dropped, a dropped message will be written out first.
+ */
+ static void call_console_driver(struct console *con, const char *text, size_t len,
+- char *dropped_text)
++ char *dropped_text, bool atomic_printing)
+ {
++ unsigned long dropped = 0;
+ size_t dropped_len;
+
+ trace_console_rcuidle(text, len);
+
+- if (con->dropped && dropped_text) {
++ if (dropped_text)
++ dropped = atomic_long_xchg_relaxed(&con->dropped, 0);
++
++ if (dropped) {
+ dropped_len = snprintf(dropped_text, DROPPED_TEXT_MAX,
+ "** %lu printk messages dropped **\n",
+- con->dropped);
+- con->dropped = 0;
+- con->write(con, dropped_text, dropped_len);
++ dropped);
++ if (atomic_printing)
++ con->write_atomic(con, dropped_text, dropped_len);
++ else
++ con->write(con, dropped_text, dropped_len);
+ }
+
+- con->write(con, text, len);
++ if (atomic_printing)
++ con->write_atomic(con, text, len);
++ else
++ con->write(con, text, len);
+ }
+
+ /*
+@@ -2297,6 +2307,76 @@ asmlinkage __visible int _printk(const c
+ }
+ EXPORT_SYMBOL(_printk);
+
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++static void __free_atomic_data(struct console_atomic_data *d)
++{
++ kfree(d->text);
++ kfree(d->ext_text);
++ kfree(d->dropped_text);
++}
++
++static void free_atomic_data(struct console_atomic_data *d)
++{
++ int count = 1;
++ int i;
++
++ if (!d)
++ return;
++
++#ifdef CONFIG_HAVE_NMI
++ count = 2;
++#endif
++
++ for (i = 0; i < count; i++)
++ __free_atomic_data(&d[i]);
++ kfree(d);
++}
++
++static int __alloc_atomic_data(struct console_atomic_data *d, short flags)
++{
++ d->text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
++ if (!d->text)
++ return -1;
++
++ if (flags & CON_EXTENDED) {
++ d->ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
++ if (!d->ext_text)
++ return -1;
++ } else {
++ d->dropped_text = kmalloc(DROPPED_TEXT_MAX, GFP_KERNEL);
++ if (!d->dropped_text)
++ return -1;
++ }
++
++ return 0;
++}
++
++static struct console_atomic_data *alloc_atomic_data(short flags)
++{
++ struct console_atomic_data *d;
++ int count = 1;
++ int i;
++
++#ifdef CONFIG_HAVE_NMI
++ count = 2;
++#endif
++
++ d = kzalloc(sizeof(*d) * count, GFP_KERNEL);
++ if (!d)
++ goto err_out;
++
++ for (i = 0; i < count; i++) {
++ if (__alloc_atomic_data(&d[i], flags) != 0)
++ goto err_out;
++ }
++
++ return d;
++err_out:
++ free_atomic_data(d);
++ return NULL;
++}
++#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */
++
+ static void start_printk_kthread(struct console *con);
+
+ #else /* CONFIG_PRINTK */
+@@ -2309,6 +2389,8 @@ static void start_printk_kthread(struct
+ #define prb_first_valid_seq(rb) 0
+ #define prb_next_seq(rb) 0
+
++#define free_atomic_data(d)
++
+ static u64 syslog_seq;
+
+ static size_t record_print_text(const struct printk_record *r,
+@@ -2327,7 +2409,7 @@ static ssize_t msg_print_ext_body(char *
+ static void console_lock_spinning_enable(void) { }
+ static int console_lock_spinning_disable_and_check(void) { return 0; }
+ static void call_console_driver(struct console *con, const char *text, size_t len,
+- char *dropped_text) {}
++ char *dropped_text, bool atomic_printing) {}
+ static bool suppress_message_printing(int level) { return false; }
+ static void start_printk_kthread(struct console *con) {}
+
+@@ -2609,13 +2691,23 @@ EXPORT_SYMBOL(is_console_locked);
+ *
+ * Requires the console_lock.
+ */
+-static inline bool console_is_usable(struct console *con)
++static inline bool console_is_usable(struct console *con, bool atomic_printing)
+ {
+ if (!(con->flags & CON_ENABLED))
+ return false;
+
+- if (!con->write)
++ if (atomic_printing) {
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ if (!con->write_atomic)
++ return false;
++ if (!con->atomic_data)
++ return false;
++#else
++ return false;
++#endif
++ } else if (!con->write) {
+ return false;
++ }
+
+ /*
+ * Console drivers may assume that per-cpu resources have been
+@@ -2647,6 +2739,66 @@ static void __console_unlock(void)
+ up_console_sem();
+ }
+
++static u64 read_console_seq(struct console *con)
++{
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ unsigned long flags;
++ u64 seq2;
++ u64 seq;
++
++ if (!con->atomic_data)
++ return con->seq;
++
++ printk_cpu_sync_get_irqsave(flags);
++
++ seq = con->seq;
++ seq2 = con->atomic_data[0].seq;
++ if (seq2 > seq)
++ seq = seq2;
++#ifdef CONFIG_HAVE_NMI
++ seq2 = con->atomic_data[1].seq;
++ if (seq2 > seq)
++ seq = seq2;
++#endif
++
++ printk_cpu_sync_put_irqrestore(flags);
++
++ return seq;
++#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
++ return con->seq;
++#endif
++}
++
++static void write_console_seq(struct console *con, u64 val, bool atomic_printing)
++{
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ unsigned long flags;
++ u64 *seq;
++
++ if (!con->atomic_data) {
++ con->seq = val;
++ return;
++ }
++
++ printk_cpu_sync_get_irqsave(flags);
++
++ if (atomic_printing) {
++ seq = &con->atomic_data[0].seq;
++#ifdef CONFIG_HAVE_NMI
++ if (in_nmi())
++ seq = &con->atomic_data[1].seq;
++#endif
++ } else {
++ seq = &con->seq;
++ }
++ *seq = val;
++
++ printk_cpu_sync_put_irqrestore(flags);
++#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
++ con->seq = val;
++#endif
++}
++
+ /*
+ * Print one record for the given console. The record printed is whatever
+ * record is the next available record for the given console.
+@@ -2659,6 +2811,8 @@ static void __console_unlock(void)
+ * If dropped messages should be printed, @dropped_text is a buffer of size
+ * DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
+ *
++ * @atomic_printing specifies if atomic printing should be used.
++ *
+ * Requires the console_lock.
+ *
+ * Returns false if the given console has no next record to print, otherwise
+@@ -2670,7 +2824,8 @@ static void __console_unlock(void)
+ * the console_lock to be taken over by a printk waiter.
+ */
+ static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
+- char *dropped_text, bool *handover)
++ char *dropped_text, bool atomic_printing,
++ bool *handover)
+ {
+ struct printk_info info;
+ struct printk_record r;
+@@ -2678,23 +2833,27 @@ static bool console_emit_next_record(str
+ bool allow_handover;
+ char *write_text;
+ size_t len;
++ u64 seq;
+
+ prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
+
+ if (handover)
+ *handover = false;
+
+- if (!prb_read_valid(prb, con->seq, &r))
++ seq = read_console_seq(con);
++
++ if (!prb_read_valid(prb, seq, &r))
+ return false;
+
+- if (con->seq != r.info->seq) {
+- con->dropped += r.info->seq - con->seq;
+- con->seq = r.info->seq;
++ if (seq != r.info->seq) {
++ atomic_long_add((unsigned long)(r.info->seq - seq), &con->dropped);
++ write_console_seq(con, r.info->seq, atomic_printing);
++ seq = r.info->seq;
+ }
+
+ /* Skip record that has level above the console loglevel. */
+ if (suppress_message_printing(r.info->level)) {
+- con->seq++;
++ write_console_seq(con, seq + 1, atomic_printing);
+ goto skip;
+ }
+
+@@ -2727,10 +2886,10 @@ static bool console_emit_next_record(str
+ }
+
+ stop_critical_timings(); /* don't trace print latency */
+- call_console_driver(con, write_text, len, dropped_text);
++ call_console_driver(con, write_text, len, dropped_text, atomic_printing);
+ start_critical_timings();
+
+- con->seq++;
++ write_console_seq(con, seq + 1, atomic_printing);
+
+ if (allow_handover) {
+ *handover = console_lock_spinning_disable_and_check();
+@@ -2779,7 +2938,7 @@ static bool console_flush_all(bool do_co
+ for_each_console(con) {
+ bool progress;
+
+- if (!console_is_usable(con))
++ if (!console_is_usable(con, false))
+ continue;
+ if ((con->flags & CON_MIGHT_SLEEP) && !do_cond_resched)
+ continue;
+@@ -2789,11 +2948,11 @@ static bool console_flush_all(bool do_co
+ /* Extended consoles do not print "dropped messages". */
+ progress = console_emit_next_record(con, &text[0],
+ &ext_text[0], NULL,
+- handover);
++ false, handover);
+ } else {
+ progress = console_emit_next_record(con, &text[0],
+ NULL, &dropped_text[0],
+- handover);
++ false, handover);
+ }
+ if (*handover)
+ return true;
+@@ -2814,6 +2973,67 @@ static bool console_flush_all(bool do_co
+ return any_usable;
+ }
+
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++static void atomic_console_flush_all(void)
++{
++ bool any_usable = false;
++ unsigned long flags;
++ struct console *con;
++ bool any_progress;
++ int index = 0;
++
++ if (console_suspended)
++ return;
++
++#ifdef CONFIG_HAVE_NMI
++ if (in_nmi())
++ index = 1;
++#endif
++
++ printk_cpu_sync_get_irqsave(flags);
++
++ do {
++ any_progress = false;
++
++ for_each_console(con) {
++ bool progress;
++
++ if (!console_is_usable(con, true))
++ continue;
++ any_usable = true;
++
++ if (con->flags & CON_EXTENDED) {
++ /* Extended consoles do not print "dropped messages". */
++ progress = console_emit_next_record(con,
++ &con->atomic_data->text[index],
++ &con->atomic_data->ext_text[index],
++ NULL,
++ true, NULL);
++ } else {
++ progress = console_emit_next_record(con,
++ &con->atomic_data->text[index],
++ NULL,
++ &con->atomic_data->dropped_text[index],
++ true, NULL);
++ }
++
++ if (!progress)
++ continue;
++ any_progress = true;
++
++ touch_softlockup_watchdog_sync();
++ clocksource_touch_watchdog();
++ rcu_cpu_stall_reset();
++ touch_nmi_watchdog();
++ }
++ } while (any_progress);
++
++ printk_cpu_sync_put_irqrestore(flags);
++}
++#else /* CONFIG_HAVE_ATOMIC_CONSOLE */
++#define atomic_console_flush_all()
++#endif
++
+ /**
+ * console_unlock - unlock the console system
+ *
+@@ -2930,6 +3150,11 @@ void console_unblank(void)
+ */
+ void console_flush_on_panic(enum con_flush_mode mode)
+ {
++ if (mode == CONSOLE_ATOMIC_FLUSH_PENDING) {
++ atomic_console_flush_all();
++ return;
++ }
++
+ /*
+ * If someone else is holding the console lock, trylock will fail
+ * and may_schedule may be set. Ignore and proceed to unlock so
+@@ -2946,7 +3171,7 @@ void console_flush_on_panic(enum con_flu
+
+ seq = prb_first_valid_seq(prb);
+ for_each_console(c)
+- c->seq = seq;
++ write_console_seq(c, seq, false);
+ }
+ console_unlock();
+ }
+@@ -3174,16 +3399,19 @@ void register_console(struct console *ne
+ if (consoles_paused)
+ newcon->flags |= CON_PAUSED;
+
+- newcon->dropped = 0;
++ atomic_long_set(&newcon->dropped, 0);
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ newcon->atomic_data = NULL;
++#endif
+ mutex_init(&newcon->lock);
+ if (newcon->flags & CON_PRINTBUFFER) {
+ /* Get a consistent copy of @syslog_seq. */
+ mutex_lock(&syslog_lock);
+- newcon->seq = syslog_seq;
++ write_console_seq(newcon, syslog_seq, false);
+ mutex_unlock(&syslog_lock);
+ } else {
+ /* Begin with next message. */
+- newcon->seq = prb_next_seq(prb);
++ write_console_seq(newcon, prb_next_seq(prb), false);
+ }
+ if (kthreads_started)
+ start_printk_kthread(newcon);
+@@ -3265,6 +3493,10 @@ int unregister_console(struct console *c
+ console_unlock();
+ console_sysfs_notify();
+
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ free_atomic_data(console->atomic_data);
++#endif
++
+ if (console->exit)
+ res = console->exit(console);
+
+@@ -3398,7 +3630,7 @@ bool pr_flush(int timeout_ms, bool reset
+
+ console_lock();
+ for_each_console(con) {
+- if (!console_is_usable(con))
++ if (!console_is_usable(con, false))
+ continue;
+ printk_seq = con->seq;
+ if (printk_seq < seq)
+@@ -3466,6 +3698,11 @@ static int printk_kthread_func(void *dat
+ (con->flags & CON_BOOT) ? "boot" : "",
+ con->name, con->index);
+
++#ifdef CONFIG_HAVE_ATOMIC_CONSOLE
++ if (con->write_atomic)
++ con->atomic_data = alloc_atomic_data(con->flags);
++#endif
++
+ text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
+ if (!text)
+ goto out;
+@@ -3494,7 +3731,7 @@ static int printk_kthread_func(void *dat
+ if (error)
+ break;
+
+- if (!console_is_usable(con)) {
++ if (!console_is_usable(con, false)) {
+ mutex_unlock(&con->lock);
+ break;
+ }
+@@ -3514,7 +3751,7 @@ static int printk_kthread_func(void *dat
+ */
+ console_may_schedule = 0;
+ progress = console_emit_next_record(con, text, ext_text,
+- dropped_text, NULL);
++ dropped_text, false, NULL);
+
+ seq = con->seq;
+
diff --git a/patches/serial__8250__implement_write_atomic.patch b/patches/0013-serial-8250-implement-write_atomic.patch
index b08a974b1ef9..11e77e5dd488 100644
--- a/patches/serial__8250__implement_write_atomic.patch
+++ b/patches/0013-serial-8250-implement-write_atomic.patch
@@ -1,22 +1,19 @@
-Subject: serial: 8250: implement write_atomic
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:02 2020 +0106
-
From: John Ogness <john.ogness@linutronix.de>
+Date: Mon, 30 Nov 2020 01:42:02 +0106
+Subject: [PATCH 13/14] serial: 8250: implement write_atomic
Implement a non-sleeping NMI-safe write_atomic() console function in
-order to support emergency console printing.
+order to support atomic console printing during a panic.
Since interrupts need to be disabled during transmit, all usage of
the IER register is wrapped with access functions that use the
-console_atomic_lock() function to synchronize register access while
-tracking the state of the interrupts. This is necessary because
+printk_cpu_sync_get_irqsave() function to synchronize register access
+while tracking the state of the interrupts. This is necessary because
write_atomic() can be called from an NMI context that has preempted
write_atomic().
Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/tty/serial/8250/8250.h | 47 ++++++++++++++++
drivers/tty/serial/8250/8250_core.c | 17 ++++--
@@ -24,9 +21,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/tty/serial/8250/8250_ingenic.c | 7 ++
drivers/tty/serial/8250/8250_mtk.c | 29 +++++++++-
drivers/tty/serial/8250/8250_port.c | 92 ++++++++++++++++++++-------------
+ drivers/tty/serial/8250/Kconfig | 1
include/linux/serial_8250.h | 5 +
- 7 files changed, 162 insertions(+), 44 deletions(-)
----
+ 8 files changed, 163 insertions(+), 44 deletions(-)
+
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
@@ -132,12 +132,55 @@ static inline void serial_dl_write(struc
@@ -43,12 +41,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ is_console = uart_console(port);
+
+ if (is_console)
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+
+ serial_out(up, UART_IER, ier);
+
+ if (is_console)
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+}
+
+static inline unsigned char serial8250_clear_IER(struct uart_8250_port *up)
@@ -65,13 +63,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ clearval = UART_IER_UUE;
+
+ if (is_console)
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+
+ prior = serial_port_in(port, UART_IER);
+ serial_port_out(port, UART_IER, clearval);
+
+ if (is_console)
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+
+ return prior;
+}
@@ -155,10 +153,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ is_console = uart_console(port);
+
+ if (is_console)
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
up->ier = port->serial_in(port, UART_IER);
+ if (is_console)
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+
if (up->ier & (UART_IER_RLSI | UART_IER_RDI)) {
port->ops->stop_rx(port);
@@ -180,10 +178,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
+ is_console = uart_console(p);
+ if (is_console)
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
ier = p->serial_in(p, UART_IER);
+ if (is_console)
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
if (ier & UART_IER_MSI)
value |= UART_MCR_MDCE | UART_MCR_FCM;
@@ -202,13 +200,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ is_console = uart_console(port);
+
+ if (is_console)
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, ier & (~mask));
+
+ if (is_console)
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
}
static void mtk8250_enable_intrs(struct uart_8250_port *up, int mask)
@@ -219,13 +217,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned int ier;
+
+ if (uart_console(port))
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+
+ ier = serial_in(up, UART_IER);
+ serial_out(up, UART_IER, ier | mask);
+
+ if (uart_console(port))
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
}
static void mtk8250_set_flow_ctrl(struct uart_8250_port *up, int mode)
@@ -339,9 +337,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+ wait_for_xmitr(up, UART_LSR_THRE);
+
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+ serial8250_console_putchar_locked(port, ch);
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+}
+
/*
@@ -358,7 +356,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned long flags;
+ unsigned int ier;
+
-+ console_atomic_lock(flags);
++ printk_cpu_sync_get_irqsave(flags);
+
+ touch_nmi_watchdog();
+
@@ -374,7 +372,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ wait_for_xmitr(up, BOTH_EMPTY);
+ serial8250_set_IER(up, ier);
+
-+ console_atomic_unlock(flags);
++ printk_cpu_sync_put_irqrestore(flags);
+}
+
/*
@@ -454,6 +452,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (options)
uart_parse_options(options, &baud, &parity, &bits, &flow);
else if (probe)
+--- a/drivers/tty/serial/8250/Kconfig
++++ b/drivers/tty/serial/8250/Kconfig
+@@ -9,6 +9,7 @@ config SERIAL_8250
+ depends on !S390
+ select SERIAL_CORE
+ select SERIAL_MCTRL_GPIO if GPIOLIB
++ select HAVE_ATOMIC_CONSOLE
+ help
+ This selects whether you want to include the driver for the standard
+ serial ports. The standard answer is Y. People who might say N
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -7,6 +7,7 @@
diff --git a/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
new file mode 100644
index 000000000000..a74f3e1f6821
--- /dev/null
+++ b/patches/0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
@@ -0,0 +1,103 @@
+From: John Ogness <john.ogness@linutronix.de>
+Date: Thu, 20 Jan 2022 16:53:56 +0106
+Subject: [PATCH 14/14] printk: avoid preempt_disable() for PREEMPT_RT
+
+During non-normal operation, printk() calls will attempt to
+write the messages directly to the consoles. This involves
+using console_trylock() to acquire @console_sem.
+
+Since commit fd5f7cde1b85 ("printk: Never set
+console_may_schedule in console_trylock()"), preemption is
+disabled while directly printing to the consoles in order to
+ensure that the printing task is not scheduled away while
+holding @console_sem.
+
+On PREEMPT_RT systems, disabling preemption here is not allowed
+because console drivers will acquire spin locks (which under
+PREEMPT_RT is an rtmutex).
+
+For normal operation, direct printing is not used. In a panic
+scenario, atomic consoles and spinlock busting are used to
+handle direct printing. So the usefulness of disabling
+preemption here is really restricted to early boot.
+
+For PREEMPT_RT systems, do not disable preemption during direct
+console printing. This also means that console handovers cannot
+take place, but handovers too are really only relevant during
+early boot.
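+
+Schematically, the PREEMPT_RT direct-print path in vprintk_emit()
+then reduces to:
+
+	if (preemptible() && console_trylock())
+		console_unlock();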
+
+Signed-off-by: John Ogness <john.ogness@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/printk/printk.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1873,6 +1873,7 @@ static int console_lock_spinning_disable
+ return 1;
+ }
+
++#if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ /**
+ * console_trylock_spinning - try to get console_lock by busy waiting
+ *
+@@ -1936,6 +1937,7 @@ static int console_trylock_spinning(void
+
+ return 1;
+ }
++#endif /* CONFIG_PREEMPT_RT */
+
+ /*
+ * Call the specified console driver, asking it to write out the specified
+@@ -2268,19 +2270,31 @@ asmlinkage int vprintk_emit(int facility
+ /* If called from the scheduler, we can not call up(). */
+ if (!in_sched && !kthread_printers_active()) {
+ /*
++ * Try to acquire and then immediately release the console
++ * semaphore. The release will print out buffers.
++ */
++#if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ /*
++ * Use the non-spinning trylock since PREEMPT_RT does not
++ * support console lock handovers.
++ *
++ * Direct printing will most likely involve taking spinlocks.
++ * For PREEMPT_RT, this is only allowed if in a preemptible
++ * context.
++ */
++ if (preemptible() && console_trylock())
++ console_unlock();
++#else
++ /*
+ * Disable preemption to avoid being preempted while holding
+ * console_sem which would prevent anyone from printing to
+ * console
+ */
+ preempt_disable();
+- /*
+- * Try to acquire and then immediately release the console
+- * semaphore. The release will print out buffers and wake up
+- * /dev/kmsg and syslog() users.
+- */
+ if (console_trylock_spinning())
+ console_unlock();
+ preempt_enable();
++#endif
+ }
+
+ wake_up_klogd();
+@@ -2867,8 +2881,13 @@ static bool console_emit_next_record(str
+ len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
+ }
+
++#if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ /* PREEMPT_RT does not support console lock handovers. */
++ allow_handover = false;
++#else
+ /* Handovers may only happen between trylock contexts. */
+ allow_handover = (handover && atomic_read(&console_lock_count) == -1);
++#endif
+
+ if (allow_handover) {
+ /*
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 5da6b6218f1c..c0ab4419d1ec 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt18
++-rt19
diff --git a/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch b/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch
new file mode 100644
index 000000000000..c4ebc27f8a1c
--- /dev/null
+++ b/patches/Use-generic_handle_irq_safe-where-it-makes-sense.patch
@@ -0,0 +1,131 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 19 Jan 2022 16:10:39 +0100
+Subject: [PATCH] *: Use generic_handle_irq_safe() where it makes sense.
+
+The i2c-i801 driver invokes i2c_handle_smbus_host_notify() from its
+interrupt service routine. On PREEMPT_RT i2c-i801's handler is
+force-threaded and runs with interrupts enabled, which leads to a
+warning by handle_irq_event_percpu(), which assumes that
+irq_default_primary_handler() enabled interrupts.
+
+i2c-i801's interrupt handler can't be made non-threaded because the
+interrupt line is shared with other devices.
+All i2c host drivers' interrupt handlers are (force-)threaded on
+PREEMPT_RT.
+
+Use generic_handle_irq_safe() in force-threaded IRQ handlers and where
+interrupts are disabled prior to invoking generic_handle_irq().
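+
+The conversion is mechanical; schematically (see the driver diffs
+below for the real instances):
+
+	/* before: only correct with interrupts disabled */
+	local_irq_disable();
+	generic_handle_irq(irq);
+	local_irq_enable();
+
+	/* after: safe from any context */
+	generic_handle_irq_safe(irq);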
+
+Reported-by: Michael Below <below@judiz.de>
+Link: https://bugs.debian.org/1002537
+Cc: Salvatore Bonaccorso <carnil@debian.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/i2c/busses/i2c-cht-wc.c | 11 ++---------
+ drivers/i2c/i2c-core-base.c | 2 +-
+ drivers/mfd/ezx-pcap.c | 4 +---
+ drivers/misc/hi6421v600-irq.c | 6 +++---
+ drivers/net/usb/lan78xx.c | 7 ++-----
+ drivers/staging/greybus/gpio.c | 5 +----
+ 6 files changed, 10 insertions(+), 25 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-cht-wc.c
++++ b/drivers/i2c/busses/i2c-cht-wc.c
+@@ -99,15 +99,8 @@ static irqreturn_t cht_wc_i2c_adap_threa
+ * interrupt handler as well, so running the client irq handler from
+ * this thread will cause things to lock up.
+ */
+- if (reg & CHT_WC_EXTCHGRIRQ_CLIENT_IRQ) {
+- /*
+- * generic_handle_irq expects local IRQs to be disabled
+- * as normally it is called from interrupt context.
+- */
+- local_irq_disable();
+- generic_handle_irq(adap->client_irq);
+- local_irq_enable();
+- }
++ if (reg & CHT_WC_EXTCHGRIRQ_CLIENT_IRQ)
++ generic_handle_irq_safe(adap->client_irq);
+
+ return IRQ_HANDLED;
+ }
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1423,7 +1423,7 @@ int i2c_handle_smbus_host_notify(struct
+ if (irq <= 0)
+ return -ENXIO;
+
+- generic_handle_irq(irq);
++ generic_handle_irq_safe(irq);
+
+ return 0;
+ }
+--- a/drivers/mfd/ezx-pcap.c
++++ b/drivers/mfd/ezx-pcap.c
+@@ -193,13 +193,11 @@ static void pcap_isr_work(struct work_st
+ ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
+ ezx_pcap_write(pcap, PCAP_REG_ISR, isr);
+
+- local_irq_disable();
+ service = isr & ~msr;
+ for (irq = pcap->irq_base; service; service >>= 1, irq++) {
+ if (service & 1)
+- generic_handle_irq(irq);
++ generic_handle_irq_safe(irq);
+ }
+- local_irq_enable();
+ ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
+ } while (gpio_get_value(pdata->gpio));
+ }
+--- a/drivers/misc/hi6421v600-irq.c
++++ b/drivers/misc/hi6421v600-irq.c
+@@ -117,8 +117,8 @@ static irqreturn_t hi6421v600_irq_handle
+ * If both powerkey down and up IRQs are received,
+ * handle them at the right order
+ */
+- generic_handle_irq(priv->irqs[POWERKEY_DOWN]);
+- generic_handle_irq(priv->irqs[POWERKEY_UP]);
++ generic_handle_irq_safe(priv->irqs[POWERKEY_DOWN]);
++ generic_handle_irq_safe(priv->irqs[POWERKEY_UP]);
+ pending &= ~HISI_IRQ_POWERKEY_UP_DOWN;
+ }
+
+@@ -126,7 +126,7 @@ static irqreturn_t hi6421v600_irq_handle
+ continue;
+
+ for_each_set_bit(offset, &pending, BITS_PER_BYTE) {
+- generic_handle_irq(priv->irqs[offset + i * BITS_PER_BYTE]);
++ generic_handle_irq_safe(priv->irqs[offset + i * BITS_PER_BYTE]);
+ }
+ }
+
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -1367,11 +1367,8 @@ static void lan78xx_status(struct lan78x
+ netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
+ lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
+
+- if (dev->domain_data.phyirq > 0) {
+- local_irq_disable();
+- generic_handle_irq(dev->domain_data.phyirq);
+- local_irq_enable();
+- }
++ if (dev->domain_data.phyirq > 0)
++ generic_handle_irq_safe(dev->domain_data.phyirq);
+ } else {
+ netdev_warn(dev->net,
+ "unexpected interrupt: 0x%08x\n", intdata);
+--- a/drivers/staging/greybus/gpio.c
++++ b/drivers/staging/greybus/gpio.c
+@@ -391,10 +391,7 @@ static int gb_gpio_request_handler(struc
+ return -EINVAL;
+ }
+
+- local_irq_disable();
+- ret = generic_handle_irq(irq);
+- local_irq_enable();
+-
++ ret = generic_handle_irq_safe(irq);
+ if (ret)
+ dev_err(dev, "failed to invoke irq handler\n");
+
diff --git a/patches/console__add_write_atomic_interface.patch b/patches/console__add_write_atomic_interface.patch
deleted file mode 100644
index ec5663190789..000000000000
--- a/patches/console__add_write_atomic_interface.patch
+++ /dev/null
@@ -1,314 +0,0 @@
-Subject: console: add write_atomic interface
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:01 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Add a write_atomic() callback to the console. This is an optional
-function for console drivers. The function must be atomic (including
-NMI safe) for writing to the console.
-
-Console drivers must still implement the write() callback. The
-write_atomic() callback will only be used in special situations,
-such as when the kernel panics.
-
-Creating an NMI safe write_atomic() that must synchronize with
-write() requires a careful implementation of the console driver. To
-aid with the implementation, a set of console_atomic_*() functions
-are provided:
-
- void console_atomic_lock(unsigned long flags);
- void console_atomic_unlock(unsigned long flags);
-
-These functions synchronize using the printk cpulock and disable
-hardware interrupts.
-
-kgdb makes use of its own cpulock (@dbg_master_lock, @kgdb_active)
-during cpu roundup. This will conflict with the printk cpulock.
-Therefore, a CPU must ensure that it is not holding the printk
-cpulock when calling kgdb_cpu_enter(). If it is, it must allow its
-printk context to complete first.
-
-A new helper function kgdb_roundup_delay() is introduced for kgdb
-to determine if it is holding the printk cpulock. If so, a flag is
-set so that when the printk cpulock is released, kgdb will be
-re-triggered for that CPU.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- arch/powerpc/include/asm/smp.h | 1
- arch/powerpc/kernel/kgdb.c | 10 ++++++++-
- arch/powerpc/kernel/smp.c | 5 ++++
- arch/x86/kernel/kgdb.c | 9 +++++---
- include/linux/console.h | 1
- include/linux/kgdb.h | 3 ++
- include/linux/printk.h | 23 ++++++++++++++++++++
- kernel/debug/debug_core.c | 45 +++++++++++++++++++++++------------------
- kernel/printk/printk.c | 26 +++++++++++++++++++++++
- 9 files changed, 100 insertions(+), 23 deletions(-)
----
---- a/arch/powerpc/include/asm/smp.h
-+++ b/arch/powerpc/include/asm/smp.h
-@@ -62,6 +62,7 @@ struct smp_ops_t {
-
- extern int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
- extern int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us);
-+extern void smp_send_debugger_break_cpu(unsigned int cpu);
- extern void smp_send_debugger_break(void);
- extern void start_secondary_resume(void);
- extern void smp_generic_give_timebase(void);
---- a/arch/powerpc/kernel/kgdb.c
-+++ b/arch/powerpc/kernel/kgdb.c
-@@ -120,11 +120,19 @@ int kgdb_skipexception(int exception, st
-
- static int kgdb_debugger_ipi(struct pt_regs *regs)
- {
-- kgdb_nmicallback(raw_smp_processor_id(), regs);
-+ int cpu = raw_smp_processor_id();
-+
-+ if (!kgdb_roundup_delay(cpu))
-+ kgdb_nmicallback(cpu, regs);
- return 0;
- }
-
- #ifdef CONFIG_SMP
-+void kgdb_roundup_cpu(unsigned int cpu)
-+{
-+ smp_send_debugger_break_cpu(cpu);
-+}
-+
- void kgdb_roundup_cpus(void)
- {
- smp_send_debugger_break();
---- a/arch/powerpc/kernel/smp.c
-+++ b/arch/powerpc/kernel/smp.c
-@@ -589,6 +589,11 @@ static void debugger_ipi_callback(struct
- debugger_ipi(regs);
- }
-
-+void smp_send_debugger_break_cpu(unsigned int cpu)
-+{
-+ smp_send_nmi_ipi(cpu, debugger_ipi_callback, 1000000);
-+}
-+
- void smp_send_debugger_break(void)
- {
- smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000);
---- a/arch/x86/kernel/kgdb.c
-+++ b/arch/x86/kernel/kgdb.c
-@@ -502,9 +502,12 @@ static int kgdb_nmi_handler(unsigned int
- if (atomic_read(&kgdb_active) != -1) {
- /* KGDB CPU roundup */
- cpu = raw_smp_processor_id();
-- kgdb_nmicallback(cpu, regs);
-- set_bit(cpu, was_in_debug_nmi);
-- touch_nmi_watchdog();
-+
-+ if (!kgdb_roundup_delay(cpu)) {
-+ kgdb_nmicallback(cpu, regs);
-+ set_bit(cpu, was_in_debug_nmi);
-+ touch_nmi_watchdog();
-+ }
-
- return NMI_HANDLED;
- }
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -140,6 +140,7 @@ static inline int con_debug_leave(void)
- struct console {
- char name[16];
- void (*write)(struct console *, const char *, unsigned);
-+ void (*write_atomic)(struct console *co, const char *s, unsigned int count);
- int (*read)(struct console *, char *, unsigned);
- struct tty_driver *(*device)(struct console *, int *);
- void (*unblank)(void);
---- a/include/linux/kgdb.h
-+++ b/include/linux/kgdb.h
-@@ -212,6 +212,8 @@ extern void kgdb_call_nmi_hook(void *ign
- */
- extern void kgdb_roundup_cpus(void);
-
-+extern void kgdb_roundup_cpu(unsigned int cpu);
-+
- /**
- * kgdb_arch_set_pc - Generic call back to the program counter
- * @regs: Current &struct pt_regs.
-@@ -365,5 +367,6 @@ extern void kgdb_free_init_mem(void);
- #define dbg_late_init()
- static inline void kgdb_panic(const char *msg) {}
- static inline void kgdb_free_init_mem(void) { }
-+static inline void kgdb_roundup_cpu(unsigned int cpu) {}
- #endif /* ! CONFIG_KGDB */
- #endif /* _KGDB_H_ */
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -284,10 +284,18 @@ static inline void printk_trigger_flush(
- extern int __printk_cpu_trylock(void);
- extern void __printk_wait_on_cpu_lock(void);
- extern void __printk_cpu_unlock(void);
-+extern bool kgdb_roundup_delay(unsigned int cpu);
-+
- #else
-+
- #define __printk_cpu_trylock() 1
- #define __printk_wait_on_cpu_lock()
- #define __printk_cpu_unlock()
-+
-+static inline bool kgdb_roundup_delay(unsigned int cpu)
-+{
-+ return false;
-+}
- #endif /* CONFIG_SMP */
-
- /**
-@@ -319,6 +327,21 @@ extern void __printk_cpu_unlock(void);
- local_irq_restore(flags); \
- } while (0)
-
-+/*
-+ * Used to synchronize atomic consoles.
-+ *
-+ * The same as raw_printk_cpu_lock_irqsave() except that hardware interrupts
-+ * are _not_ restored while spinning.
-+ */
-+#define console_atomic_lock(flags) \
-+ do { \
-+ local_irq_save(flags); \
-+ while (!__printk_cpu_trylock()) \
-+ cpu_relax(); \
-+ } while (0)
-+
-+#define console_atomic_unlock raw_printk_cpu_unlock_irqrestore
-+
- extern int kptr_restrict;
-
- /**
---- a/kernel/debug/debug_core.c
-+++ b/kernel/debug/debug_core.c
-@@ -238,35 +238,42 @@ NOKPROBE_SYMBOL(kgdb_call_nmi_hook);
- static DEFINE_PER_CPU(call_single_data_t, kgdb_roundup_csd) =
- CSD_INIT(kgdb_call_nmi_hook, NULL);
-
--void __weak kgdb_roundup_cpus(void)
-+void __weak kgdb_roundup_cpu(unsigned int cpu)
- {
- call_single_data_t *csd;
-+ int ret;
-+
-+ csd = &per_cpu(kgdb_roundup_csd, cpu);
-+
-+ /*
-+ * If it didn't round up last time, don't try again
-+ * since smp_call_function_single_async() will block.
-+ *
-+ * If rounding_up is false then we know that the
-+ * previous call must have at least started and that
-+ * means smp_call_function_single_async() won't block.
-+ */
-+ if (kgdb_info[cpu].rounding_up)
-+ return;
-+ kgdb_info[cpu].rounding_up = true;
-+
-+ ret = smp_call_function_single_async(cpu, csd);
-+ if (ret)
-+ kgdb_info[cpu].rounding_up = false;
-+}
-+NOKPROBE_SYMBOL(kgdb_roundup_cpu);
-+
-+void __weak kgdb_roundup_cpus(void)
-+{
- int this_cpu = raw_smp_processor_id();
- int cpu;
-- int ret;
-
- for_each_online_cpu(cpu) {
- /* No need to roundup ourselves */
- if (cpu == this_cpu)
- continue;
-
-- csd = &per_cpu(kgdb_roundup_csd, cpu);
--
-- /*
-- * If it didn't round up last time, don't try again
-- * since smp_call_function_single_async() will block.
-- *
-- * If rounding_up is false then we know that the
-- * previous call must have at least started and that
-- * means smp_call_function_single_async() won't block.
-- */
-- if (kgdb_info[cpu].rounding_up)
-- continue;
-- kgdb_info[cpu].rounding_up = true;
--
-- ret = smp_call_function_single_async(cpu, csd);
-- if (ret)
-- kgdb_info[cpu].rounding_up = false;
-+ kgdb_roundup_cpu(cpu);
- }
- }
- NOKPROBE_SYMBOL(kgdb_roundup_cpus);
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -44,6 +44,7 @@
- #include <linux/irq_work.h>
- #include <linux/ctype.h>
- #include <linux/uio.h>
-+#include <linux/kgdb.h>
- #include <linux/sched/clock.h>
- #include <linux/sched/debug.h>
- #include <linux/sched/task_stack.h>
-@@ -3588,6 +3589,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
- #ifdef CONFIG_SMP
- static atomic_t printk_cpulock_owner = ATOMIC_INIT(-1);
- static atomic_t printk_cpulock_nested = ATOMIC_INIT(0);
-+static unsigned int kgdb_cpu = -1;
-
- /**
- * __printk_wait_on_cpu_lock() - Busy wait until the printk cpu-reentrant
-@@ -3667,6 +3669,9 @@ EXPORT_SYMBOL(__printk_cpu_trylock);
- */
- void __printk_cpu_unlock(void)
- {
-+ bool trigger_kgdb = false;
-+ unsigned int cpu;
-+
- if (atomic_read(&printk_cpulock_nested)) {
- atomic_dec(&printk_cpulock_nested);
- return;
-@@ -3677,6 +3682,12 @@ void __printk_cpu_unlock(void)
- * LMM(__printk_cpu_unlock:A)
- */
-
-+ cpu = smp_processor_id();
-+ if (kgdb_cpu == cpu) {
-+ trigger_kgdb = true;
-+ kgdb_cpu = -1;
-+ }
-+
- /*
- * Guarantee loads and stores from this CPU when it was the
- * lock owner are visible to the next lock owner. This pairs
-@@ -3697,6 +3708,21 @@ void __printk_cpu_unlock(void)
- */
- atomic_set_release(&printk_cpulock_owner,
- -1); /* LMM(__printk_cpu_unlock:B) */
-+
-+ if (trigger_kgdb) {
-+ pr_warn("re-triggering kgdb roundup for CPU#%d\n", cpu);
-+ kgdb_roundup_cpu(cpu);
-+ }
- }
- EXPORT_SYMBOL(__printk_cpu_unlock);
-+
-+bool kgdb_roundup_delay(unsigned int cpu)
-+{
-+ if (cpu != atomic_read(&printk_cpulock_owner))
-+ return false;
-+
-+ kgdb_cpu = cpu;
-+ return true;
-+}
-+EXPORT_SYMBOL(kgdb_roundup_delay);
- #endif /* CONFIG_SMP */
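For reference, the console_atomic_lock()/console_atomic_unlock() pair removed above was intended to be used from a console's write_atomic() callback, roughly as in this minimal sketch (demo_write_atomic and demo_putchar are hypothetical, not part of any patch):

static void demo_write_atomic(struct console *con, const char *s,
			      unsigned int count)
{
	unsigned long flags;

	/* Spin with hard interrupts disabled until the printk cpu lock
	 * is acquired; safe even from NMI context.
	 */
	console_atomic_lock(flags);
	while (count--)
		demo_putchar(*s++);	/* hypothetical polled character output */
	console_atomic_unlock(flags);
}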
diff --git a/patches/genirq-Provide-generic_handle_irq_safe.patch b/patches/genirq-Provide-generic_handle_irq_safe.patch
new file mode 100644
index 000000000000..7865b6bf928a
--- /dev/null
+++ b/patches/genirq-Provide-generic_handle_irq_safe.patch
@@ -0,0 +1,54 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 21 Jan 2022 18:08:11 +0100
+Subject: [PATCH] genirq: Provide generic_handle_irq_safe().
+
+Provide generic_handle_irq_safe(), which can be used from any
+context.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/irqdesc.h | 1 +
+ kernel/irq/irqdesc.c | 21 +++++++++++++++++++++
+ 2 files changed, 22 insertions(+)
+
+--- a/include/linux/irqdesc.h
++++ b/include/linux/irqdesc.h
+@@ -160,6 +160,7 @@ static inline void generic_handle_irq_de
+
+ int handle_irq_desc(struct irq_desc *desc);
+ int generic_handle_irq(unsigned int irq);
++int generic_handle_irq_safe(unsigned int irq);
+
+ #ifdef CONFIG_IRQ_DOMAIN
+ /*
+--- a/kernel/irq/irqdesc.c
++++ b/kernel/irq/irqdesc.c
+@@ -662,6 +662,27 @@ int generic_handle_irq(unsigned int irq)
+ }
+ EXPORT_SYMBOL_GPL(generic_handle_irq);
+
++/**
++ * generic_handle_irq_safe - Invoke the handler for a particular irq
++ * @irq: The irq number to handle
++ *
++ * Returns: 0 on success, or -EINVAL if conversion has failed
++ *
++ * This function can be called from IRQ context (with irq regs
++ * initialized) or, with care, from any other context.
++ */
++int generic_handle_irq_safe(unsigned int irq)
++{
++ unsigned long flags;
++ int ret;
++
++ local_irq_save(flags);
++ ret = handle_irq_desc(irq_to_desc(irq));
++ local_irq_restore(flags);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(generic_handle_irq_safe);
++
+ #ifdef CONFIG_IRQ_DOMAIN
+ /**
+ * generic_handle_domain_irq - Invoke the handler for a HW irq belonging
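To illustrate the intended use (a sketch only; demo_demux_irq and struct demo_chip are hypothetical, not part of the patch): a force-threaded parent handler on PREEMPT_RT runs with interrupts enabled, so it injects the child interrupt through the _safe variant, which performs the local_irq_save() itself.

struct demo_chip {
	int child_irq;	/* hypothetical demuxed interrupt */
};

static irqreturn_t demo_demux_irq(int irq, void *data)
{
	struct demo_chip *chip = data;

	/* Safe even though this handler may run force-threaded, i.e. in
	 * a kernel thread with interrupts enabled, on PREEMPT_RT.
	 */
	if (generic_handle_irq_safe(chip->child_irq))
		return IRQ_NONE;

	return IRQ_HANDLED;
}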
diff --git a/patches/i2c-core-Let-i2c_handle_smbus_host_notify-use-handle.patch b/patches/i2c-core-Let-i2c_handle_smbus_host_notify-use-handle.patch
deleted file mode 100644
index 719ffc5da81d..000000000000
--- a/patches/i2c-core-Let-i2c_handle_smbus_host_notify-use-handle.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 19 Jan 2022 16:10:39 +0100
-Subject: [PATCH] i2c: core: Let i2c_handle_smbus_host_notify() use
- handle_nested_irq() on PREEMPT_RT.
-
-The i2c-i801 driver invokes i2c_handle_smbus_host_notify() from its
-interrupt service routine. On PREEMPT_RT, i2c-i801's handler is
-force-threaded and runs with interrupts enabled, which leads
-handle_irq_event_percpu() to warn that irq_default_primary_handler()
-enabled interrupts.
-
-i2c-i801's interrupt handler can't be made non-threaded because the
-interrupt line is shared with other devices.
-All i2c host drivers' interrupt handlers are (force-)threaded on
-PREEMPT_RT.
-
-Handle the IRQs by invoking handle_nested_irq() on PREEMPT_RT.
-
-Reported-by: Michael Below <below@judiz.de>
-Link: https://bugs.debian.org/1002537
-Cc: Salvatore Bonaccorso <carnil@debian.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/i2c/i2c-core-base.c | 5 ++++-
- 1 file changed, 4 insertions(+), 1 deletion(-)
-
---- a/drivers/i2c/i2c-core-base.c
-+++ b/drivers/i2c/i2c-core-base.c
-@@ -1423,7 +1423,10 @@ int i2c_handle_smbus_host_notify(struct
- if (irq <= 0)
- return -ENXIO;
-
-- generic_handle_irq(irq);
-+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ generic_handle_irq(irq);
-+ else
-+ handle_nested_irq(irq);
-
- return 0;
- }
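For context, the call-site pattern looks roughly like this (a sketch; demo_isr and the client address are hypothetical). On PREEMPT_RT the handler below is force-threaded, which is why the nested-IRQ path above is needed:

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	struct i2c_adapter *adap = dev_id;

	/* Force-threaded on PREEMPT_RT: runs in a kernel thread with
	 * interrupts enabled, so the Host Notify event is injected via
	 * handle_nested_irq() by the hunk above.
	 */
	i2c_handle_smbus_host_notify(adap, 0x08 /* hypothetical addr */);

	return IRQ_HANDLED;
}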
diff --git a/patches/i2c-rcar-Allow-interrupt-handler-to-be-threaded.patch b/patches/i2c-rcar-Allow-interrupt-handler-to-be-threaded.patch
deleted file mode 100644
index bccd17f553fd..000000000000
--- a/patches/i2c-rcar-Allow-interrupt-handler-to-be-threaded.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 19 Jan 2022 15:56:50 +0100
-Subject: [PATCH] i2c: rcar: Allow interrupt handler to be threaded.
-
-This is a revert of commit
- 24c6d4bc56388 ("i2c: rcar: make sure irq is not threaded on Gen2 and earlier")
-
-The IRQ-handler is not threaded unless requested. On PREEMPT_RT the
-handler must be threaded because the handler acquires spinlock_t locks
-which are sleeping locks on PREEMPT_RT. This is either via the DMA API
-or the possible wake_up() invocation.
-
-Remove IRQF_NO_THREAD from flags passed to request_irq().
-
-Fixes: 24c6d4bc56388 ("i2c: rcar: make sure irq is not threaded on Gen2 and earlier")
-Cc: Wolfram Sang <wsa+renesas@sang-engineering.com>
-Cc: linux-renesas-soc@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/i2c/busses/i2c-rcar.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
---- a/drivers/i2c/busses/i2c-rcar.c
-+++ b/drivers/i2c/busses/i2c-rcar.c
-@@ -1025,7 +1025,6 @@ static int rcar_i2c_probe(struct platfor
- struct rcar_i2c_priv *priv;
- struct i2c_adapter *adap;
- struct device *dev = &pdev->dev;
-- unsigned long irqflags = 0;
- irqreturn_t (*irqhandler)(int irq, void *ptr) = rcar_i2c_gen3_irq;
- int ret;
-
-@@ -1076,7 +1075,6 @@ static int rcar_i2c_probe(struct platfor
- rcar_i2c_write(priv, ICSAR, 0); /* Gen2: must be 0 if not using slave */
-
- if (priv->devtype < I2C_RCAR_GEN3) {
-- irqflags |= IRQF_NO_THREAD;
- irqhandler = rcar_i2c_gen2_irq;
- }
-
-@@ -1102,7 +1100,7 @@ static int rcar_i2c_probe(struct platfor
- if (ret < 0)
- goto out_pm_disable;
- priv->irq = ret;
-- ret = devm_request_irq(dev, priv->irq, irqhandler, irqflags, dev_name(dev), priv);
-+ ret = devm_request_irq(dev, priv->irq, irqhandler, 0, dev_name(dev), priv);
- if (ret < 0) {
- dev_err(dev, "cannot get irq %d\n", priv->irq);
- goto out_pm_disable;
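The underlying constraint, sketched (struct demo_priv and demo_irq are hypothetical): with IRQF_NO_THREAD the handler runs in hard-IRQ context even on PREEMPT_RT, where spinlock_t is a sleeping rtmutex, so a handler like the one below must be allowed to be threaded there.

struct demo_priv {
	spinlock_t lock;		/* sleeping lock on PREEMPT_RT */
	wait_queue_head_t wait;
};

static irqreturn_t demo_irq(int irq, void *ptr)
{
	struct demo_priv *priv = ptr;

	spin_lock(&priv->lock);		/* invalid in hard-IRQ context on RT */
	/* ... process the transfer, possibly wake_up(&priv->wait) ... */
	spin_unlock(&priv->lock);

	return IRQ_HANDLED;
}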
diff --git a/patches/kdb__only_use_atomic_consoles_for_output_mirroring.patch b/patches/kdb__only_use_atomic_consoles_for_output_mirroring.patch
deleted file mode 100644
index 175785fc9d23..000000000000
--- a/patches/kdb__only_use_atomic_consoles_for_output_mirroring.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-Subject: kdb: only use atomic consoles for output mirroring
-From: John Ogness <john.ogness@linutronix.de>
-Date: Fri Mar 19 14:57:31 2021 +0100
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Currently kdb uses the @oops_in_progress hack to mirror kdb output
-to all active consoles from NMI context. Ignoring locks is unsafe.
-Now that an NMI-safe atomic interface is available for consoles,
-use that interface to mirror kdb output.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
-
----
- kernel/debug/kdb/kdb_io.c | 18 ++++++------------
- 1 file changed, 6 insertions(+), 12 deletions(-)
----
---- a/kernel/debug/kdb/kdb_io.c
-+++ b/kernel/debug/kdb/kdb_io.c
-@@ -559,23 +559,17 @@ static void kdb_msg_write(const char *ms
- cp++;
- }
-
-+ /* mirror output on atomic consoles */
- for_each_console(c) {
- if (!(c->flags & CON_ENABLED))
- continue;
- if (c == dbg_io_ops->cons)
- continue;
-- /*
-- * Set oops_in_progress to encourage the console drivers to
-- * disregard their internal spin locks: in the current calling
-- * context the risk of deadlock is a bigger problem than risks
-- * due to re-entering the console driver. We operate directly on
-- * oops_in_progress rather than using bust_spinlocks() because
-- * the calls bust_spinlocks() makes on exit are not appropriate
-- * for this calling context.
-- */
-- ++oops_in_progress;
-- c->write(c, msg, msg_len);
-- --oops_in_progress;
-+
-+ if (!c->write_atomic)
-+ continue;
-+ c->write_atomic(c, msg, msg_len);
-+
- touch_nmi_watchdog();
- }
- }
diff --git a/patches/panic_remove_oops_id.patch b/patches/panic_remove_oops_id.patch
index 36e3753a6cf6..3813a0b370cb 100644
--- a/patches/panic_remove_oops_id.patch
+++ b/patches/panic_remove_oops_id.patch
@@ -26,7 +26,7 @@ Link: https://lore.kernel.org/r/Ybdi16aP2NEugWHq@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -538,26 +538,9 @@ void oops_enter(void)
+@@ -537,26 +537,9 @@ void oops_enter(void)
trigger_all_cpu_backtrace();
}
@@ -51,6 +51,6 @@ Link: https://lore.kernel.org/r/Ybdi16aP2NEugWHq@linutronix.de>
- init_oops_id();
- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
+ pr_warn("---[ end trace %016llx ]---\n", 0ULL);
- pr_flush(1000, true);
}
+ /*
diff --git a/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch b/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
deleted file mode 100644
index 0fa777df022d..000000000000
--- a/patches/printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
+++ /dev/null
@@ -1,40 +0,0 @@
-Subject: printk: Enhance the condition check of msleep in pr_flush()
-From: Chao Qin <chao.qin@intel.com>
-Date: Mon Jul 19 10:26:50 2021 +0800
-
-From: Chao Qin <chao.qin@intel.com>
-
-There is an msleep() in pr_flush(). If WARN() is called in the early
-boot stage, such as from an early_initcall, pr_flush() can run into
-msleep() while the process scheduler is not ready yet, and the system
-will then sleep forever.
-
-Until system_state reaches SYSTEM_RUNNING, make sure pr_flush()
-does not sleep.
-
-Fixes: c0b395bd0fe3 ("printk: add pr_flush()")
-Signed-off-by: Chao Qin <chao.qin@intel.com>
-Signed-off-by: Lili Li <lili.li@intel.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Reviewed-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Link: https://lore.kernel.org/lkml/20210719022649.3444072-1-chao.qin@intel.com
-
----
- kernel/printk/printk.c | 4 +++-
- 1 file changed, 3 insertions(+), 1 deletion(-)
----
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3655,7 +3655,9 @@ bool pr_flush(int timeout_ms, bool reset
- u64 diff;
- u64 seq;
-
-- may_sleep = (preemptible() && !in_softirq());
-+ may_sleep = (preemptible() &&
-+ !in_softirq() &&
-+ system_state >= SYSTEM_RUNNING);
-
- seq = prb_next_seq(prb);
-
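The resulting pattern generalizes to any helper that may run before the scheduler is up (a sketch, not part of the patch; delay_ms is hypothetical):

	/* Only sleep once the scheduler is running; otherwise busy-wait. */
	if (preemptible() && !in_softirq() && system_state >= SYSTEM_RUNNING)
		msleep(delay_ms);
	else
		mdelay(delay_ms);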
diff --git a/patches/printk__add_console_handover.patch b/patches/printk__add_console_handover.patch
deleted file mode 100644
index 10c1a5f566a7..000000000000
--- a/patches/printk__add_console_handover.patch
+++ /dev/null
@@ -1,80 +0,0 @@
-Subject: printk: add console handover
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:09 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-If earlyprintk is used, a boot console will print directly to the
-console immediately. The boot console will unregister itself as soon
-as a non-boot console registers. However, the non-boot console does
-not begin printing until its kthread has started. Since this happens
-much later, there is a long pause in the console output. If the
-ringbuffer is small, messages could even be dropped during the
-pause.
-
-Add a new CON_HANDOVER console flag to be used internally by printk
-in order to track which non-boot console took over from a boot
-console. If handover consoles have implemented write_atomic(), they
-are allowed to print directly to the console until their kthread can
-take over.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
-
----
- include/linux/console.h | 1 +
- kernel/printk/printk.c | 15 +++++++++++++--
- 2 files changed, 14 insertions(+), 2 deletions(-)
----
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -143,6 +143,7 @@ static inline int con_debug_leave(void)
- #define CON_ANYTIME (16) /* Safe to call when cpu is offline */
- #define CON_BRL (32) /* Used for a braille device */
- #define CON_EXTENDED (64) /* Use the extended output format a la /dev/kmsg */
-+#define CON_HANDOVER (128) /* Device was previously a boot console. */
-
- struct console {
- char name[16];
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1746,6 +1746,8 @@ static bool console_may_sync(struct cons
- return false;
- if (con->write_atomic && kernel_sync_mode())
- return true;
-+ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread)
-+ return true;
- if (con->write && (con->flags & CON_BOOT) && !con->thread)
- return true;
- return false;
-@@ -1761,7 +1763,14 @@ static bool call_sync_console_driver(str
- return true;
- }
-
-- if (con->write && (con->flags & CON_BOOT) && !con->thread) {
-+ if (con->write_atomic && (con->flags & CON_HANDOVER) && !con->thread) {
-+ if (console_trylock()) {
-+ con->write_atomic(con, text, text_len);
-+ console_unlock();
-+ return true;
-+ }
-+
-+ } else if (con->write && (con->flags & CON_BOOT) && !con->thread) {
- if (console_trylock()) {
- con->write(con, text, text_len);
- console_unlock();
-@@ -2892,8 +2901,10 @@ void register_console(struct console *ne
- * the real console are the same physical device, it's annoying to
- * see the beginning boot messages twice
- */
-- if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV))
-+ if (bcon && ((newcon->flags & (CON_CONSDEV | CON_BOOT)) == CON_CONSDEV)) {
- newcon->flags &= ~CON_PRINTBUFFER;
-+ newcon->flags |= CON_HANDOVER;
-+ }
-
- /*
- * Put this console in the list - keep the
diff --git a/patches/printk__add_pr_flush.patch b/patches/printk__add_pr_flush.patch
deleted file mode 100644
index ebf6398747e7..000000000000
--- a/patches/printk__add_pr_flush.patch
+++ /dev/null
@@ -1,215 +0,0 @@
-Subject: printk: add pr_flush()
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:10 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Provide a function to allow waiting for console printers to catch
-up to the latest logged message.
-
-Use pr_flush() to give console printers a chance to finish in
-critical situations if no atomic console is available. For now
-pr_flush() is only used in the most common error paths:
-panic(), print_oops_end_marker(), report_bug(), kmsg_dump().
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/printk.h | 7 ++++
- kernel/panic.c | 28 ++++++++++------
- kernel/printk/printk.c | 81 +++++++++++++++++++++++++++++++++++++++++++++++++
- lib/bug.c | 1
- 4 files changed, 106 insertions(+), 11 deletions(-)
----
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -161,6 +161,8 @@ int vprintk(const char *fmt, va_list arg
- asmlinkage __printf(1, 2) __cold
- int _printk(const char *fmt, ...);
-
-+bool pr_flush(int timeout_ms, bool reset_on_progress);
-+
- /*
- * Please don't use printk_ratelimit(), because it shares ratelimiting state
- * with all other unrelated printk_ratelimit() callsites. Instead use
-@@ -202,6 +204,11 @@ int _printk(const char *s, ...)
- return 0;
- }
-
-+static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
-+{
-+ return true;
-+}
-+
- static inline int printk_ratelimit(void)
- {
- return 0;
---- a/kernel/panic.c
-+++ b/kernel/panic.c
-@@ -178,12 +178,28 @@ static void panic_print_sys_info(void)
- void panic(const char *fmt, ...)
- {
- static char buf[1024];
-+ va_list args2;
- va_list args;
- long i, i_next = 0, len;
- int state = 0;
- int old_cpu, this_cpu;
- bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;
-
-+ console_verbose();
-+ pr_emerg("Kernel panic - not syncing:\n");
-+ va_start(args2, fmt);
-+ va_copy(args, args2);
-+ vprintk(fmt, args2);
-+ va_end(args2);
-+#ifdef CONFIG_DEBUG_BUGVERBOSE
-+ /*
-+ * Avoid nested stack-dumping if a panic occurs during oops processing
-+ */
-+ if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-+ dump_stack();
-+#endif
-+ pr_flush(1000, true);
-+
- /*
- * Disable local interrupts. This will prevent panic_smp_self_stop
- * from deadlocking the first cpu that invokes the panic, since
-@@ -214,24 +230,13 @@ void panic(const char *fmt, ...)
- if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu)
- panic_smp_self_stop();
-
-- console_verbose();
- bust_spinlocks(1);
-- va_start(args, fmt);
- len = vscnprintf(buf, sizeof(buf), fmt, args);
- va_end(args);
-
- if (len && buf[len - 1] == '\n')
- buf[len - 1] = '\0';
-
-- pr_emerg("Kernel panic - not syncing: %s\n", buf);
--#ifdef CONFIG_DEBUG_BUGVERBOSE
-- /*
-- * Avoid nested stack-dumping if a panic occurs during oops processing
-- */
-- if (!test_taint(TAINT_DIE) && oops_in_progress <= 1)
-- dump_stack();
--#endif
--
- /*
- * If kgdb is enabled, give it a chance to run before we stop all
- * the other CPUs or else we won't be able to debug processes left
-@@ -553,6 +558,7 @@ static void print_oops_end_marker(void)
- {
- init_oops_id();
- pr_warn("---[ end trace %016llx ]---\n", (unsigned long long)oops_id);
-+ pr_flush(1000, true);
- }
-
- /*
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -3291,6 +3291,12 @@ void kmsg_dump(enum kmsg_dump_reason rea
- sync_mode = true;
- pr_info("enabled sync mode\n");
- }
-+
-+ /*
-+ * Give the printing threads time to flush, allowing up to
-+ * 1s of no printing forward progress before giving up.
-+ */
-+ pr_flush(1000, true);
- }
-
- rcu_read_lock();
-@@ -3612,3 +3618,78 @@ bool kgdb_roundup_delay(unsigned int cpu
- }
- EXPORT_SYMBOL(kgdb_roundup_delay);
- #endif /* CONFIG_SMP */
-+
-+#ifdef CONFIG_PRINTK
-+static void pr_msleep(bool may_sleep, int ms)
-+{
-+ if (may_sleep) {
-+ msleep(ms);
-+ } else {
-+ while (ms--)
-+ udelay(1000);
-+ }
-+}
-+
-+/**
-+ * pr_flush() - Wait for printing threads to catch up.
-+ *
-+ * @timeout_ms: The maximum time (in ms) to wait.
-+ * @reset_on_progress: Reset the timeout if forward progress is seen.
-+ *
-+ * A value of 0 for @timeout_ms means no waiting will occur. A value of -1
-+ * represents infinite waiting.
-+ *
-+ * If @reset_on_progress is true, the timeout will be reset whenever any
-+ * printer has been seen to make some forward progress.
-+ *
-+ * Context: Any context.
-+ * Return: true if all enabled printers are caught up.
-+ */
-+bool pr_flush(int timeout_ms, bool reset_on_progress)
-+{
-+ int remaining = timeout_ms;
-+ struct console *con;
-+ u64 last_diff = 0;
-+ bool may_sleep;
-+ u64 printk_seq;
-+ u64 diff;
-+ u64 seq;
-+
-+ may_sleep = (preemptible() && !in_softirq());
-+
-+ seq = prb_next_seq(prb);
-+
-+ for (;;) {
-+ diff = 0;
-+
-+ for_each_console(con) {
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+ printk_seq = read_console_seq(con);
-+ if (printk_seq < seq)
-+ diff += seq - printk_seq;
-+ }
-+
-+ if (diff != last_diff && reset_on_progress)
-+ remaining = timeout_ms;
-+
-+ if (diff == 0 || remaining == 0)
-+ break;
-+
-+ if (remaining < 0) {
-+ pr_msleep(may_sleep, 100);
-+ } else if (remaining < 100) {
-+ pr_msleep(may_sleep, remaining);
-+ remaining = 0;
-+ } else {
-+ pr_msleep(may_sleep, 100);
-+ remaining -= 100;
-+ }
-+
-+ last_diff = diff;
-+ }
-+
-+ return (diff == 0);
-+}
-+EXPORT_SYMBOL(pr_flush);
-+#endif /* CONFIG_PRINTK */
---- a/lib/bug.c
-+++ b/lib/bug.c
-@@ -206,6 +206,7 @@ enum bug_trap_type report_bug(unsigned l
- else
- pr_crit("Kernel BUG at %pB [verbose debug info unavailable]\n",
- (void *)bugaddr);
-+ pr_flush(1000, true);
-
- return BUG_TRAP_TYPE_BUG;
- }
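Typical usage at the tail of an error path, as a sketch (the message is hypothetical; the timeout values mirror the calls added above):

	pr_emerg("demo: unrecoverable state, halting\n");
	/* Allow up to 1s without forward progress for printers to catch up. */
	pr_flush(1000, true);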
diff --git a/patches/printk__call_boot_delay_msec_in_printk_delay.patch b/patches/printk__call_boot_delay_msec_in_printk_delay.patch
deleted file mode 100644
index e7f15d686f16..000000000000
--- a/patches/printk__call_boot_delay_msec_in_printk_delay.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-Subject: printk: call boot_delay_msec() in printk_delay()
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:04 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-boot_delay_msec() is always called immediately before printk_delay(),
-so just call it from within printk_delay().
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/printk/printk.c | 7 ++++---
- 1 file changed, 4 insertions(+), 3 deletions(-)
----
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1750,8 +1750,10 @@ SYSCALL_DEFINE3(syslog, int, type, char
-
- int printk_delay_msec __read_mostly;
-
--static inline void printk_delay(void)
-+static inline void printk_delay(int level)
- {
-+ boot_delay_msec(level);
-+
- if (unlikely(printk_delay_msec)) {
- int m = printk_delay_msec;
-
-@@ -2224,8 +2226,7 @@ asmlinkage int vprintk_emit(int facility
- in_sched = true;
- }
-
-- boot_delay_msec(level);
-- printk_delay();
-+ printk_delay(level);
-
- printed_len = vprintk_store(facility, level, dev_info, fmt, args);
-
diff --git a/patches/printk__introduce_kernel_sync_mode.patch b/patches/printk__introduce_kernel_sync_mode.patch
deleted file mode 100644
index c4a6812c190c..000000000000
--- a/patches/printk__introduce_kernel_sync_mode.patch
+++ /dev/null
@@ -1,374 +0,0 @@
-Subject: printk: introduce kernel sync mode
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:06 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-When the kernel performs an OOPS, enter into "sync mode":
-
-- only atomic consoles (write_atomic() callback) will print
-- printing occurs within vprintk_store() instead of console_unlock()
-
-CONSOLE_LOG_MAX is moved to printk.h to support the per-console
-buffer used in sync mode.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/console.h | 4 +
- include/linux/printk.h | 6 +
- kernel/printk/printk.c | 178 +++++++++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 178 insertions(+), 10 deletions(-)
----
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -16,6 +16,7 @@
-
- #include <linux/atomic.h>
- #include <linux/types.h>
-+#include <linux/printk.h>
-
- struct vc_data;
- struct console_font_op;
-@@ -150,6 +151,9 @@ struct console {
- short flags;
- short index;
- int cflag;
-+#ifdef CONFIG_PRINTK
-+ char sync_buf[CONSOLE_LOG_MAX];
-+#endif
- uint ispeed;
- uint ospeed;
- void *data;
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -47,6 +47,12 @@ static inline const char *printk_skip_he
-
- #define CONSOLE_EXT_LOG_MAX 8192
-
-+/*
-+ * The maximum size of a record formatted for console printing
-+ * (i.e. with the prefix prepended to every line).
-+ */
-+#define CONSOLE_LOG_MAX 1024
-+
- /* printk's without a loglevel use this.. */
- #define MESSAGE_LOGLEVEL_DEFAULT CONFIG_MESSAGE_LOGLEVEL_DEFAULT
-
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -45,6 +45,7 @@
- #include <linux/ctype.h>
- #include <linux/uio.h>
- #include <linux/kgdb.h>
-+#include <linux/clocksource.h>
- #include <linux/sched/clock.h>
- #include <linux/sched/debug.h>
- #include <linux/sched/task_stack.h>
-@@ -355,6 +356,9 @@ static int console_msg_format = MSG_FORM
- static DEFINE_MUTEX(syslog_lock);
-
- #ifdef CONFIG_PRINTK
-+/* Set to enable sync mode. Once set, it is never cleared. */
-+static bool sync_mode;
-+
- DECLARE_WAIT_QUEUE_HEAD(log_wait);
- /* All 3 protected by @syslog_lock. */
- /* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -382,6 +386,20 @@ static struct latched_seq console_seq =
- .val[1] = 0,
- };
-
-+static struct latched_seq console_sync_seq = {
-+ .latch = SEQCNT_LATCH_ZERO(console_sync_seq.latch),
-+ .val[0] = 0,
-+ .val[1] = 0,
-+};
-+
-+#ifdef CONFIG_HAVE_NMI
-+static struct latched_seq console_sync_nmi_seq = {
-+ .latch = SEQCNT_LATCH_ZERO(console_sync_nmi_seq.latch),
-+ .val[0] = 0,
-+ .val[1] = 0,
-+};
-+#endif
-+
- /*
- * The next printk record to read after the last 'clear' command. There are
- * two copies (updated with seqcount_latch) so that reads can locklessly
-@@ -399,9 +417,6 @@ static struct latched_seq clear_seq = {
- #define PREFIX_MAX 32
- #endif
-
--/* the maximum size of a formatted record (i.e. with prefix added per line) */
--#define CONSOLE_LOG_MAX 1024
--
- /* the maximum size allowed to be reserved for a record */
- #define LOG_LINE_MAX (CONSOLE_LOG_MAX - PREFIX_MAX)
-
-@@ -1773,6 +1788,116 @@ static inline void printk_delay(int leve
- }
- }
-
-+static bool kernel_sync_mode(void)
-+{
-+ return (oops_in_progress || sync_mode);
-+}
-+
-+static bool console_may_sync(struct console *con)
-+{
-+ if (!(con->flags & CON_ENABLED))
-+ return false;
-+ if (con->write_atomic && kernel_sync_mode())
-+ return true;
-+ return false;
-+}
-+
-+static bool call_sync_console_driver(struct console *con, const char *text, size_t text_len)
-+{
-+ if (!(con->flags & CON_ENABLED))
-+ return false;
-+ if (con->write_atomic && kernel_sync_mode())
-+ con->write_atomic(con, text, text_len);
-+ else
-+ return false;
-+
-+ return true;
-+}
-+
-+static bool have_atomic_console(void)
-+{
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+ if (con->write_atomic)
-+ return true;
-+ }
-+ return false;
-+}
-+
-+static bool print_sync(struct console *con, u64 *seq)
-+{
-+ struct printk_info info;
-+ struct printk_record r;
-+ size_t text_len;
-+
-+ prb_rec_init_rd(&r, &info, &con->sync_buf[0], sizeof(con->sync_buf));
-+
-+ if (!prb_read_valid(prb, *seq, &r))
-+ return false;
-+
-+ text_len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
-+
-+ if (!call_sync_console_driver(con, &con->sync_buf[0], text_len))
-+ return false;
-+
-+ *seq = r.info->seq;
-+
-+ touch_softlockup_watchdog_sync();
-+ clocksource_touch_watchdog();
-+ rcu_cpu_stall_reset();
-+ touch_nmi_watchdog();
-+
-+ if (text_len)
-+ printk_delay(r.info->level);
-+
-+ return true;
-+}
-+
-+static u64 read_console_seq(void)
-+{
-+ u64 seq2;
-+ u64 seq;
-+
-+ seq = latched_seq_read_nolock(&console_seq);
-+ seq2 = latched_seq_read_nolock(&console_sync_seq);
-+ if (seq2 > seq)
-+ seq = seq2;
-+#ifdef CONFIG_HAVE_NMI
-+ seq2 = latched_seq_read_nolock(&console_sync_nmi_seq);
-+ if (seq2 > seq)
-+ seq = seq2;
-+#endif
-+ return seq;
-+}
-+
-+static void print_sync_until(struct console *con, u64 seq)
-+{
-+ u64 printk_seq;
-+
-+ while (!__printk_cpu_trylock())
-+ cpu_relax();
-+
-+ for (;;) {
-+ printk_seq = read_console_seq();
-+ if (printk_seq >= seq)
-+ break;
-+ if (!print_sync(con, &printk_seq))
-+ break;
-+#ifdef CONFIG_PRINTK_NMI
-+ if (in_nmi()) {
-+ latched_seq_write(&console_sync_nmi_seq, printk_seq + 1);
-+ continue;
-+ }
-+#endif
-+ latched_seq_write(&console_sync_seq, printk_seq + 1);
-+ }
-+
-+ __printk_cpu_unlock();
-+}
-+
- /*
- * Special console_lock variants that help to reduce the risk of soft-lockups.
- * They allow to pass console_lock to another printk() call using a busy wait.
-@@ -1947,6 +2072,8 @@ static void call_console_drivers(const c
- if (!cpu_online(smp_processor_id()) &&
- !(con->flags & CON_ANYTIME))
- continue;
-+ if (kernel_sync_mode())
-+ continue;
- if (con->flags & CON_EXTENDED)
- con->write(con, ext_text, ext_len);
- else {
-@@ -2115,6 +2242,7 @@ int vprintk_store(int facility, int leve
- const u32 caller_id = printk_caller_id();
- struct prb_reserved_entry e;
- enum printk_info_flags flags = 0;
-+ bool final_commit = false;
- struct printk_record r;
- unsigned long irqflags;
- u16 trunc_msg_len = 0;
-@@ -2125,6 +2253,7 @@ int vprintk_store(int facility, int leve
- u16 text_len;
- int ret = 0;
- u64 ts_nsec;
-+ u64 seq;
-
- /*
- * Since the duration of printk() can vary depending on the message
-@@ -2163,6 +2292,7 @@ int vprintk_store(int facility, int leve
- if (flags & LOG_CONT) {
- prb_rec_init_wr(&r, reserve_size);
- if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
-+ seq = r.info->seq;
- text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
- facility, &flags, fmt, args);
- r.info->text_len += text_len;
-@@ -2170,6 +2300,7 @@ int vprintk_store(int facility, int leve
- if (flags & LOG_NEWLINE) {
- r.info->flags |= LOG_NEWLINE;
- prb_final_commit(&e);
-+ final_commit = true;
- } else {
- prb_commit(&e);
- }
-@@ -2193,6 +2324,7 @@ int vprintk_store(int facility, int leve
- if (!prb_reserve(&e, prb, &r))
- goto out;
- }
-+ seq = r.info->seq;
-
- /* fill message */
- text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &flags, fmt, args);
-@@ -2208,13 +2340,25 @@ int vprintk_store(int facility, int leve
- memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
-
- /* A message without a trailing newline can be continued. */
-- if (!(flags & LOG_NEWLINE))
-+ if (!(flags & LOG_NEWLINE)) {
- prb_commit(&e);
-- else
-+ } else {
- prb_final_commit(&e);
-+ final_commit = true;
-+ }
-
- ret = text_len + trunc_msg_len;
- out:
-+ /* only the kernel may perform synchronous printing */
-+ if (facility == 0 && final_commit) {
-+ struct console *con;
-+
-+ for_each_console(con) {
-+ if (console_may_sync(con))
-+ print_sync_until(con, seq + 1);
-+ }
-+ }
-+
- printk_exit_irqrestore(recursion_ptr, irqflags);
- return ret;
- }
-@@ -2283,13 +2427,13 @@ EXPORT_SYMBOL(_printk);
-
- #else /* CONFIG_PRINTK */
-
--#define CONSOLE_LOG_MAX 0
- #define printk_time false
-
- #define prb_read_valid(rb, seq, r) false
- #define prb_first_valid_seq(rb) 0
--#define latched_seq_read_nolock(seq) 0
-+#define read_console_seq() 0
- #define latched_seq_write(dst, src)
-+#define kernel_sync_mode() false
-
- static u64 exclusive_console_stop_seq;
- static unsigned long console_dropped;
-@@ -2593,6 +2737,8 @@ static int have_callable_console(void)
- */
- static inline int can_use_console(void)
- {
-+ if (kernel_sync_mode())
-+ return false;
- return cpu_online(raw_smp_processor_id()) || have_callable_console();
- }
-
-@@ -2662,7 +2808,7 @@ void console_unlock(void)
- size_t len;
-
- skip:
-- seq = latched_seq_read_nolock(&console_seq);
-+ seq = read_console_seq();
- if (!prb_read_valid(prb, seq, &r))
- break;
-
-@@ -2742,7 +2888,7 @@ void console_unlock(void)
- * there's a new owner and the console_unlock() from them will do the
- * flush, no worries.
- */
-- retry = prb_read_valid(prb, latched_seq_read_nolock(&console_seq), NULL);
-+ retry = prb_read_valid(prb, read_console_seq(), NULL);
- if (retry && console_trylock())
- goto again;
- }
-@@ -3042,7 +3188,7 @@ void register_console(struct console *ne
- * ignores console_lock.
- */
- exclusive_console = newcon;
-- exclusive_console_stop_seq = latched_seq_read_nolock(&console_seq);
-+ exclusive_console_stop_seq = read_console_seq();
-
- /* Get a consistent copy of @syslog_seq. */
- mutex_lock(&syslog_lock);
-@@ -3417,6 +3563,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
- {
- struct kmsg_dumper *dumper;
-
-+ if (!oops_in_progress) {
-+ /*
-+ * If atomic consoles are available, activate kernel sync mode
-+ * to make sure any final messages are visible. The trailing
-+ * printk message is important to flush any pending messages.
-+ */
-+ if (have_atomic_console()) {
-+ sync_mode = true;
-+ pr_info("enabled sync mode\n");
-+ }
-+ }
-+
- rcu_read_lock();
- list_for_each_entry_rcu(dumper, &dump_list, list) {
- enum kmsg_dump_reason max_reason = dumper->max_reason;
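The latched sequence counters introduced above follow the usual seqcount_latch pattern; a lockless reader can be sketched as below (demo_latched_read is hypothetical and mirrors latched_seq_read_nolock() in printk.c):

static u64 demo_latched_read(struct latched_seq *ls)
{
	unsigned int seq = raw_read_seqcount_latch(&ls->latch);

	/* Writers update val[0] and val[1] alternately under the latch;
	 * the copy selected by the low bit is always consistent.
	 */
	return ls->val[seq & 0x1];
}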
diff --git a/patches/printk__move_console_printing_to_kthreads.patch b/patches/printk__move_console_printing_to_kthreads.patch
deleted file mode 100644
index 09d10e9f656f..000000000000
--- a/patches/printk__move_console_printing_to_kthreads.patch
+++ /dev/null
@@ -1,987 +0,0 @@
-Subject: printk: move console printing to kthreads
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:07 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Create a kthread for each console to perform console printing. Now
-all console printing is fully asynchronous except for the boot
-console and when the kernel enters sync mode (and there are atomic
-consoles available).
-
-The console_lock() and console_unlock() functions now only do what
-their names say: locking and unlocking of the console.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/console.h | 12
- kernel/printk/printk.c | 717 ++++++++++++++----------------------------------
- 2 files changed, 236 insertions(+), 493 deletions(-)
----
---- a/include/linux/console.h
-+++ b/include/linux/console.h
-@@ -17,6 +17,12 @@
- #include <linux/atomic.h>
- #include <linux/types.h>
- #include <linux/printk.h>
-+#include <linux/seqlock.h>
-+
-+struct latched_seq {
-+ seqcount_latch_t latch;
-+ u64 val[2];
-+};
-
- struct vc_data;
- struct console_font_op;
-@@ -153,9 +159,15 @@ struct console {
- int cflag;
- #ifdef CONFIG_PRINTK
- char sync_buf[CONSOLE_LOG_MAX];
-+ struct latched_seq printk_seq;
-+ struct latched_seq printk_sync_seq;
-+#ifdef CONFIG_HAVE_NMI
-+ struct latched_seq printk_sync_nmi_seq;
- #endif
-+#endif /* CONFIG_PRINTK */
- uint ispeed;
- uint ospeed;
-+ struct task_struct *thread;
- void *data;
- struct console *next;
- };
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -45,6 +45,7 @@
- #include <linux/ctype.h>
- #include <linux/uio.h>
- #include <linux/kgdb.h>
-+#include <linux/kthread.h>
- #include <linux/clocksource.h>
- #include <linux/sched/clock.h>
- #include <linux/sched/debug.h>
-@@ -269,11 +270,6 @@ static void __up_console_sem(unsigned lo
- static int console_locked, console_suspended;
-
- /*
-- * If exclusive_console is non-NULL then only this console is to be printed to.
-- */
--static struct console *exclusive_console;
--
--/*
- * Array of consoles built from command line options (console=)
- */
-
-@@ -352,10 +348,10 @@ static int console_msg_format = MSG_FORM
- * non-printable characters are escaped in the "\xff" notation.
- */
-
-+#ifdef CONFIG_PRINTK
- /* syslog_lock protects syslog_* variables and write access to clear_seq. */
- static DEFINE_MUTEX(syslog_lock);
-
--#ifdef CONFIG_PRINTK
- /* Set to enable sync mode. Once set, it is never cleared. */
- static bool sync_mode;
-
-@@ -366,40 +362,6 @@ static u64 syslog_seq;
- static size_t syslog_partial;
- static bool syslog_time;
-
--/* Both protected by @console_sem. */
--static u64 exclusive_console_stop_seq;
--static unsigned long console_dropped;
--
--struct latched_seq {
-- seqcount_latch_t latch;
-- u64 val[2];
--};
--
--/*
-- * The next printk record to write to the console. There are two
-- * copies (updated with seqcount_latch) so that reads can locklessly
-- * access a valid value. Writers are synchronized by @console_sem.
-- */
--static struct latched_seq console_seq = {
-- .latch = SEQCNT_LATCH_ZERO(console_seq.latch),
-- .val[0] = 0,
-- .val[1] = 0,
--};
--
--static struct latched_seq console_sync_seq = {
-- .latch = SEQCNT_LATCH_ZERO(console_sync_seq.latch),
-- .val[0] = 0,
-- .val[1] = 0,
--};
--
--#ifdef CONFIG_HAVE_NMI
--static struct latched_seq console_sync_nmi_seq = {
-- .latch = SEQCNT_LATCH_ZERO(console_sync_nmi_seq.latch),
-- .val[0] = 0,
-- .val[1] = 0,
--};
--#endif
--
- /*
- * The next printk record to read after the last 'clear' command. There are
- * two copies (updated with seqcount_latch) so that reads can locklessly
-@@ -1799,6 +1761,8 @@ static bool console_may_sync(struct cons
- return false;
- if (con->write_atomic && kernel_sync_mode())
- return true;
-+ if (con->write && (con->flags & CON_BOOT) && !con->thread)
-+ return true;
- return false;
- }
-
-@@ -1806,12 +1770,21 @@ static bool call_sync_console_driver(str
- {
- if (!(con->flags & CON_ENABLED))
- return false;
-- if (con->write_atomic && kernel_sync_mode())
-+
-+ if (con->write_atomic && kernel_sync_mode()) {
- con->write_atomic(con, text, text_len);
-- else
-- return false;
-+ return true;
-+ }
-
-- return true;
-+ if (con->write && (con->flags & CON_BOOT) && !con->thread) {
-+ if (console_trylock()) {
-+ con->write(con, text, text_len);
-+ console_unlock();
-+ return true;
-+ }
-+ }
-+
-+ return false;
- }
-
- static bool have_atomic_console(void)
-@@ -1856,24 +1829,24 @@ static bool print_sync(struct console *c
- return true;
- }
-
--static u64 read_console_seq(void)
-+static u64 read_console_seq(struct console *con)
- {
- u64 seq2;
- u64 seq;
-
-- seq = latched_seq_read_nolock(&console_seq);
-- seq2 = latched_seq_read_nolock(&console_sync_seq);
-+ seq = latched_seq_read_nolock(&con->printk_seq);
-+ seq2 = latched_seq_read_nolock(&con->printk_sync_seq);
- if (seq2 > seq)
- seq = seq2;
- #ifdef CONFIG_HAVE_NMI
-- seq2 = latched_seq_read_nolock(&console_sync_nmi_seq);
-+ seq2 = latched_seq_read_nolock(&con->printk_sync_nmi_seq);
- if (seq2 > seq)
- seq = seq2;
- #endif
- return seq;
- }
-
--static void print_sync_until(struct console *con, u64 seq)
-+static void print_sync_until(struct console *con, u64 seq, bool is_locked)
- {
- u64 printk_seq;
-
-@@ -1881,210 +1854,26 @@ static void print_sync_until(struct cons
- cpu_relax();
-
- for (;;) {
-- printk_seq = read_console_seq();
-+ printk_seq = read_console_seq(con);
- if (printk_seq >= seq)
- break;
- if (!print_sync(con, &printk_seq))
- break;
-+
-+ if (is_locked)
-+ latched_seq_write(&con->printk_seq, printk_seq + 1);
- #ifdef CONFIG_PRINTK_NMI
-- if (in_nmi()) {
-- latched_seq_write(&console_sync_nmi_seq, printk_seq + 1);
-- continue;
-- }
-+ else if (in_nmi())
-+ latched_seq_write(&con->printk_sync_nmi_seq, printk_seq + 1);
- #endif
-- latched_seq_write(&console_sync_seq, printk_seq + 1);
-+ else
-+ latched_seq_write(&con->printk_sync_seq, printk_seq + 1);
- }
-
- __printk_cpu_unlock();
- }
-
- /*
-- * Special console_lock variants that help to reduce the risk of soft-lockups.
-- * They allow to pass console_lock to another printk() call using a busy wait.
-- */
--
--#ifdef CONFIG_LOCKDEP
--static struct lockdep_map console_owner_dep_map = {
-- .name = "console_owner"
--};
--#endif
--
--static DEFINE_RAW_SPINLOCK(console_owner_lock);
--static struct task_struct *console_owner;
--static bool console_waiter;
--
--/**
-- * console_lock_spinning_enable - mark beginning of code where another
-- * thread might safely busy wait
-- *
-- * This basically converts console_lock into a spinlock. This marks
-- * the section where the console_lock owner can not sleep, because
-- * there may be a waiter spinning (like a spinlock). Also it must be
-- * ready to hand over the lock at the end of the section.
-- */
--static void console_lock_spinning_enable(void)
--{
-- raw_spin_lock(&console_owner_lock);
-- console_owner = current;
-- raw_spin_unlock(&console_owner_lock);
--
-- /* The waiter may spin on us after setting console_owner */
-- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
--}
--
--/**
-- * console_lock_spinning_disable_and_check - mark end of code where another
-- * thread was able to busy wait and check if there is a waiter
-- *
-- * This is called at the end of the section where spinning is allowed.
-- * It has two functions. First, it is a signal that it is no longer
-- * safe to start busy waiting for the lock. Second, it checks if
-- * there is a busy waiter and passes the lock rights to her.
-- *
-- * Important: Callers lose the lock if there was a busy waiter.
-- * They must not touch items synchronized by console_lock
-- * in this case.
-- *
-- * Return: 1 if the lock rights were passed, 0 otherwise.
-- */
--static int console_lock_spinning_disable_and_check(void)
--{
-- int waiter;
--
-- raw_spin_lock(&console_owner_lock);
-- waiter = READ_ONCE(console_waiter);
-- console_owner = NULL;
-- raw_spin_unlock(&console_owner_lock);
--
-- if (!waiter) {
-- spin_release(&console_owner_dep_map, _THIS_IP_);
-- return 0;
-- }
--
-- /* The waiter is now free to continue */
-- WRITE_ONCE(console_waiter, false);
--
-- spin_release(&console_owner_dep_map, _THIS_IP_);
--
-- /*
-- * Hand off console_lock to waiter. The waiter will perform
-- * the up(). After this, the waiter is the console_lock owner.
-- */
-- mutex_release(&console_lock_dep_map, _THIS_IP_);
-- return 1;
--}
--
--/**
-- * console_trylock_spinning - try to get console_lock by busy waiting
-- *
-- * This allows to busy wait for the console_lock when the current
-- * owner is running in specially marked sections. It means that
-- * the current owner is running and cannot reschedule until it
-- * is ready to lose the lock.
-- *
-- * Return: 1 if we got the lock, 0 otherwise
-- */
--static int console_trylock_spinning(void)
--{
-- struct task_struct *owner = NULL;
-- bool waiter;
-- bool spin = false;
-- unsigned long flags;
--
-- if (console_trylock())
-- return 1;
--
-- printk_safe_enter_irqsave(flags);
--
-- raw_spin_lock(&console_owner_lock);
-- owner = READ_ONCE(console_owner);
-- waiter = READ_ONCE(console_waiter);
-- if (!waiter && owner && owner != current) {
-- WRITE_ONCE(console_waiter, true);
-- spin = true;
-- }
-- raw_spin_unlock(&console_owner_lock);
--
-- /*
-- * If there is an active printk() writing to the
-- * consoles, instead of having it write our data too,
-- * see if we can offload that load from the active
-- * printer, and do some printing ourselves.
-- * Go into a spin only if there isn't already a waiter
-- * spinning, and there is an active printer, and
-- * that active printer isn't us (recursive printk?).
-- */
-- if (!spin) {
-- printk_safe_exit_irqrestore(flags);
-- return 0;
-- }
--
-- /* We spin waiting for the owner to release us */
-- spin_acquire(&console_owner_dep_map, 0, 0, _THIS_IP_);
-- /* Owner will clear console_waiter on hand off */
-- while (READ_ONCE(console_waiter))
-- cpu_relax();
-- spin_release(&console_owner_dep_map, _THIS_IP_);
--
-- printk_safe_exit_irqrestore(flags);
-- /*
-- * The owner passed the console lock to us.
-- * Since we did not spin on console lock, annotate
-- * this as a trylock. Otherwise lockdep will
-- * complain.
-- */
-- mutex_acquire(&console_lock_dep_map, 0, 1, _THIS_IP_);
--
-- return 1;
--}
--
--/*
-- * Call the console drivers, asking them to write out
-- * log_buf[start] to log_buf[end - 1].
-- * The console_lock must be held.
-- */
--static void call_console_drivers(const char *ext_text, size_t ext_len,
-- const char *text, size_t len)
--{
-- static char dropped_text[64];
-- size_t dropped_len = 0;
-- struct console *con;
--
-- trace_console_rcuidle(text, len);
--
-- if (!console_drivers)
-- return;
--
-- if (console_dropped) {
-- dropped_len = snprintf(dropped_text, sizeof(dropped_text),
-- "** %lu printk messages dropped **\n",
-- console_dropped);
-- console_dropped = 0;
-- }
--
-- for_each_console(con) {
-- if (exclusive_console && con != exclusive_console)
-- continue;
-- if (!(con->flags & CON_ENABLED))
-- continue;
-- if (!con->write)
-- continue;
-- if (!cpu_online(smp_processor_id()) &&
-- !(con->flags & CON_ANYTIME))
-- continue;
-- if (kernel_sync_mode())
-- continue;
-- if (con->flags & CON_EXTENDED)
-- con->write(con, ext_text, ext_len);
-- else {
-- if (dropped_len)
-- con->write(con, dropped_text, dropped_len);
-- con->write(con, text, len);
-- }
-- }
--}
--
--/*
- * Recursion is tracked separately on each CPU. If NMIs are supported, an
- * additional NMI context per CPU is also separately tracked. Until per-CPU
- * is available, a separate "early tracking" is performed.
-@@ -2355,7 +2144,7 @@ int vprintk_store(int facility, int leve
-
- for_each_console(con) {
- if (console_may_sync(con))
-- print_sync_until(con, seq + 1);
-+ print_sync_until(con, seq + 1, false);
- }
- }
-
-@@ -2368,39 +2157,16 @@ asmlinkage int vprintk_emit(int facility
- const char *fmt, va_list args)
- {
- int printed_len;
-- bool in_sched = false;
-
- /* Suppress unimportant messages after panic happens */
- if (unlikely(suppress_printk))
- return 0;
-
-- if (level == LOGLEVEL_SCHED) {
-+ if (level == LOGLEVEL_SCHED)
- level = LOGLEVEL_DEFAULT;
-- in_sched = true;
-- }
--
-- printk_delay(level);
-
- printed_len = vprintk_store(facility, level, dev_info, fmt, args);
-
-- /* If called from the scheduler, we can not call up(). */
-- if (!in_sched) {
-- /*
-- * Disable preemption to avoid being preempted while holding
-- * console_sem which would prevent anyone from printing to
-- * console
-- */
-- preempt_disable();
-- /*
-- * Try to acquire and then immediately release the console
-- * semaphore. The release will print out buffers and wake up
-- * /dev/kmsg and syslog() users.
-- */
-- if (console_trylock_spinning())
-- console_unlock();
-- preempt_enable();
-- }
--
- wake_up_klogd();
- return printed_len;
- }
-@@ -2425,37 +2191,162 @@ asmlinkage __visible int _printk(const c
- }
- EXPORT_SYMBOL(_printk);
-
--#else /* CONFIG_PRINTK */
-+static int printk_kthread_func(void *data)
-+{
-+ struct console *con = data;
-+ unsigned long dropped = 0;
-+ char *dropped_text = NULL;
-+ struct printk_info info;
-+ struct printk_record r;
-+ char *ext_text = NULL;
-+ size_t dropped_len;
-+ int ret = -ENOMEM;
-+ char *text = NULL;
-+ char *write_text;
-+ size_t len;
-+ int error;
-+ u64 seq;
-+
-+ if (con->flags & CON_EXTENDED) {
-+ ext_text = kmalloc(CONSOLE_EXT_LOG_MAX, GFP_KERNEL);
-+ if (!ext_text)
-+ goto out;
-+ }
-+ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
-+ dropped_text = kmalloc(64, GFP_KERNEL);
-+ if (!text || !dropped_text)
-+ goto out;
-+ if (con->flags & CON_EXTENDED)
-+ write_text = ext_text;
-+ else
-+ write_text = text;
-+
-+ seq = read_console_seq(con);
-+
-+ prb_rec_init_rd(&r, &info, text, LOG_LINE_MAX + PREFIX_MAX);
-
--#define printk_time false
-+ for (;;) {
-+ error = wait_event_interruptible(log_wait,
-+ prb_read_valid(prb, seq, &r) || kthread_should_stop());
-
--#define prb_read_valid(rb, seq, r) false
--#define prb_first_valid_seq(rb) 0
--#define read_console_seq() 0
--#define latched_seq_write(dst, src)
--#define kernel_sync_mode() false
-+ if (kthread_should_stop())
-+ break;
-
--static u64 exclusive_console_stop_seq;
--static unsigned long console_dropped;
-+ if (error)
-+ continue;
-
--static size_t record_print_text(const struct printk_record *r,
-- bool syslog, bool time)
-+ if (seq != r.info->seq) {
-+ dropped += r.info->seq - seq;
-+ seq = r.info->seq;
-+ }
-+
-+ seq++;
-+
-+ if (!(con->flags & CON_ENABLED))
-+ continue;
-+
-+ if (suppress_message_printing(r.info->level))
-+ continue;
-+
-+ if (con->flags & CON_EXTENDED) {
-+ len = info_print_ext_header(ext_text,
-+ CONSOLE_EXT_LOG_MAX,
-+ r.info);
-+ len += msg_print_ext_body(ext_text + len,
-+ CONSOLE_EXT_LOG_MAX - len,
-+ &r.text_buf[0], r.info->text_len,
-+ &r.info->dev_info);
-+ } else {
-+ len = record_print_text(&r,
-+ console_msg_format & MSG_FORMAT_SYSLOG,
-+ printk_time);
-+ }
-+
-+ console_lock();
-+
-+ /*
-+ * Even though the printk kthread is always preemptible, it is
-+ * still not allowed to call cond_resched() from within
-+ * console drivers. The task may become non-preemptible in the
-+ * console driver call chain. For example, vt_console_print()
-+ * takes a spinlock and then can call into fbcon_redraw(),
-+ * which can conditionally invoke cond_resched().
-+ */
-+ console_may_schedule = 0;
-+
-+ if (kernel_sync_mode() && con->write_atomic) {
-+ console_unlock();
-+ break;
-+ }
-+
-+ if (!(con->flags & CON_EXTENDED) && dropped) {
-+ dropped_len = snprintf(dropped_text, 64,
-+ "** %lu printk messages dropped **\n",
-+ dropped);
-+ dropped = 0;
-+
-+ con->write(con, dropped_text, dropped_len);
-+ printk_delay(r.info->level);
-+ }
-+
-+ con->write(con, write_text, len);
-+ if (len)
-+ printk_delay(r.info->level);
-+
-+ latched_seq_write(&con->printk_seq, seq);
-+
-+ console_unlock();
-+ }
-+ ret = 0;
-+out:
-+ kfree(dropped_text);
-+ kfree(text);
-+ kfree(ext_text);
-+ pr_info("%sconsole [%s%d]: printing thread stopped\n",
-+ (con->flags & CON_BOOT) ? "boot" : "",
-+ con->name, con->index);
-+ return ret;
-+}
-+
-+/* Must be called within console_lock(). */
-+static void start_printk_kthread(struct console *con)
- {
-- return 0;
-+ con->thread = kthread_run(printk_kthread_func, con,
-+ "pr/%s%d", con->name, con->index);
-+ if (IS_ERR(con->thread)) {
-+ pr_err("%sconsole [%s%d]: unable to start printing thread\n",
-+ (con->flags & CON_BOOT) ? "boot" : "",
-+ con->name, con->index);
-+ return;
-+ }
-+ pr_info("%sconsole [%s%d]: printing thread started\n",
-+ (con->flags & CON_BOOT) ? "boot" : "",
-+ con->name, con->index);
- }
--static ssize_t info_print_ext_header(char *buf, size_t size,
-- struct printk_info *info)
-+
-+/* protected by console_lock */
-+static bool kthreads_started;
-+
-+/* Must be called within console_lock(). */
-+static void console_try_thread(struct console *con)
- {
-- return 0;
-+ if (kthreads_started) {
-+ start_printk_kthread(con);
-+ return;
-+ }
-+
-+ /*
-+ * The printing threads have not been started yet. If this console
-+ * can print synchronously, print all unprinted messages.
-+ */
-+ if (console_may_sync(con)) {
-+ unsigned long flags;
-+
-+ local_irq_save(flags);
-+ print_sync_until(con, prb_next_seq(prb), true);
-+ local_irq_restore(flags);
-+ }
- }
--static ssize_t msg_print_ext_body(char *buf, size_t size,
-- char *text, size_t text_len,
-- struct dev_printk_info *dev_info) { return 0; }
--static void console_lock_spinning_enable(void) { }
--static int console_lock_spinning_disable_and_check(void) { return 0; }
--static void call_console_drivers(const char *ext_text, size_t ext_len,
-- const char *text, size_t len) {}
--static bool suppress_message_printing(int level) { return false; }
-
- #endif /* CONFIG_PRINTK */
-
-@@ -2712,36 +2603,6 @@ int is_console_locked(void)
- }
- EXPORT_SYMBOL(is_console_locked);
-
--/*
-- * Check if we have any console that is capable of printing while cpu is
-- * booting or shutting down. Requires console_sem.
-- */
--static int have_callable_console(void)
--{
-- struct console *con;
--
-- for_each_console(con)
-- if ((con->flags & CON_ENABLED) &&
-- (con->flags & CON_ANYTIME))
-- return 1;
--
-- return 0;
--}
--
--/*
-- * Can we actually use the console at this time on this cpu?
-- *
-- * Console drivers may assume that per-cpu resources have been allocated. So
-- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
-- * call them until this CPU is officially up.
-- */
--static inline int can_use_console(void)
--{
-- if (kernel_sync_mode())
-- return false;
-- return cpu_online(raw_smp_processor_id()) || have_callable_console();
--}
--
- /**
- * console_unlock - unlock the console system
- *
-@@ -2758,139 +2619,13 @@ static inline int can_use_console(void)
- */
- void console_unlock(void)
- {
-- static char ext_text[CONSOLE_EXT_LOG_MAX];
-- static char text[CONSOLE_LOG_MAX];
-- unsigned long flags;
-- bool do_cond_resched, retry;
-- struct printk_info info;
-- struct printk_record r;
-- u64 seq;
--
- if (console_suspended) {
- up_console_sem();
- return;
- }
-
-- prb_rec_init_rd(&r, &info, text, sizeof(text));
--
-- /*
-- * Console drivers are called with interrupts disabled, so
-- * @console_may_schedule should be cleared before; however, we may
-- * end up dumping a lot of lines, for example, if called from
-- * console registration path, and should invoke cond_resched()
-- * between lines if allowable. Not doing so can cause a very long
-- * scheduling stall on a slow console leading to RCU stall and
-- * softlockup warnings which exacerbate the issue with more
-- * messages practically incapacitating the system.
-- *
-- * console_trylock() is not able to detect the preemptive
-- * context reliably. Therefore the value must be stored before
-- * and cleared after the "again" goto label.
-- */
-- do_cond_resched = console_may_schedule;
--again:
-- console_may_schedule = 0;
--
-- /*
-- * We released the console_sem lock, so we need to recheck if
-- * cpu is online and (if not) is there at least one CON_ANYTIME
-- * console.
-- */
-- if (!can_use_console()) {
-- console_locked = 0;
-- up_console_sem();
-- return;
-- }
--
-- for (;;) {
-- size_t ext_len = 0;
-- int handover;
-- size_t len;
--
--skip:
-- seq = read_console_seq();
-- if (!prb_read_valid(prb, seq, &r))
-- break;
--
-- if (seq != r.info->seq) {
-- console_dropped += r.info->seq - seq;
-- latched_seq_write(&console_seq, r.info->seq);
-- seq = r.info->seq;
-- }
--
-- if (suppress_message_printing(r.info->level)) {
-- /*
-- * Skip record we have buffered and already printed
-- * directly to the console when we received it, and
-- * record that has level above the console loglevel.
-- */
-- latched_seq_write(&console_seq, seq + 1);
-- goto skip;
-- }
--
-- /* Output to all consoles once old messages replayed. */
-- if (unlikely(exclusive_console &&
-- seq >= exclusive_console_stop_seq)) {
-- exclusive_console = NULL;
-- }
--
-- /*
-- * Handle extended console text first because later
-- * record_print_text() will modify the record buffer in-place.
-- */
-- if (nr_ext_console_drivers) {
-- ext_len = info_print_ext_header(ext_text,
-- sizeof(ext_text),
-- r.info);
-- ext_len += msg_print_ext_body(ext_text + ext_len,
-- sizeof(ext_text) - ext_len,
-- &r.text_buf[0],
-- r.info->text_len,
-- &r.info->dev_info);
-- }
-- len = record_print_text(&r,
-- console_msg_format & MSG_FORMAT_SYSLOG,
-- printk_time);
-- latched_seq_write(&console_seq, seq + 1);
--
-- /*
-- * While actively printing out messages, if another printk()
-- * were to occur on another CPU, it may wait for this one to
-- * finish. This task can not be preempted if there is a
-- * waiter waiting to take over.
-- *
-- * Interrupts are disabled because the hand over to a waiter
-- * must not be interrupted until the hand over is completed
-- * (@console_waiter is cleared).
-- */
-- printk_safe_enter_irqsave(flags);
-- console_lock_spinning_enable();
--
-- stop_critical_timings(); /* don't trace print latency */
-- call_console_drivers(ext_text, ext_len, text, len);
-- start_critical_timings();
--
-- handover = console_lock_spinning_disable_and_check();
-- printk_safe_exit_irqrestore(flags);
-- if (handover)
-- return;
--
-- if (do_cond_resched)
-- cond_resched();
-- }
--
- console_locked = 0;
- up_console_sem();
--
-- /*
-- * Someone could have filled up the buffer again, so re-check if there's
-- * something to flush. In case we cannot trylock the console_sem again,
-- * there's a new owner and the console_unlock() from them will do the
-- * flush, no worries.
-- */
-- retry = prb_read_valid(prb, read_console_seq(), NULL);
-- if (retry && console_trylock())
-- goto again;
- }
- EXPORT_SYMBOL(console_unlock);
-
-@@ -2940,19 +2675,20 @@ void console_unblank(void)
- */
- void console_flush_on_panic(enum con_flush_mode mode)
- {
-- if (console_trylock()) {
-- if (mode == CONSOLE_REPLAY_ALL)
-- latched_seq_write(&console_seq, prb_first_valid_seq(prb));
-- } else {
-- /*
-- * Another context is holding the console lock and
-- * @console_may_schedule may be set. Ignore and proceed to
-- * unlock so that messages are flushed out. As this can be
-- * called from any context and we don't want to get preempted
-- * while flushing, ensure @console_may_schedule is cleared.
-- */
-- console_may_schedule = 0;
-+ if (!console_trylock())
-+ return;
-+
-+#ifdef CONFIG_PRINTK
-+ if (mode == CONSOLE_REPLAY_ALL) {
-+ struct console *c;
-+ u64 seq;
-+
-+ seq = prb_first_valid_seq(prb);
-+ for_each_console(c)
-+ latched_seq_write(&c->printk_seq, seq);
- }
-+#endif
-+
- console_unlock();
- }
-
-@@ -3088,6 +2824,7 @@ static int try_enable_new_console(struct
- void register_console(struct console *newcon)
- {
- struct console *bcon = NULL;
-+ u64 __maybe_unused seq = 0;
- int err;
-
- for_each_console(bcon) {
-@@ -3110,6 +2847,8 @@ void register_console(struct console *ne
- }
- }
-
-+ newcon->thread = NULL;
-+
- if (console_drivers && console_drivers->flags & CON_BOOT)
- bcon = console_drivers;
-
-@@ -3174,27 +2913,21 @@ void register_console(struct console *ne
- if (newcon->flags & CON_EXTENDED)
- nr_ext_console_drivers++;
-
-- if (newcon->flags & CON_PRINTBUFFER) {
-- /*
-- * console_unlock(); will print out the buffered messages
-- * for us.
-- *
-- * We're about to replay the log buffer. Only do this to the
-- * just-registered console to avoid excessive message spam to
-- * the already-registered consoles.
-- *
-- * Set exclusive_console with disabled interrupts to reduce
-- * race window with eventual console_flush_on_panic() that
-- * ignores console_lock.
-- */
-- exclusive_console = newcon;
-- exclusive_console_stop_seq = read_console_seq();
-+#ifdef CONFIG_PRINTK
-+ if (!(newcon->flags & CON_PRINTBUFFER))
-+ seq = prb_next_seq(prb);
-
-- /* Get a consistent copy of @syslog_seq. */
-- mutex_lock(&syslog_lock);
-- latched_seq_write(&console_seq, syslog_seq);
-- mutex_unlock(&syslog_lock);
-- }
-+ seqcount_latch_init(&newcon->printk_seq.latch);
-+ latched_seq_write(&newcon->printk_seq, seq);
-+ seqcount_latch_init(&newcon->printk_sync_seq.latch);
-+ latched_seq_write(&newcon->printk_sync_seq, seq);
-+#ifdef CONFIG_HAVE_NMI
-+ seqcount_latch_init(&newcon->printk_sync_nmi_seq.latch);
-+ latched_seq_write(&newcon->printk_sync_nmi_seq, seq);
-+#endif
-+
-+ console_try_thread(newcon);
-+#endif /* CONFIG_PRINTK */
- console_unlock();
- console_sysfs_notify();
-
-@@ -3268,6 +3001,9 @@ int unregister_console(struct console *c
- console_unlock();
- console_sysfs_notify();
-
-+ if (console->thread && !IS_ERR(console->thread))
-+ kthread_stop(console->thread);
-+
- if (console->exit)
- res = console->exit(console);
-
-@@ -3350,6 +3086,15 @@ static int __init printk_late_init(void)
- unregister_console(con);
- }
- }
-+
-+#ifdef CONFIG_PRINTK
-+ console_lock();
-+ for_each_console(con)
-+ start_printk_kthread(con);
-+ kthreads_started = true;
-+ console_unlock();
-+#endif
-+
- ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
- console_cpu_notify);
- WARN_ON(ret < 0);
-@@ -3365,7 +3110,6 @@ late_initcall(printk_late_init);
- * Delayed printk version, for scheduler-internal messages:
- */
- #define PRINTK_PENDING_WAKEUP 0x01
--#define PRINTK_PENDING_OUTPUT 0x02
-
- static DEFINE_PER_CPU(int, printk_pending);
-
-@@ -3373,14 +3117,8 @@ static void wake_up_klogd_work_func(stru
- {
- int pending = __this_cpu_xchg(printk_pending, 0);
-
-- if (pending & PRINTK_PENDING_OUTPUT) {
-- /* If trylock fails, someone else is doing the printing */
-- if (console_trylock())
-- console_unlock();
-- }
--
- if (pending & PRINTK_PENDING_WAKEUP)
-- wake_up_interruptible(&log_wait);
-+ wake_up_interruptible_all(&log_wait);
- }
-
- static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) =
-@@ -3401,18 +3139,11 @@ void wake_up_klogd(void)
-
- void defer_console_output(void)
- {
-- if (!printk_percpu_data_ready())
-- return;
--
-- preempt_disable();
-- __this_cpu_or(printk_pending, PRINTK_PENDING_OUTPUT);
-- irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
-- preempt_enable();
- }
-
- void printk_trigger_flush(void)
- {
-- defer_console_output();
-+ wake_up_klogd();
- }
-
- int vprintk_deferred(const char *fmt, va_list args)
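
The hunks above move console printing out of console_unlock() and into per-console kthreads, each tracking its own position through the latched ->printk_seq set up in register_console(). For orientation, here is a condensed sketch of what such a printer thread does. It is illustrative only: the series' real printk_kthread_func() additionally handles dropped-record accounting, handover to the atomic consoles and sync mode, and serializes the sequence update with the console lock.

static int printk_kthread_func(void *data)
{
	struct console *con = data;
	struct printk_info info;
	struct printk_record r;
	char *text;
	u64 seq;

	text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
	if (!text)
		return -ENOMEM;

	for (;;) {
		seq = latched_seq_read_nolock(&con->printk_seq);

		/* Sleep until a record at or past @seq is available. */
		wait_event_interruptible(log_wait,
				prb_read_valid(prb, seq, NULL) ||
				kthread_should_stop());

		if (kthread_should_stop())
			break;

		prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
		if (!prb_read_valid(prb, seq, &r))
			continue;	/* woken by a signal, not a record */

		/* Records may have been overwritten while sleeping. */
		if (seq != r.info->seq)
			seq = r.info->seq;

		if (!suppress_message_printing(r.info->level)) {
			size_t len;

			len = record_print_text(&r, console_msg_format &
						MSG_FORMAT_SYSLOG, printk_time);
			con->write(con, text, len);
		}

		/* The real code serializes this with the console lock. */
		latched_seq_write(&con->printk_seq, seq + 1);
	}

	kfree(text);
	return 0;
}

As seen above, register_console() starts such a thread via console_try_thread() (or printk_late_init() via start_printk_kthread()), and unregister_console() stops it with kthread_stop().
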
diff --git a/patches/printk__relocate_printk_delay.patch b/patches/printk__relocate_printk_delay.patch
deleted file mode 100644
index 6800726a77cf..000000000000
--- a/patches/printk__relocate_printk_delay.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-Subject: printk: relocate printk_delay()
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:03 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Move printk_delay() "as is" further up so that it can be used by
-new functions in an upcoming commit.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/printk/printk.c | 28 ++++++++++++++--------------
- 1 file changed, 14 insertions(+), 14 deletions(-)
----
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -1748,6 +1748,20 @@ SYSCALL_DEFINE3(syslog, int, type, char
- return do_syslog(type, buf, len, SYSLOG_FROM_READER);
- }
-
-+int printk_delay_msec __read_mostly;
-+
-+static inline void printk_delay(void)
-+{
-+ if (unlikely(printk_delay_msec)) {
-+ int m = printk_delay_msec;
-+
-+ while (m--) {
-+ mdelay(1);
-+ touch_nmi_watchdog();
-+ }
-+ }
-+}
-+
- /*
- * Special console_lock variants that help to reduce the risk of soft-lockups.
- * They allow to pass console_lock to another printk() call using a busy wait.
-@@ -2002,20 +2016,6 @@ static u8 *__printk_recursion_counter(vo
- local_irq_restore(flags); \
- } while (0)
-
--int printk_delay_msec __read_mostly;
--
--static inline void printk_delay(void)
--{
-- if (unlikely(printk_delay_msec)) {
-- int m = printk_delay_msec;
--
-- while (m--) {
-- mdelay(1);
-- touch_nmi_watchdog();
-- }
-- }
--}
--
- static inline u32 printk_caller_id(void)
- {
- return in_task() ? task_pid_nr(current) :
diff --git a/patches/printk__remove_deferred_printing.patch b/patches/printk__remove_deferred_printing.patch
deleted file mode 100644
index 17af3e09682d..000000000000
--- a/patches/printk__remove_deferred_printing.patch
+++ /dev/null
@@ -1,823 +0,0 @@
-Subject: printk: remove deferred printing
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:08 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-Since printing occurs either atomically or from the printing
-kthread, there is no need to defer printing or to track possible
-recursion paths. Remove all printk defer functions and context
-tracking.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- arch/arm/kernel/smp.c | 2 -
- arch/powerpc/kexec/crash.c | 3 -
- arch/x86/kernel/dumpstack_32.c | 2 -
- arch/x86/kernel/dumpstack_64.c | 3 +
- arch/x86/kernel/i8259.c | 3 -
- arch/x86/kernel/unwind_frame.c | 16 +++-----
- arch/x86/kernel/unwind_orc.c | 2 -
- drivers/char/random.c | 5 +-
- include/linux/printk.h | 34 ------------------
- include/linux/suspend.h | 10 +----
- kernel/power/main.c | 10 +----
- kernel/printk/Makefile | 1
- kernel/printk/internal.h | 36 -------------------
- kernel/printk/printk.c | 74 +++++++++++++---------------------------
- kernel/printk/printk_safe.c | 52 ----------------------------
- kernel/sched/core.c | 9 ++--
- kernel/sched/deadline.c | 2 -
- kernel/sched/psi.c | 14 +++----
- kernel/sched/rt.c | 2 -
- kernel/sched/stats.h | 2 -
- kernel/time/clockevents.c | 9 +---
- kernel/time/ntp.c | 14 ++-----
- kernel/time/timekeeping.c | 30 ++++++++--------
- kernel/time/timekeeping_debug.c | 2 -
- kernel/workqueue.c | 4 --
- lib/ratelimit.c | 4 --
- 26 files changed, 83 insertions(+), 262 deletions(-)
- delete mode 100644 kernel/printk/internal.h
- delete mode 100644 kernel/printk/printk_safe.c
----
---- a/arch/arm/kernel/smp.c
-+++ b/arch/arm/kernel/smp.c
-@@ -676,9 +676,7 @@ static void do_handle_IPI(int ipinr)
- break;
-
- case IPI_CPU_BACKTRACE:
-- printk_deferred_enter();
- nmi_cpu_backtrace(get_irq_regs());
-- printk_deferred_exit();
- break;
-
- default:
---- a/arch/powerpc/kexec/crash.c
-+++ b/arch/powerpc/kexec/crash.c
-@@ -312,9 +312,6 @@ void default_machine_crash_shutdown(stru
- unsigned int i;
- int (*old_handler)(struct pt_regs *regs);
-
-- /* Avoid hardlocking with irresponsive CPU holding logbuf_lock */
-- printk_deferred_enter();
--
- /*
- * This function is only called after the system
- * has panicked or is otherwise in a critical state.
---- a/arch/x86/kernel/dumpstack_32.c
-+++ b/arch/x86/kernel/dumpstack_32.c
-@@ -141,7 +141,7 @@ int get_stack_info(unsigned long *stack,
- */
- if (visit_mask) {
- if (*visit_mask & (1UL << info->type)) {
-- printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
-+ pr_warn_once("WARNING: stack recursion on stack type %d\n", info->type);
- goto unknown;
- }
- *visit_mask |= 1UL << info->type;
---- a/arch/x86/kernel/dumpstack_64.c
-+++ b/arch/x86/kernel/dumpstack_64.c
-@@ -207,7 +207,8 @@ int get_stack_info(unsigned long *stack,
- if (visit_mask) {
- if (*visit_mask & (1UL << info->type)) {
- if (task == current)
-- printk_deferred_once(KERN_WARNING "WARNING: stack recursion on stack type %d\n", info->type);
-+ pr_warn_once("WARNING: stack recursion on stack type %d\n",
-+ info->type);
- goto unknown;
- }
- *visit_mask |= 1UL << info->type;
---- a/arch/x86/kernel/i8259.c
-+++ b/arch/x86/kernel/i8259.c
-@@ -207,8 +207,7 @@ static void mask_and_ack_8259A(struct ir
- * lets ACK and report it. [once per IRQ]
- */
- if (!(spurious_irq_mask & irqmask)) {
-- printk_deferred(KERN_DEBUG
-- "spurious 8259A interrupt: IRQ%d.\n", irq);
-+ printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
- spurious_irq_mask |= irqmask;
- }
- atomic_inc(&irq_err_count);
---- a/arch/x86/kernel/unwind_frame.c
-+++ b/arch/x86/kernel/unwind_frame.c
-@@ -41,9 +41,9 @@ static void unwind_dump(struct unwind_st
-
- dumped_before = true;
-
-- printk_deferred("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
-- state->stack_info.type, state->stack_info.next_sp,
-- state->stack_mask, state->graph_idx);
-+ printk("unwind stack type:%d next_sp:%p mask:0x%lx graph_idx:%d\n",
-+ state->stack_info.type, state->stack_info.next_sp,
-+ state->stack_mask, state->graph_idx);
-
- for (sp = PTR_ALIGN(state->orig_sp, sizeof(long)); sp;
- sp = PTR_ALIGN(stack_info.next_sp, sizeof(long))) {
-@@ -59,13 +59,11 @@ static void unwind_dump(struct unwind_st
-
- if (zero) {
- if (!prev_zero)
-- printk_deferred("%p: %0*x ...\n",
-- sp, BITS_PER_LONG/4, 0);
-+ printk("%p: %0*x ...\n", sp, BITS_PER_LONG/4, 0);
- continue;
- }
-
-- printk_deferred("%p: %0*lx (%pB)\n",
-- sp, BITS_PER_LONG/4, word, (void *)word);
-+ printk("%p: %0*lx (%pB)\n", sp, BITS_PER_LONG/4, word, (void *)word);
- }
- }
- }
-@@ -341,13 +339,13 @@ bool unwind_next_frame(struct unwind_sta
- goto the_end;
-
- if (state->regs) {
-- printk_deferred_once(KERN_WARNING
-+ pr_warn_once(
- "WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
- state->regs, state->task->comm,
- state->task->pid, next_bp);
- unwind_dump(state);
- } else {
-- printk_deferred_once(KERN_WARNING
-+ pr_warn_once(
- "WARNING: kernel stack frame pointer at %p in %s:%d has bad value %p\n",
- state->bp, state->task->comm,
- state->task->pid, next_bp);
---- a/arch/x86/kernel/unwind_orc.c
-+++ b/arch/x86/kernel/unwind_orc.c
-@@ -9,7 +9,7 @@
- #include <asm/orc_lookup.h>
-
- #define orc_warn(fmt, ...) \
-- printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)
-+ pr_warn_once("WARNING: " fmt, ##__VA_ARGS__)
-
- #define orc_warn_current(args...) \
- ({ \
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -1525,9 +1525,8 @@ static void _warn_unseeded_randomness(co
- print_once = true;
- #endif
- if (__ratelimit(&unseeded_warning))
-- printk_deferred(KERN_NOTICE "random: %s called from %pS "
-- "with crng_init=%d\n", func_name, caller,
-- crng_init);
-+ pr_notice("random: %s called from %pS with crng_init=%d\n",
-+ func_name, caller, crng_init);
- }
-
- /*
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -162,21 +162,6 @@ asmlinkage __printf(1, 2) __cold
- int _printk(const char *fmt, ...);
-
- /*
-- * Special printk facility for scheduler/timekeeping use only, _DO_NOT_USE_ !
-- */
--__printf(1, 2) __cold int _printk_deferred(const char *fmt, ...);
--
--extern void __printk_safe_enter(void);
--extern void __printk_safe_exit(void);
--/*
-- * The printk_deferred_enter/exit macros are available only as a hack for
-- * some code paths that need to defer all printk console printing. Interrupts
-- * must be disabled for the deferred duration.
-- */
--#define printk_deferred_enter __printk_safe_enter
--#define printk_deferred_exit __printk_safe_exit
--
--/*
- * Please don't use printk_ratelimit(), because it shares ratelimiting state
- * with all other unrelated printk_ratelimit() callsites. Instead use
- * printk_ratelimited() or plain old __ratelimit().
-@@ -216,19 +201,6 @@ int _printk(const char *s, ...)
- {
- return 0;
- }
--static inline __printf(1, 2) __cold
--int _printk_deferred(const char *s, ...)
--{
-- return 0;
--}
--
--static inline void printk_deferred_enter(void)
--{
--}
--
--static inline void printk_deferred_exit(void)
--{
--}
-
- static inline int printk_ratelimit(void)
- {
-@@ -475,8 +447,6 @@ struct pi_entry {
- * See the vsnprintf() documentation for format string extensions over C99.
- */
- #define printk(fmt, ...) printk_index_wrap(_printk, fmt, ##__VA_ARGS__)
--#define printk_deferred(fmt, ...) \
-- printk_index_wrap(_printk_deferred, fmt, ##__VA_ARGS__)
-
- /**
- * pr_emerg - Print an emergency-level message
-@@ -614,13 +584,9 @@ struct pi_entry {
- #ifdef CONFIG_PRINTK
- #define printk_once(fmt, ...) \
- DO_ONCE_LITE(printk, fmt, ##__VA_ARGS__)
--#define printk_deferred_once(fmt, ...) \
-- DO_ONCE_LITE(printk_deferred, fmt, ##__VA_ARGS__)
- #else
- #define printk_once(fmt, ...) \
- no_printk(fmt, ##__VA_ARGS__)
--#define printk_deferred_once(fmt, ...) \
-- no_printk(fmt, ##__VA_ARGS__)
- #endif
-
- #define pr_emerg_once(fmt, ...) \
---- a/include/linux/suspend.h
-+++ b/include/linux/suspend.h
-@@ -550,23 +550,17 @@ static inline void unlock_system_sleep(v
- #ifdef CONFIG_PM_SLEEP_DEBUG
- extern bool pm_print_times_enabled;
- extern bool pm_debug_messages_on;
--extern __printf(2, 3) void __pm_pr_dbg(bool defer, const char *fmt, ...);
-+extern __printf(1, 2) void pm_pr_dbg(const char *fmt, ...);
- #else
- #define pm_print_times_enabled (false)
- #define pm_debug_messages_on (false)
-
- #include <linux/printk.h>
-
--#define __pm_pr_dbg(defer, fmt, ...) \
-+#define pm_pr_dbg(fmt, ...) \
- no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
- #endif
-
--#define pm_pr_dbg(fmt, ...) \
-- __pm_pr_dbg(false, fmt, ##__VA_ARGS__)
--
--#define pm_deferred_pr_dbg(fmt, ...) \
-- __pm_pr_dbg(true, fmt, ##__VA_ARGS__)
--
- #ifdef CONFIG_PM_AUTOSLEEP
-
- /* kernel/power/autosleep.c */
---- a/kernel/power/main.c
-+++ b/kernel/power/main.c
-@@ -543,14 +543,13 @@ static int __init pm_debug_messages_setu
- __setup("pm_debug_messages", pm_debug_messages_setup);
-
- /**
-- * __pm_pr_dbg - Print a suspend debug message to the kernel log.
-- * @defer: Whether or not to use printk_deferred() to print the message.
-+ * pm_pr_dbg - Print a suspend debug message to the kernel log.
- * @fmt: Message format.
- *
- * The message will be emitted if enabled through the pm_debug_messages
- * sysfs attribute.
- */
--void __pm_pr_dbg(bool defer, const char *fmt, ...)
-+void pm_pr_dbg(const char *fmt, ...)
- {
- struct va_format vaf;
- va_list args;
-@@ -563,10 +562,7 @@ void __pm_pr_dbg(bool defer, const char
- vaf.fmt = fmt;
- vaf.va = &args;
-
-- if (defer)
-- printk_deferred(KERN_DEBUG "PM: %pV", &vaf);
-- else
-- printk(KERN_DEBUG "PM: %pV", &vaf);
-+ printk(KERN_DEBUG "PM: %pV", &vaf);
-
- va_end(args);
- }
---- a/kernel/printk/Makefile
-+++ b/kernel/printk/Makefile
-@@ -1,6 +1,5 @@
- # SPDX-License-Identifier: GPL-2.0-only
- obj-y = printk.o
--obj-$(CONFIG_PRINTK) += printk_safe.o
- obj-$(CONFIG_A11Y_BRAILLE_CONSOLE) += braille.o
- obj-$(CONFIG_PRINTK) += printk_ringbuffer.o
- obj-$(CONFIG_PRINTK_INDEX) += index.o
---- a/kernel/printk/internal.h
-+++ b/kernel/printk/internal.h
-@@ -2,7 +2,6 @@
- /*
- * internal.h - printk internal definitions
- */
--#include <linux/percpu.h>
-
- #ifdef CONFIG_PRINTK
-
-@@ -12,41 +11,6 @@ enum printk_info_flags {
- LOG_CONT = 8, /* text is a fragment of a continuation line */
- };
-
--__printf(4, 0)
--int vprintk_store(int facility, int level,
-- const struct dev_printk_info *dev_info,
-- const char *fmt, va_list args);
--
--__printf(1, 0) int vprintk_default(const char *fmt, va_list args);
--__printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
--
--bool printk_percpu_data_ready(void);
--
--#define printk_safe_enter_irqsave(flags) \
-- do { \
-- local_irq_save(flags); \
-- __printk_safe_enter(); \
-- } while (0)
--
--#define printk_safe_exit_irqrestore(flags) \
-- do { \
-- __printk_safe_exit(); \
-- local_irq_restore(flags); \
-- } while (0)
--
--void defer_console_output(void);
--
- u16 printk_parse_prefix(const char *text, int *level,
- enum printk_info_flags *flags);
--#else
--
--/*
-- * In !PRINTK builds we still export console_sem
-- * semaphore and some of console functions (console_unlock()/etc.), so
-- * printk-safe must preserve the existing local IRQ guarantees.
-- */
--#define printk_safe_enter_irqsave(flags) local_irq_save(flags)
--#define printk_safe_exit_irqrestore(flags) local_irq_restore(flags)
--
--static inline bool printk_percpu_data_ready(void) { return false; }
- #endif /* CONFIG_PRINTK */
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -44,6 +44,7 @@
- #include <linux/irq_work.h>
- #include <linux/ctype.h>
- #include <linux/uio.h>
-+#include <linux/kdb.h>
- #include <linux/kgdb.h>
- #include <linux/kthread.h>
- #include <linux/clocksource.h>
-@@ -228,19 +229,7 @@ static int nr_ext_console_drivers;
-
- static int __down_trylock_console_sem(unsigned long ip)
- {
-- int lock_failed;
-- unsigned long flags;
--
-- /*
-- * Here and in __up_console_sem() we need to be in safe mode,
-- * because spindump/WARN/etc from under console ->lock will
-- * deadlock in printk()->down_trylock_console_sem() otherwise.
-- */
-- printk_safe_enter_irqsave(flags);
-- lock_failed = down_trylock(&console_sem);
-- printk_safe_exit_irqrestore(flags);
--
-- if (lock_failed)
-+ if (down_trylock(&console_sem))
- return 1;
- mutex_acquire(&console_lock_dep_map, 0, 1, ip);
- return 0;
-@@ -249,13 +238,9 @@ static int __down_trylock_console_sem(un
-
- static void __up_console_sem(unsigned long ip)
- {
-- unsigned long flags;
--
- mutex_release(&console_lock_dep_map, ip);
-
-- printk_safe_enter_irqsave(flags);
- up(&console_sem);
-- printk_safe_exit_irqrestore(flags);
- }
- #define up_console_sem() __up_console_sem(_RET_IP_)
-
-@@ -417,7 +402,7 @@ static struct printk_ringbuffer *prb = &
- */
- static bool __printk_percpu_data_ready __read_mostly;
-
--bool printk_percpu_data_ready(void)
-+static bool printk_percpu_data_ready(void)
- {
- return __printk_percpu_data_ready;
- }
-@@ -2024,9 +2009,9 @@ static u16 printk_sprint(char *text, u16
- }
-
- __printf(4, 0)
--int vprintk_store(int facility, int level,
-- const struct dev_printk_info *dev_info,
-- const char *fmt, va_list args)
-+static int vprintk_store(int facility, int level,
-+ const struct dev_printk_info *dev_info,
-+ const char *fmt, va_list args)
- {
- const u32 caller_id = printk_caller_id();
- struct prb_reserved_entry e;
-@@ -2172,11 +2157,28 @@ asmlinkage int vprintk_emit(int facility
- }
- EXPORT_SYMBOL(vprintk_emit);
-
--int vprintk_default(const char *fmt, va_list args)
-+__printf(1, 0)
-+static int vprintk_default(const char *fmt, va_list args)
- {
- return vprintk_emit(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
- }
--EXPORT_SYMBOL_GPL(vprintk_default);
-+
-+__printf(1, 0)
-+static int vprintk_func(const char *fmt, va_list args)
-+{
-+#ifdef CONFIG_KGDB_KDB
-+ /* Allow to pass printk() to kdb but avoid a recursion. */
-+ if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
-+ return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
-+#endif
-+ return vprintk_default(fmt, args);
-+}
-+
-+asmlinkage int vprintk(const char *fmt, va_list args)
-+{
-+ return vprintk_func(fmt, args);
-+}
-+EXPORT_SYMBOL(vprintk);
-
- asmlinkage __visible int _printk(const char *fmt, ...)
- {
-@@ -3137,37 +3139,11 @@ void wake_up_klogd(void)
- preempt_enable();
- }
-
--void defer_console_output(void)
--{
--}
--
- void printk_trigger_flush(void)
- {
- wake_up_klogd();
- }
-
--int vprintk_deferred(const char *fmt, va_list args)
--{
-- int r;
--
-- r = vprintk_emit(0, LOGLEVEL_SCHED, NULL, fmt, args);
-- defer_console_output();
--
-- return r;
--}
--
--int _printk_deferred(const char *fmt, ...)
--{
-- va_list args;
-- int r;
--
-- va_start(args, fmt);
-- r = vprintk_deferred(fmt, args);
-- va_end(args);
--
-- return r;
--}
--
- /*
- * printk rate limiting, lifted from the networking subsystem.
- *
---- a/kernel/printk/printk_safe.c
-+++ /dev/null
-@@ -1,52 +0,0 @@
--// SPDX-License-Identifier: GPL-2.0-or-later
--/*
-- * printk_safe.c - Safe printk for printk-deadlock-prone contexts
-- */
--
--#include <linux/preempt.h>
--#include <linux/kdb.h>
--#include <linux/smp.h>
--#include <linux/cpumask.h>
--#include <linux/printk.h>
--#include <linux/kprobes.h>
--
--#include "internal.h"
--
--static DEFINE_PER_CPU(int, printk_context);
--
--/* Can be preempted by NMI. */
--void __printk_safe_enter(void)
--{
-- this_cpu_inc(printk_context);
--}
--
--/* Can be preempted by NMI. */
--void __printk_safe_exit(void)
--{
-- this_cpu_dec(printk_context);
--}
--
--asmlinkage int vprintk(const char *fmt, va_list args)
--{
--#ifdef CONFIG_KGDB_KDB
-- /* Allow to pass printk() to kdb but avoid a recursion. */
-- if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
-- return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
--#endif
--
-- /*
-- * Use the main logbuf even in NMI. But avoid calling console
-- * drivers that might have their own locks.
-- */
-- if (this_cpu_read(printk_context) || in_nmi()) {
-- int len;
--
-- len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, fmt, args);
-- defer_console_output();
-- return len;
-- }
--
-- /* No obstacles. */
-- return vprintk_default(fmt, args);
--}
--EXPORT_SYMBOL(vprintk);
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -2967,9 +2967,8 @@ void force_compatible_cpus_allowed_ptr(s
-
- out_set_mask:
- if (printk_ratelimit()) {
-- printk_deferred("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-- task_pid_nr(p), p->comm,
-- cpumask_pr_args(override_mask));
-+ printk("Overriding affinity for process %d (%s) to CPUs %*pbl\n",
-+ task_pid_nr(p), p->comm, cpumask_pr_args(override_mask));
- }
-
- WARN_ON(set_cpus_allowed_ptr(p, override_mask));
-@@ -3399,8 +3398,8 @@ static int select_fallback_rq(int cpu, s
- * leave kernel.
- */
- if (p->mm && printk_ratelimit()) {
-- printk_deferred("process %d (%s) no longer affine to cpu%d\n",
-- task_pid_nr(p), p->comm, cpu);
-+ printk("process %d (%s) no longer affine to cpu%d\n",
-+ task_pid_nr(p), p->comm, cpu);
- }
- }
-
---- a/kernel/sched/deadline.c
-+++ b/kernel/sched/deadline.c
-@@ -800,7 +800,7 @@ static void replenish_dl_entity(struct s
- * entity.
- */
- if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
-- printk_deferred_once("sched: DL replenish lagged too much\n");
-+ printk_once("sched: DL replenish lagged too much\n");
- dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline;
- dl_se->runtime = pi_of(dl_se)->dl_runtime;
- }
---- a/kernel/sched/psi.c
-+++ b/kernel/sched/psi.c
-@@ -710,10 +710,10 @@ static void psi_group_change(struct psi_
- if (groupc->tasks[t]) {
- groupc->tasks[t]--;
- } else if (!psi_bug) {
-- printk_deferred(KERN_ERR "psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
-- cpu, t, groupc->tasks[0],
-- groupc->tasks[1], groupc->tasks[2],
-- groupc->tasks[3], clear, set);
-+ pr_err("psi: task underflow! cpu=%d t=%d tasks=[%u %u %u %u] clear=%x set=%x\n",
-+ cpu, t, groupc->tasks[0],
-+ groupc->tasks[1], groupc->tasks[2],
-+ groupc->tasks[3], clear, set);
- psi_bug = 1;
- }
- }
-@@ -779,9 +779,9 @@ static void psi_flags_change(struct task
- if (((task->psi_flags & set) ||
- (task->psi_flags & clear) != clear) &&
- !psi_bug) {
-- printk_deferred(KERN_ERR "psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
-- task->pid, task->comm, task_cpu(task),
-- task->psi_flags, clear, set);
-+ pr_err("psi: inconsistent task state! task=%d:%s cpu=%d psi_flags=%x clear=%x set=%x\n",
-+ task->pid, task->comm, task_cpu(task),
-+ task->psi_flags, clear, set);
- psi_bug = 1;
- }
-
---- a/kernel/sched/rt.c
-+++ b/kernel/sched/rt.c
-@@ -977,7 +977,7 @@ static int sched_rt_runtime_exceeded(str
- */
- if (likely(rt_b->rt_runtime)) {
- rt_rq->rt_throttled = 1;
-- printk_deferred_once("sched: RT throttling activated\n");
-+ printk_once("sched: RT throttling activated\n");
- } else {
- /*
- * In case we did anyway, make it go away,
---- a/kernel/sched/stats.h
-+++ b/kernel/sched/stats.h
-@@ -62,7 +62,7 @@ check_schedstat_required(void)
- trace_sched_stat_iowait_enabled() ||
- trace_sched_stat_blocked_enabled() ||
- trace_sched_stat_runtime_enabled())
-- printk_deferred_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
-+ printk_once("Scheduler tracepoints stat_sleep, stat_iowait, stat_blocked and stat_runtime require the kernel parameter schedstats=enable or kernel.sched_schedstats=1\n");
- }
-
- #else /* !CONFIG_SCHEDSTATS: */
---- a/kernel/time/clockevents.c
-+++ b/kernel/time/clockevents.c
-@@ -203,8 +203,7 @@ static int clockevents_increase_min_delt
- {
- /* Nothing to do if we already reached the limit */
- if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
-- printk_deferred(KERN_WARNING
-- "CE: Reprogramming failure. Giving up\n");
-+ pr_warn("CE: Reprogramming failure. Giving up\n");
- dev->next_event = KTIME_MAX;
- return -ETIME;
- }
-@@ -217,10 +216,8 @@ static int clockevents_increase_min_delt
- if (dev->min_delta_ns > MIN_DELTA_LIMIT)
- dev->min_delta_ns = MIN_DELTA_LIMIT;
-
-- printk_deferred(KERN_WARNING
-- "CE: %s increased min_delta_ns to %llu nsec\n",
-- dev->name ? dev->name : "?",
-- (unsigned long long) dev->min_delta_ns);
-+ pr_warn("CE: %s increased min_delta_ns to %llu nsec\n",
-+ dev->name ? dev->name : "?", (unsigned long long) dev->min_delta_ns);
- return 0;
- }
-
---- a/kernel/time/ntp.c
-+++ b/kernel/time/ntp.c
-@@ -939,9 +939,7 @@ static long hardpps_update_freq(struct p
- time_status |= STA_PPSERROR;
- pps_errcnt++;
- pps_dec_freq_interval();
-- printk_deferred(KERN_ERR
-- "hardpps: PPSERROR: interval too long - %lld s\n",
-- freq_norm.sec);
-+ pr_err("hardpps: PPSERROR: interval too long - %lld s\n", freq_norm.sec);
- return 0;
- }
-
-@@ -954,8 +952,7 @@ static long hardpps_update_freq(struct p
- delta = shift_right(ftemp - pps_freq, NTP_SCALE_SHIFT);
- pps_freq = ftemp;
- if (delta > PPS_MAXWANDER || delta < -PPS_MAXWANDER) {
-- printk_deferred(KERN_WARNING
-- "hardpps: PPSWANDER: change=%ld\n", delta);
-+ pr_warn("hardpps: PPSWANDER: change=%ld\n", delta);
- time_status |= STA_PPSWANDER;
- pps_stbcnt++;
- pps_dec_freq_interval();
-@@ -999,9 +996,8 @@ static void hardpps_update_phase(long er
- * the time offset is updated.
- */
- if (jitter > (pps_jitter << PPS_POPCORN)) {
-- printk_deferred(KERN_WARNING
-- "hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
-- jitter, (pps_jitter << PPS_POPCORN));
-+ pr_warn("hardpps: PPSJITTER: jitter=%ld, limit=%ld\n",
-+ jitter, (pps_jitter << PPS_POPCORN));
- time_status |= STA_PPSJITTER;
- pps_jitcnt++;
- } else if (time_status & STA_PPSTIME) {
-@@ -1058,7 +1054,7 @@ void __hardpps(const struct timespec64 *
- time_status |= STA_PPSJITTER;
- /* restart the frequency calibration interval */
- pps_fbase = *raw_ts;
-- printk_deferred(KERN_ERR "hardpps: PPSJITTER: bad pulse\n");
-+ pr_err("hardpps: PPSJITTER: bad pulse\n");
- return;
- }
-
---- a/kernel/time/timekeeping.c
-+++ b/kernel/time/timekeeping.c
-@@ -203,22 +203,23 @@ static void timekeeping_check_update(str
- const char *name = tk->tkr_mono.clock->name;
-
- if (offset > max_cycles) {
-- printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
-- offset, name, max_cycles);
-- printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
-+ printk("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
-+ offset, name, max_cycles);
-+ printk(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
- } else {
- if (offset > (max_cycles >> 1)) {
-- printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
-- offset, name, max_cycles >> 1);
-- printk_deferred(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
-+ printk("INFO: timekeeping: Cycle offset (%lld) is larger than the '%s' clock's 50%% safety margin (%lld)\n",
-+ offset, name, max_cycles >> 1);
-+ printk(" timekeeping: Your kernel is still fine, but is feeling a bit nervous\n");
- }
- }
-
- if (tk->underflow_seen) {
- if (jiffies - tk->last_warning > WARNING_FREQ) {
-- printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
-- printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
-- printk_deferred(" Your kernel is probably still fine.\n");
-+ printk("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n",
-+ name);
-+ printk(" Please report this, consider using a different clocksource, if possible.\n");
-+ printk(" Your kernel is probably still fine.\n");
- tk->last_warning = jiffies;
- }
- tk->underflow_seen = 0;
-@@ -226,9 +227,10 @@ static void timekeeping_check_update(str
-
- if (tk->overflow_seen) {
- if (jiffies - tk->last_warning > WARNING_FREQ) {
-- printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
-- printk_deferred(" Please report this, consider using a different clocksource, if possible.\n");
-- printk_deferred(" Your kernel is probably still fine.\n");
-+ printk("WARNING: Overflow in clocksource '%s' observed, time update capped.\n",
-+ name);
-+ printk(" Please report this, consider using a different clocksource, if possible.\n");
-+ printk(" Your kernel is probably still fine.\n");
- tk->last_warning = jiffies;
- }
- tk->overflow_seen = 0;
-@@ -1668,9 +1670,7 @@ static void __timekeeping_inject_sleepti
- const struct timespec64 *delta)
- {
- if (!timespec64_valid_strict(delta)) {
-- printk_deferred(KERN_WARNING
-- "__timekeeping_inject_sleeptime: Invalid "
-- "sleep delta value!\n");
-+ pr_warn("%s: Invalid sleep delta value!\n", __func__);
- return;
- }
- tk_xtime_add(tk, delta);
---- a/kernel/time/timekeeping_debug.c
-+++ b/kernel/time/timekeeping_debug.c
-@@ -49,7 +49,7 @@ void tk_debug_account_sleep_time(const s
- int bin = min(fls(t->tv_sec), NUM_BINS-1);
-
- sleep_time_bin[bin]++;
-- pm_deferred_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
-+ pm_pr_dbg("Timekeeping suspended for %lld.%03lu seconds\n",
- (s64)t->tv_sec, t->tv_nsec / NSEC_PER_MSEC);
- }
-
---- a/kernel/workqueue.c
-+++ b/kernel/workqueue.c
-@@ -4845,9 +4845,7 @@ void show_one_workqueue(struct workqueue
- * drivers that queue work while holding locks
- * also taken in their write paths.
- */
-- printk_deferred_enter();
- show_pwq(pwq);
-- printk_deferred_exit();
- }
- raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
- /*
-@@ -4878,7 +4876,6 @@ static void show_one_worker_pool(struct
- * queue work while holding locks also taken in their write
- * paths.
- */
-- printk_deferred_enter();
- pr_info("pool %d:", pool->id);
- pr_cont_pool_info(pool);
- pr_cont(" hung=%us workers=%d",
-@@ -4893,7 +4890,6 @@ static void show_one_worker_pool(struct
- first = false;
- }
- pr_cont("\n");
-- printk_deferred_exit();
- next_pool:
- raw_spin_unlock_irqrestore(&pool->lock, flags);
- /*
---- a/lib/ratelimit.c
-+++ b/lib/ratelimit.c
-@@ -47,9 +47,7 @@ int ___ratelimit(struct ratelimit_state
- if (time_is_before_jiffies(rs->begin + rs->interval)) {
- if (rs->missed) {
- if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
-- printk_deferred(KERN_WARNING
-- "%s: %d callbacks suppressed\n",
-- func, rs->missed);
-+ pr_warn("%s: %d callbacks suppressed\n", func, rs->missed);
- rs->missed = 0;
- }
- }
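
The net effect of this patch: printk() becomes callable from any context without special handling, because storing a record never takes console locks. Roughly, as a condensed sketch (not the literal function body from the series):

/*
 * Condensed view of the resulting vprintk_emit() flow. Storing the
 * record is lockless and safe in any context, including NMI; actual
 * console output happens elsewhere (atomic consoles or the printer
 * kthreads).
 */
asmlinkage int vprintk_emit(int facility, int level,
			    const struct dev_printk_info *dev_info,
			    const char *fmt, va_list args)
{
	int printed_len;

	/* Lockless ringbuffer store. */
	printed_len = vprintk_store(facility, level, dev_info, fmt, args);

	/* Readers (klogd and the printer kthreads) wait on @log_wait. */
	wake_up_klogd();

	return printed_len;
}
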
diff --git a/patches/printk__rename_printk_cpulock_API_and_always_disable_interrupts.patch b/patches/printk__rename_printk_cpulock_API_and_always_disable_interrupts.patch
deleted file mode 100644
index a03c43bd4afa..000000000000
--- a/patches/printk__rename_printk_cpulock_API_and_always_disable_interrupts.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-Subject: printk: rename printk cpulock API and always disable interrupts
-From: John Ogness <john.ogness@linutronix.de>
-Date: Thu Jul 15 09:34:45 2021 +0206
-
-From: John Ogness <john.ogness@linutronix.de>
-
-The printk cpulock functions use local_irq_disable(). This means that
-hardware interrupts are also disabled on PREEMPT_RT. To make this
-clear, rename the functions to use the raw_ prefix:
-
-raw_printk_cpu_lock_irqsave(flags);
-raw_printk_cpu_unlock_irqrestore(flags);
-
-Also, these functions were a NOP for !CONFIG_SMP, but even without SMP
-they still need to disable hardware interrupts. Modify them accordingly.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- include/linux/printk.h | 30 ++++++++++++++----------------
- lib/dump_stack.c | 4 ++--
- lib/nmi_backtrace.c | 4 ++--
- 3 files changed, 18 insertions(+), 20 deletions(-)
----
---- a/include/linux/printk.h
-+++ b/include/linux/printk.h
-@@ -284,17 +284,22 @@ static inline void printk_trigger_flush(
- extern int __printk_cpu_trylock(void);
- extern void __printk_wait_on_cpu_lock(void);
- extern void __printk_cpu_unlock(void);
-+#else
-+#define __printk_cpu_trylock() 1
-+#define __printk_wait_on_cpu_lock()
-+#define __printk_cpu_unlock()
-+#endif /* CONFIG_SMP */
-
- /**
-- * printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
-- * lock and disable interrupts.
-+ * raw_printk_cpu_lock_irqsave() - Acquire the printk cpu-reentrant spinning
-+ * lock and disable interrupts.
- * @flags: Stack-allocated storage for saving local interrupt state,
-- * to be passed to printk_cpu_unlock_irqrestore().
-+ * to be passed to raw_printk_cpu_unlock_irqrestore().
- *
- * If the lock is owned by another CPU, spin until it becomes available.
- * Interrupts are restored while spinning.
- */
--#define printk_cpu_lock_irqsave(flags) \
-+#define raw_printk_cpu_lock_irqsave(flags) \
- for (;;) { \
- local_irq_save(flags); \
- if (__printk_cpu_trylock()) \
-@@ -304,22 +309,15 @@ extern void __printk_cpu_unlock(void);
- }
-
- /**
-- * printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant spinning
-- * lock and restore interrupts.
-- * @flags: Caller's saved interrupt state, from printk_cpu_lock_irqsave().
-+ * raw_printk_cpu_unlock_irqrestore() - Release the printk cpu-reentrant
-+ * spinning lock and restore interrupts.
-+ * @flags: Caller's saved interrupt state from raw_printk_cpu_lock_irqsave().
- */
--#define printk_cpu_unlock_irqrestore(flags) \
-+#define raw_printk_cpu_unlock_irqrestore(flags) \
- do { \
- __printk_cpu_unlock(); \
- local_irq_restore(flags); \
-- } while (0) \
--
--#else
--
--#define printk_cpu_lock_irqsave(flags) ((void)flags)
--#define printk_cpu_unlock_irqrestore(flags) ((void)flags)
--
--#endif /* CONFIG_SMP */
-+ } while (0)
-
- extern int kptr_restrict;
-
---- a/lib/dump_stack.c
-+++ b/lib/dump_stack.c
-@@ -102,9 +102,9 @@ asmlinkage __visible void dump_stack_lvl
- * Permit this cpu to perform nested stack dumps while serialising
- * against other CPUs
- */
-- printk_cpu_lock_irqsave(flags);
-+ raw_printk_cpu_lock_irqsave(flags);
- __dump_stack(log_lvl);
-- printk_cpu_unlock_irqrestore(flags);
-+ raw_printk_cpu_unlock_irqrestore(flags);
- }
- EXPORT_SYMBOL(dump_stack_lvl);
-
---- a/lib/nmi_backtrace.c
-+++ b/lib/nmi_backtrace.c
-@@ -99,7 +99,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
- * Allow nested NMI backtraces while serializing
- * against other CPUs.
- */
-- printk_cpu_lock_irqsave(flags);
-+ raw_printk_cpu_lock_irqsave(flags);
- if (!READ_ONCE(backtrace_idle) && regs && cpu_in_idle(instruction_pointer(regs))) {
- pr_warn("NMI backtrace for cpu %d skipped: idling at %pS\n",
- cpu, (void *)instruction_pointer(regs));
-@@ -110,7 +110,7 @@ bool nmi_cpu_backtrace(struct pt_regs *r
- else
- dump_stack();
- }
-- printk_cpu_unlock_irqrestore(flags);
-+ raw_printk_cpu_unlock_irqrestore(flags);
- cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
- return true;
- }
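
The call sites converted above all follow the same pattern; a caller that wants its multi-line output serialized against other CPUs does, for example:

	unsigned long flags;

	/*
	 * The lock is reentrant on the owning CPU, so nested dumps
	 * (e.g. from an NMI hitting mid-dump) still make progress.
	 * Hardware interrupts stay disabled while the lock is held,
	 * on SMP and !SMP alike.
	 */
	raw_printk_cpu_lock_irqsave(flags);
	pr_warn("CPU: %d\n", smp_processor_id());
	/* ... more lines that must not interleave with other CPUs ... */
	raw_printk_cpu_unlock_irqrestore(flags);
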
diff --git a/patches/printk__use_seqcount_latch_for_console_seq.patch b/patches/printk__use_seqcount_latch_for_console_seq.patch
deleted file mode 100644
index db5be2c61d85..000000000000
--- a/patches/printk__use_seqcount_latch_for_console_seq.patch
+++ /dev/null
@@ -1,186 +0,0 @@
-Subject: printk: use seqcount_latch for console_seq
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon Nov 30 01:42:05 2020 +0106
-
-From: John Ogness <john.ogness@linutronix.de>
-
-In preparation for atomic printing, change @console_seq to use
-seqcount_latch so that it can be read without requiring @console_sem.
-
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- kernel/printk/printk.c | 73 +++++++++++++++++++++++++++----------------------
- 1 file changed, 41 insertions(+), 32 deletions(-)
----
---- a/kernel/printk/printk.c
-+++ b/kernel/printk/printk.c
-@@ -362,9 +362,7 @@ static u64 syslog_seq;
- static size_t syslog_partial;
- static bool syslog_time;
-
--/* All 3 protected by @console_sem. */
--/* the next printk record to write to the console */
--static u64 console_seq;
-+/* Both protected by @console_sem. */
- static u64 exclusive_console_stop_seq;
- static unsigned long console_dropped;
-
-@@ -374,6 +372,17 @@ struct latched_seq {
- };
-
- /*
-+ * The next printk record to write to the console. There are two
-+ * copies (updated with seqcount_latch) so that reads can locklessly
-+ * access a valid value. Writers are synchronized by @console_sem.
-+ */
-+static struct latched_seq console_seq = {
-+ .latch = SEQCNT_LATCH_ZERO(console_seq.latch),
-+ .val[0] = 0,
-+ .val[1] = 0,
-+};
-+
-+/*
- * The next printk record to read after the last 'clear' command. There are
- * two copies (updated with seqcount_latch) so that reads can locklessly
- * access a valid value. Writers are synchronized by @syslog_lock.
-@@ -436,7 +445,7 @@ bool printk_percpu_data_ready(void)
- return __printk_percpu_data_ready;
- }
-
--/* Must be called under syslog_lock. */
-+/* Must be called under associated write-protection lock. */
- static void latched_seq_write(struct latched_seq *ls, u64 val)
- {
- raw_write_seqcount_latch(&ls->latch);
-@@ -2279,9 +2288,9 @@ EXPORT_SYMBOL(_printk);
-
- #define prb_read_valid(rb, seq, r) false
- #define prb_first_valid_seq(rb) 0
-+#define latched_seq_read_nolock(seq) 0
-+#define latched_seq_write(dst, src)
-
--static u64 syslog_seq;
--static u64 console_seq;
- static u64 exclusive_console_stop_seq;
- static unsigned long console_dropped;
-
-@@ -2609,7 +2618,7 @@ void console_unlock(void)
- bool do_cond_resched, retry;
- struct printk_info info;
- struct printk_record r;
-- u64 __maybe_unused next_seq;
-+ u64 seq;
-
- if (console_suspended) {
- up_console_sem();
-@@ -2653,12 +2662,14 @@ void console_unlock(void)
- size_t len;
-
- skip:
-- if (!prb_read_valid(prb, console_seq, &r))
-+ seq = latched_seq_read_nolock(&console_seq);
-+ if (!prb_read_valid(prb, seq, &r))
- break;
-
-- if (console_seq != r.info->seq) {
-- console_dropped += r.info->seq - console_seq;
-- console_seq = r.info->seq;
-+ if (seq != r.info->seq) {
-+ console_dropped += r.info->seq - seq;
-+ latched_seq_write(&console_seq, r.info->seq);
-+ seq = r.info->seq;
- }
-
- if (suppress_message_printing(r.info->level)) {
-@@ -2667,13 +2678,13 @@ void console_unlock(void)
- * directly to the console when we received it, and
- * record that has level above the console loglevel.
- */
-- console_seq++;
-+ latched_seq_write(&console_seq, seq + 1);
- goto skip;
- }
-
- /* Output to all consoles once old messages replayed. */
- if (unlikely(exclusive_console &&
-- console_seq >= exclusive_console_stop_seq)) {
-+ seq >= exclusive_console_stop_seq)) {
- exclusive_console = NULL;
- }
-
-@@ -2694,7 +2705,7 @@ void console_unlock(void)
- len = record_print_text(&r,
- console_msg_format & MSG_FORMAT_SYSLOG,
- printk_time);
-- console_seq++;
-+ latched_seq_write(&console_seq, seq + 1);
-
- /*
- * While actively printing out messages, if another printk()
-@@ -2722,9 +2733,6 @@ void console_unlock(void)
- cond_resched();
- }
-
-- /* Get consistent value of the next-to-be-used sequence number. */
-- next_seq = console_seq;
--
- console_locked = 0;
- up_console_sem();
-
-@@ -2734,7 +2742,7 @@ void console_unlock(void)
- * there's a new owner and the console_unlock() from them will do the
- * flush, no worries.
- */
-- retry = prb_read_valid(prb, next_seq, NULL);
-+ retry = prb_read_valid(prb, latched_seq_read_nolock(&console_seq), NULL);
- if (retry && console_trylock())
- goto again;
- }
-@@ -2786,18 +2794,19 @@ void console_unblank(void)
- */
- void console_flush_on_panic(enum con_flush_mode mode)
- {
-- /*
-- * If someone else is holding the console lock, trylock will fail
-- * and may_schedule may be set. Ignore and proceed to unlock so
-- * that messages are flushed out. As this can be called from any
-- * context and we don't want to get preempted while flushing,
-- * ensure may_schedule is cleared.
-- */
-- console_trylock();
-- console_may_schedule = 0;
--
-- if (mode == CONSOLE_REPLAY_ALL)
-- console_seq = prb_first_valid_seq(prb);
-+ if (console_trylock()) {
-+ if (mode == CONSOLE_REPLAY_ALL)
-+ latched_seq_write(&console_seq, prb_first_valid_seq(prb));
-+ } else {
-+ /*
-+ * Another context is holding the console lock and
-+ * @console_may_schedule may be set. Ignore and proceed to
-+ * unlock so that messages are flushed out. As this can be
-+ * called from any context and we don't want to get preempted
-+ * while flushing, ensure @console_may_schedule is cleared.
-+ */
-+ console_may_schedule = 0;
-+ }
- console_unlock();
- }
-
-@@ -3033,11 +3042,11 @@ void register_console(struct console *ne
- * ignores console_lock.
- */
- exclusive_console = newcon;
-- exclusive_console_stop_seq = console_seq;
-+ exclusive_console_stop_seq = latched_seq_read_nolock(&console_seq);
-
- /* Get a consistent copy of @syslog_seq. */
- mutex_lock(&syslog_lock);
-- console_seq = syslog_seq;
-+ latched_seq_write(&console_seq, syslog_seq);
- mutex_unlock(&syslog_lock);
- }
- console_unlock();
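
For reference, the lockless read side that pairs with latched_seq_write() follows the kernel's seqcount_latch pattern. A minimal sketch of latched_seq_read_nolock(); the series' helper may differ in detail:

/* Can be called from any context; never blocks the writer. */
static u64 latched_seq_read_nolock(struct latched_seq *ls)
{
	unsigned int seq;
	u64 val;

	do {
		seq = raw_read_seqcount_latch(&ls->latch);
		val = ls->val[seq & 0x1];
	} while (read_seqcount_latch_retry(&ls->latch, seq));

	return val;
}

The two val[] copies mean a reader always sees a consistent value even if it races with latched_seq_write(), which is exactly what console_unlock() relies on when re-checking for pending records without holding @console_sem.
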
diff --git a/patches/series b/patches/series
index b8a4fe43d56a..35a3f0dbc205 100644
--- a/patches/series
+++ b/patches/series
@@ -11,19 +11,20 @@ arm64_mm_make_arch_faults_on_old_pte_check_for_migratability.patch
###########################################################################
# John's printk queue
###########################################################################
-printk__rename_printk_cpulock_API_and_always_disable_interrupts.patch
-console__add_write_atomic_interface.patch
-kdb__only_use_atomic_consoles_for_output_mirroring.patch
-serial__8250__implement_write_atomic.patch
-printk__relocate_printk_delay.patch
-printk__call_boot_delay_msec_in_printk_delay.patch
-printk__use_seqcount_latch_for_console_seq.patch
-printk__introduce_kernel_sync_mode.patch
-printk__move_console_printing_to_kthreads.patch
-printk__remove_deferred_printing.patch
-printk__add_console_handover.patch
-printk__add_pr_flush.patch
-printk__Enhance_the_condition_check_of_msleep_in_pr_flush.patch
+0001-printk-rename-cpulock-functions.patch
+0002-printk-cpu-sync-always-disable-interrupts.patch
+0003-printk-use-percpu-flag-instead-of-cpu_online.patch
+0004-printk-get-caller_id-timestamp-after-migration-disab.patch
+0005-printk-refactor-and-rework-printing-logic.patch
+0006-printk-move-buffer-definitions-into-console_emit_nex.patch
+0007-printk-add-pr_flush.patch
+0008-printk-add-kthread-console-printers.patch
+0009-printk-reimplement-console_lock-for-proper-kthread-s.patch
+0010-printk-remove-console_locked.patch
+0011-console-introduce-CON_MIGHT_SLEEP-for-vt.patch
+0012-printk-add-infrastucture-for-atomic-consoles.patch
+0013-serial-8250-implement-write_atomic.patch
+0014-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
###########################################################################
# Posted and applied
@@ -91,8 +92,8 @@ mm__workingset__replace_IRQ-off_check_with_a_lockdep_assert..patch
softirq-Use-a-dedicated-thread-for-timer-wakeups.patch
# These two need some feedback.
-i2c-rcar-Allow-interrupt-handler-to-be-threaded.patch
-i2c-core-Let-i2c_handle_smbus_host_notify-use-handle.patch
+genirq-Provide-generic_handle_irq_safe.patch
+Use-generic_handle_irq_safe-where-it-makes-sense.patch
###########################################################################
# Kconfig bits: