author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2022-10-26 17:11:43 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2022-10-26 17:11:43 +0200
commit    ca7b7a61efc51e861c7a829e09896d9fba4ca39a (patch)
tree      731681fe948eab17c37f73ae42e3a85c62811790
parent    9b0d72982f9f497f698203c4eea5f19a7f55e15e (diff)
download  linux-rt-6.1-rc2-rt1-patches.tar.gz

[ANNOUNCE] v6.1-rc2-rt1  (v6.1-rc2-rt1-patches)
Dear RT folks!

I'm pleased to announce the v6.1-rc2-rt1 patch set.

Changes since v6.0.3-rt12:

  - Update to v6.1-rc2.

Known issues:

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v6.1-rc2-rt1

The RT patch against v6.1-rc2 can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/6.1/older/patch-6.1-rc2-rt1.patch.xz

The split quilt queue is available at:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/6.1/older/patches-6.1-rc2-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/0001-lib-vsprintf-Remove-static_branch_likely-from-__ptr_.patch66
-rw-r--r--patches/0001-mm-slub-move-free_debug_processing-further.patch147
-rw-r--r--patches/0001-preempt-Provide-preempt_-dis-en-able_nested.patch84
-rw-r--r--patches/0002-dentry-Use-preempt_-dis-en-able_nested.patch47
-rw-r--r--patches/0002-lib-vsprintf-Initialize-vsprintf-s-pointer-hash-once.patch93
-rw-r--r--patches/0002-mm-slub-restrict-sysfs-validation-to-debug-caches-an.patch471
-rw-r--r--patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch (renamed from patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch)85
-rw-r--r--patches/0003-mm-slub-remove-slab_lock-usage-for-debug-operations.patch97
-rw-r--r--patches/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch140
-rw-r--r--patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch (renamed from patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch)19
-rw-r--r--patches/0003-slub-Make-PREEMPT_RT-support-less-convoluted.patch145
-rw-r--r--patches/0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch2
-rw-r--r--patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch8
-rw-r--r--patches/0004-mm-slub-convert-object_map_lock-to-non-raw-spinlock.patch89
-rw-r--r--patches/0005-mm-slub-simplify-__cmpxchg_double_slab-and-slab_-un-.patch110
-rw-r--r--patches/0008-u64_stats-Streamline-the-implementation.patch262
-rw-r--r--patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch12
-rw-r--r--patches/0016-printk-add-infrastucture-for-atomic-consoles.patch52
-rw-r--r--patches/0017-serial-8250-implement-write_atomic.patch181
-rw-r--r--patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch10
-rw-r--r--patches/ARM__Allow_to_enable_RT.patch16
-rw-r--r--patches/Add_localversion_for_-RT_release.patch2
-rw-r--r--patches/POWERPC__Allow_to_enable_RT.patch4
-rw-r--r--patches/arm__Add_support_for_lazy_preemption.patch2
-rw-r--r--patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch8
-rw-r--r--patches/genirq-Provide-generic_handle_domain_irq_safe.patch152
-rw-r--r--patches/locking-Detect-includes-rwlock.h-outside-of-spinlock.patch104
-rw-r--r--patches/mm-multi-gen-LRU-Move-lru_gen_add_mm-out-of-IRQ-off-.patch42
-rw-r--r--patches/net-Avoid-the-IPI-to-free-the.patch6
-rw-r--r--patches/powerpc__Add_support_for_lazy_preemption.patch2
-rw-r--r--patches/powerpc__traps__Use_PREEMPT_RT.patch2
-rw-r--r--patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch2
-rw-r--r--patches/printk-Bring-back-the-RT-bits.patch114
-rw-r--r--patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch2
-rw-r--r--patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch52
-rw-r--r--patches/sched__Add_support_for_lazy_preemption.patch48
-rw-r--r--patches/series28
-rw-r--r--patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch4
-rw-r--r--patches/sysfs__Add__sys_kernel_realtime_entry.patch4
-rw-r--r--patches/tty_serial_omap__Make_the_locking_RT_aware.patch4
-rw-r--r--patches/vduse-Remove-include-of-rwlock.h.patch3
-rw-r--r--patches/x86__Enable_RT_also_on_32bit.patch4
-rw-r--r--patches/x86__Support_for_lazy_preemption.patch4
-rw-r--r--patches/x86_entry__Use_should_resched_in_idtentry_exit_cond_resched.patch2
44 files changed, 377 insertions, 2354 deletions
diff --git a/patches/0001-lib-vsprintf-Remove-static_branch_likely-from-__ptr_.patch b/patches/0001-lib-vsprintf-Remove-static_branch_likely-from-__ptr_.patch
deleted file mode 100644
index d8940df9ffaf..000000000000
--- a/patches/0001-lib-vsprintf-Remove-static_branch_likely-from-__ptr_.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 29 Jul 2022 15:52:45 +0200
-Subject: [PATCH 1/2] lib/vsprintf: Remove static_branch_likely() from
- __ptr_to_hashval().
-
-Using static_branch_likely() to signal that ptr_key has been filled is a
-bit much given that it is not a fast path.
-
-Replace static_branch_likely() with a bool for the condition and a memory
-barrier for ptr_key.
-
-Suggested-by: Petr Mladek <pmladek@suse.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20220729154716.429964-2-bigeasy@linutronix.de
----
- lib/vsprintf.c | 19 ++++++++-----------
- 1 file changed, 8 insertions(+), 11 deletions(-)
-
---- a/lib/vsprintf.c
-+++ b/lib/vsprintf.c
-@@ -750,12 +750,7 @@ static int __init debug_boot_weak_hash_e
- }
- early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-
--static DEFINE_STATIC_KEY_FALSE(filled_random_ptr_key);
--
--static void enable_ptr_key_workfn(struct work_struct *work)
--{
-- static_branch_enable(&filled_random_ptr_key);
--}
-+static bool filled_random_ptr_key;
-
- /* Maps a pointer to a 32 bit unique identifier. */
- static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
-@@ -763,24 +758,26 @@ static inline int __ptr_to_hashval(const
- static siphash_key_t ptr_key __read_mostly;
- unsigned long hashval;
-
-- if (!static_branch_likely(&filled_random_ptr_key)) {
-+ if (!READ_ONCE(filled_random_ptr_key)) {
- static bool filled = false;
- static DEFINE_SPINLOCK(filling);
-- static DECLARE_WORK(enable_ptr_key_work, enable_ptr_key_workfn);
- unsigned long flags;
-
-- if (!system_unbound_wq || !rng_is_initialized() ||
-+ if (!rng_is_initialized() ||
- !spin_trylock_irqsave(&filling, flags))
- return -EAGAIN;
-
- if (!filled) {
- get_random_bytes(&ptr_key, sizeof(ptr_key));
-- queue_work(system_unbound_wq, &enable_ptr_key_work);
-+ /* Pairs with smp_rmb() before reading ptr_key. */
-+ smp_wmb();
-+ WRITE_ONCE(filled_random_ptr_key, true);
- filled = true;
- }
- spin_unlock_irqrestore(&filling, flags);
- }
--
-+ /* Pairs with smp_wmb() after writing ptr_key. */
-+ smp_rmb();
-
- #ifdef CONFIG_64BIT
- hashval = (unsigned long)siphash_1u64((u64)ptr, &ptr_key);
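The patch above replaces a static key with the classic publish/consume
pattern for one-time initialized data. For reference, a minimal
kernel-style sketch of that pattern; secret_key, key_ready, publish_key(),
consume_key() and compute_key() are hypothetical names for illustration,
not code from the patch:

static u64 secret_key;
static bool key_ready;

static void publish_key(void)
{
	secret_key = compute_key();	/* hypothetical one-time setup */
	/* Pairs with smp_rmb() in consume_key(): order data before flag. */
	smp_wmb();
	WRITE_ONCE(key_ready, true);
}

static int consume_key(u64 *out)
{
	if (!READ_ONCE(key_ready))
		return -EAGAIN;	/* not initialized yet, caller retries */
	/* Pairs with smp_wmb() in publish_key(): flag before data read. */
	smp_rmb();
	*out = secret_key;
	return 0;
}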
diff --git a/patches/0001-mm-slub-move-free_debug_processing-further.patch b/patches/0001-mm-slub-move-free_debug_processing-further.patch
deleted file mode 100644
index b0104c7a4a23..000000000000
--- a/patches/0001-mm-slub-move-free_debug_processing-further.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Tue, 23 Aug 2022 19:03:56 +0200
-Subject: [PATCH 1/5] mm/slub: move free_debug_processing() further
-
-In the following patch, the function free_debug_processing() will be
-calling add_partial(), remove_partial() and discard_slab(), so move it
-below their definitions to avoid forward declarations. To make review
-easier, separate the move from functional changes.
-
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
-Acked-by: David Rientjes <rientjes@google.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slub.c | 114 +++++++++++++++++++++++++++++++-------------------------------
- 1 file changed, 57 insertions(+), 57 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -1385,63 +1385,6 @@ static inline int free_consistency_check
- return 1;
- }
-
--/* Supports checking bulk free of a constructed freelist */
--static noinline int free_debug_processing(
-- struct kmem_cache *s, struct slab *slab,
-- void *head, void *tail, int bulk_cnt,
-- unsigned long addr)
--{
-- struct kmem_cache_node *n = get_node(s, slab_nid(slab));
-- void *object = head;
-- int cnt = 0;
-- unsigned long flags, flags2;
-- int ret = 0;
-- depot_stack_handle_t handle = 0;
--
-- if (s->flags & SLAB_STORE_USER)
-- handle = set_track_prepare();
--
-- spin_lock_irqsave(&n->list_lock, flags);
-- slab_lock(slab, &flags2);
--
-- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-- if (!check_slab(s, slab))
-- goto out;
-- }
--
--next_object:
-- cnt++;
--
-- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-- if (!free_consistency_checks(s, slab, object, addr))
-- goto out;
-- }
--
-- if (s->flags & SLAB_STORE_USER)
-- set_track_update(s, object, TRACK_FREE, addr, handle);
-- trace(s, slab, object, 0);
-- /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
-- init_object(s, object, SLUB_RED_INACTIVE);
--
-- /* Reached end of constructed freelist yet? */
-- if (object != tail) {
-- object = get_freepointer(s, object);
-- goto next_object;
-- }
-- ret = 1;
--
--out:
-- if (cnt != bulk_cnt)
-- slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
-- bulk_cnt, cnt);
--
-- slab_unlock(slab, &flags2);
-- spin_unlock_irqrestore(&n->list_lock, flags);
-- if (!ret)
-- slab_fix(s, "Object at 0x%p not freed", object);
-- return ret;
--}
--
- /*
- * Parse a block of slub_debug options. Blocks are delimited by ';'
- *
-@@ -2788,6 +2731,63 @@ static inline unsigned long node_nr_objs
- {
- return atomic_long_read(&n->total_objects);
- }
-+
-+/* Supports checking bulk free of a constructed freelist */
-+static noinline int free_debug_processing(
-+ struct kmem_cache *s, struct slab *slab,
-+ void *head, void *tail, int bulk_cnt,
-+ unsigned long addr)
-+{
-+ struct kmem_cache_node *n = get_node(s, slab_nid(slab));
-+ void *object = head;
-+ int cnt = 0;
-+ unsigned long flags, flags2;
-+ int ret = 0;
-+ depot_stack_handle_t handle = 0;
-+
-+ if (s->flags & SLAB_STORE_USER)
-+ handle = set_track_prepare();
-+
-+ spin_lock_irqsave(&n->list_lock, flags);
-+ slab_lock(slab, &flags2);
-+
-+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-+ if (!check_slab(s, slab))
-+ goto out;
-+ }
-+
-+next_object:
-+ cnt++;
-+
-+ if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-+ if (!free_consistency_checks(s, slab, object, addr))
-+ goto out;
-+ }
-+
-+ if (s->flags & SLAB_STORE_USER)
-+ set_track_update(s, object, TRACK_FREE, addr, handle);
-+ trace(s, slab, object, 0);
-+ /* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
-+ init_object(s, object, SLUB_RED_INACTIVE);
-+
-+ /* Reached end of constructed freelist yet? */
-+ if (object != tail) {
-+ object = get_freepointer(s, object);
-+ goto next_object;
-+ }
-+ ret = 1;
-+
-+out:
-+ if (cnt != bulk_cnt)
-+ slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
-+ bulk_cnt, cnt);
-+
-+ slab_unlock(slab, &flags2);
-+ spin_unlock_irqrestore(&n->list_lock, flags);
-+ if (!ret)
-+ slab_fix(s, "Object at 0x%p not freed", object);
-+ return ret;
-+}
- #endif /* CONFIG_SLUB_DEBUG */
-
- #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
diff --git a/patches/0001-preempt-Provide-preempt_-dis-en-able_nested.patch b/patches/0001-preempt-Provide-preempt_-dis-en-able_nested.patch
deleted file mode 100644
index 4baeabe1fc4e..000000000000
--- a/patches/0001-preempt-Provide-preempt_-dis-en-able_nested.patch
+++ /dev/null
@@ -1,84 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 25 Aug 2022 18:41:24 +0200
-Subject: [PATCH 1/8] preempt: Provide preempt_[dis|en]able_nested()
-
-On PREEMPT_RT enabled kernels, spinlocks and rwlocks disable neither
-preemption nor interrupts. However, there are a few places which depend on
-the implicit preemption/interrupt disable of those locks, e.g. seqcount
-write sections, per CPU statistics updates etc.
-
-To avoid sprinkling CONFIG_PREEMPT_RT conditionals all over the place, add
-preempt_disable_nested() and preempt_enable_nested() which should be
-descriptive enough.
-
-Add a lockdep assertion for the !PREEMPT_RT case to catch callers which
-do not have preemption disabled.
-
-Cc: Ben Segall <bsegall@google.com>
-Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
-Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
-Cc: Ingo Molnar <mingo@redhat.com>
-Cc: Juri Lelli <juri.lelli@redhat.com>
-Cc: Mel Gorman <mgorman@suse.de>
-Cc: Peter Zijlstra <peterz@infradead.org>
-Cc: Steven Rostedt <rostedt@goodmis.org>
-Cc: Valentin Schneider <vschneid@redhat.com>
-Cc: Vincent Guittot <vincent.guittot@linaro.org>
-Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20220825164131.402717-2-bigeasy@linutronix.de
----
- include/linux/preempt.h | 42 ++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 42 insertions(+)
-
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -421,4 +421,46 @@ static inline void migrate_enable(void)
-
- #endif /* CONFIG_SMP */
-
-+/**
-+ * preempt_disable_nested - Disable preemption inside a normally preempt disabled section
-+ *
-+ * Use for code which requires preemption protection inside a critical
-+ * section which has preemption disabled implicitly on non-PREEMPT_RT
-+ * enabled kernels, by e.g.:
-+ * - holding a spinlock/rwlock
-+ * - soft interrupt context
-+ * - regular interrupt handlers
-+ *
-+ * On PREEMPT_RT enabled kernels spinlock/rwlock held sections, soft
-+ * interrupt context and regular interrupt handlers are preemptible and
-+ * only prevent migration. preempt_disable_nested() ensures that preemption
-+ * is disabled for cases which require CPU local serialization even on
-+ * PREEMPT_RT. For non-PREEMPT_RT kernels this is a NOP.
-+ *
-+ * The use cases are code sequences which are not serialized by a
-+ * particular lock instance, e.g.:
-+ * - seqcount write side critical sections where the seqcount is not
-+ * associated to a particular lock and therefore the automatic
-+ * protection mechanism does not work. This prevents a live lock
-+ * against a preempting high priority reader.
-+ * - RMW per CPU variable updates like vmstat.
-+ */
-+/* Macro to avoid header recursion hell vs. lockdep */
-+#define preempt_disable_nested() \
-+do { \
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
-+ preempt_disable(); \
-+ else \
-+ lockdep_assert_preemption_disabled(); \
-+} while (0)
-+
-+/**
-+ * preempt_enable_nested - Undo the effect of preempt_disable_nested()
-+ */
-+static __always_inline void preempt_enable_nested(void)
-+{
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ preempt_enable();
-+}
-+
- #endif /* __LINUX_PREEMPT_H */
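A short usage sketch for the new helpers; my_stat and my_stat_add() are
hypothetical, the real users are the dentry and vmstat conversions in the
following patches:

static DEFINE_PER_CPU(u64, my_stat);

/*
 * Called with a spinlock held. On !PREEMPT_RT the lock already disables
 * preemption and the helper merely asserts that; on PREEMPT_RT it disables
 * preemption explicitly so the per-CPU read-modify-write stays CPU-local.
 */
static void my_stat_add(u64 delta)
{
	preempt_disable_nested();
	__this_cpu_add(my_stat, delta);
	preempt_enable_nested();
}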
diff --git a/patches/0002-dentry-Use-preempt_-dis-en-able_nested.patch b/patches/0002-dentry-Use-preempt_-dis-en-able_nested.patch
deleted file mode 100644
index acfbc52b1e2e..000000000000
--- a/patches/0002-dentry-Use-preempt_-dis-en-able_nested.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 25 Aug 2022 18:41:25 +0200
-Subject: [PATCH 2/8] dentry: Use preempt_[dis|en]able_nested()
-
-Replace the open coded CONFIG_PREEMPT_RT conditional
-preempt_disable/enable() with the new helper.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Alexander Viro <viro@zeniv.linux.org.uk>
-Cc: linux-fsdevel@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Acked-by: Christian Brauner (Microsoft) <brauner@kernel.org>
-Link: https://lore.kernel.org/r/20220825164131.402717-3-bigeasy@linutronix.de
----
- fs/dcache.c | 13 ++-----------
- 1 file changed, 2 insertions(+), 11 deletions(-)
-
---- a/fs/dcache.c
-+++ b/fs/dcache.c
-@@ -2597,15 +2597,7 @@ EXPORT_SYMBOL(d_rehash);
-
- static inline unsigned start_dir_add(struct inode *dir)
- {
-- /*
-- * The caller holds a spinlock (dentry::d_lock). On !PREEMPT_RT
-- * kernels spin_lock() implicitly disables preemption, but not on
-- * PREEMPT_RT. So for RT it has to be done explicitly to protect
-- * the sequence count write side critical section against a reader
-- * or another writer preempting, which would result in a live lock.
-- */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
- for (;;) {
- unsigned n = dir->i_dir_seq;
- if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
-@@ -2618,8 +2610,7 @@ static inline void end_dir_add(struct in
- wait_queue_head_t *d_wait)
- {
- smp_store_release(&dir->i_dir_seq, n + 2);
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- wake_up_all(d_wait);
- }
-
diff --git a/patches/0002-lib-vsprintf-Initialize-vsprintf-s-pointer-hash-once.patch b/patches/0002-lib-vsprintf-Initialize-vsprintf-s-pointer-hash-once.patch
deleted file mode 100644
index ac1dfb892c2b..000000000000
--- a/patches/0002-lib-vsprintf-Initialize-vsprintf-s-pointer-hash-once.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 1 Aug 2022 11:34:33 +0200
-Subject: [PATCH 2/2] lib/vsprintf: Initialize vsprintf's pointer hash once the
- random core is ready.
-
-The printk code invokes vsnprintf() in order to compute the complete
-string before adding it into its buffer. This happens in an IRQ-off
-region which leads to a warning on PREEMPT_RT in the random code if the
-format strings contains a %p for pointer printing. This happens because
-the random core acquires locks which become sleeping locks on PREEMPT_RT
-which must not be acquired with interrupts or preemption disabled.
-By default the pointers are hashed, which requires a random value on the
-first invocation (either by printk or another user, whichever comes first).
-
-One could argue that there is no need for printk to disable interrupts
-during the vsprintf() invocation, which would fix the just-mentioned
-problem. However, printk itself can be invoked in a context with
-disabled interrupts, which would lead to the very same problem.
-
-Move the initialization of ptr_key into a worker and schedule it from
-subsys_initcall(). This happens early but after the workqueue subsystem
-is ready. Use get_random_bytes() to retrieve the random value if the RNG
-core is ready, otherwise schedule a worker in two seconds and try again.
-
-Reported-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/YueeIgPGUJgsnsAh@linutronix.de
----
- lib/vsprintf.c | 46 +++++++++++++++++++++++++++-------------------
- 1 file changed, 27 insertions(+), 19 deletions(-)
-
---- a/lib/vsprintf.c
-+++ b/lib/vsprintf.c
-@@ -751,31 +751,39 @@ static int __init debug_boot_weak_hash_e
- early_param("debug_boot_weak_hash", debug_boot_weak_hash_enable);
-
- static bool filled_random_ptr_key;
-+static siphash_key_t ptr_key __read_mostly;
-+static void fill_ptr_key_workfn(struct work_struct *work);
-+static DECLARE_DELAYED_WORK(fill_ptr_key_work, fill_ptr_key_workfn);
-+
-+static void fill_ptr_key_workfn(struct work_struct *work)
-+{
-+ if (!rng_is_initialized()) {
-+ queue_delayed_work(system_unbound_wq, &fill_ptr_key_work, HZ * 2);
-+ return;
-+ }
-+
-+ get_random_bytes(&ptr_key, sizeof(ptr_key));
-+
-+ /* Pairs with smp_rmb() before reading ptr_key. */
-+ smp_wmb();
-+ WRITE_ONCE(filled_random_ptr_key, true);
-+}
-+
-+static int __init vsprintf_init_hashval(void)
-+{
-+ fill_ptr_key_workfn(NULL);
-+ return 0;
-+}
-+subsys_initcall(vsprintf_init_hashval)
-
- /* Maps a pointer to a 32 bit unique identifier. */
- static inline int __ptr_to_hashval(const void *ptr, unsigned long *hashval_out)
- {
-- static siphash_key_t ptr_key __read_mostly;
- unsigned long hashval;
-
-- if (!READ_ONCE(filled_random_ptr_key)) {
-- static bool filled = false;
-- static DEFINE_SPINLOCK(filling);
-- unsigned long flags;
--
-- if (!rng_is_initialized() ||
-- !spin_trylock_irqsave(&filling, flags))
-- return -EAGAIN;
--
-- if (!filled) {
-- get_random_bytes(&ptr_key, sizeof(ptr_key));
-- /* Pairs with smp_rmb() before reading ptr_key. */
-- smp_wmb();
-- WRITE_ONCE(filled_random_ptr_key, true);
-- filled = true;
-- }
-- spin_unlock_irqrestore(&filling, flags);
-- }
-+ if (!READ_ONCE(filled_random_ptr_key))
-+ return -EBUSY;
-+
- /* Pairs with smp_wmb() after writing ptr_key. */
- smp_rmb();
-
diff --git a/patches/0002-mm-slub-restrict-sysfs-validation-to-debug-caches-an.patch b/patches/0002-mm-slub-restrict-sysfs-validation-to-debug-caches-an.patch
deleted file mode 100644
index 17f335a2a9e1..000000000000
--- a/patches/0002-mm-slub-restrict-sysfs-validation-to-debug-caches-an.patch
+++ /dev/null
@@ -1,471 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Tue, 23 Aug 2022 19:03:57 +0200
-Subject: [PATCH 2/5] mm/slub: restrict sysfs validation to debug caches and
- make it safe
-
-Rongwei Wang reports [1] that cache validation triggered by writing to
-/sys/kernel/slab/<cache>/validate is racy against normal cache
-operations (e.g. freeing) in a way that can cause false positive
-inconsistency reports for caches with debugging enabled. The problem is
-that debugging actions that mark object free or active and actual
-freelist operations are not atomic, and the validation can see an
-inconsistent state.
-
-For caches that do or don't have debugging enabled, additional races
-involving n->nr_slabs are possible that result in false reports of wrong
-slab counts.
-
-This patch attempts to solve these issues while not adding overhead to
-normal (especially fastpath) operations for caches that do not have
-debugging enabled. Such overhead would not be justified to make possible
-userspace-triggered validation safe. Instead, disable the validation for
-caches that don't have debugging enabled and make their sysfs validate
-handler return -EINVAL.
-
-For caches that do have debugging enabled, we can instead extend the
-existing approach of not using percpu freelists to force all alloc/free
-operations to the slow paths where the debug flags are checked and acted
-upon. There we can adjust the debug-specific paths to increase n->list_lock
-coverage against concurrent validation as necessary.
-
-The processing on free in free_debug_processing() already happens under
-n->list_lock so we can extend it to actually do the freeing as well and
-thus make it atomic against concurrent validation. As observed by
-Hyeonggon Yoo, we do not really need to take slab_lock() anymore here
-because all paths we could race with are protected by n->list_lock under
-the new scheme, so drop its usage here.
-
-The processing on alloc in alloc_debug_processing() currently doesn't
-take any locks, but we have to first allocate the object from a slab on
-the partial list (as debugging caches have no percpu slabs) and thus
-take the n->list_lock anyway. Add a function alloc_single_from_partial()
-that grabs just the allocated object instead of the whole freelist, and
-does the debug processing. The n->list_lock coverage again makes it
-atomic against validation and it is also ultimately more efficient than
-the current grabbing of freelist immediately followed by slab
-deactivation.
-
-To prevent races on n->nr_slabs updates, make sure that for caches with
-debugging enabled, inc_slabs_node() or dec_slabs_node() is called under
-n->list_lock. When allocating a new slab for a debug cache, handle the
-allocation by a new function alloc_single_from_new_slab() instead of the
-current forced deactivation path.
-
-Neither of these changes affect the fast paths at all. The changes in
-slow paths are negligible for non-debug caches.
-
-[1] https://lore.kernel.org/all/20220529081535.69275-1-rongwei.wang@linux.alibaba.com/
-
-Reported-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slub.c | 231 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
- 1 file changed, 179 insertions(+), 52 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -1324,17 +1324,14 @@ static inline int alloc_consistency_chec
- }
-
- static noinline int alloc_debug_processing(struct kmem_cache *s,
-- struct slab *slab,
-- void *object, unsigned long addr)
-+ struct slab *slab, void *object)
- {
- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!alloc_consistency_checks(s, slab, object))
- goto bad;
- }
-
-- /* Success perform special debug activities for allocs */
-- if (s->flags & SLAB_STORE_USER)
-- set_track(s, object, TRACK_ALLOC, addr);
-+ /* Success. Perform special debug activities for allocs */
- trace(s, slab, object, 1);
- init_object(s, object, SLUB_RED_ACTIVE);
- return 1;
-@@ -1604,16 +1601,18 @@ static inline
- void setup_slab_debug(struct kmem_cache *s, struct slab *slab, void *addr) {}
-
- static inline int alloc_debug_processing(struct kmem_cache *s,
-- struct slab *slab, void *object, unsigned long addr) { return 0; }
-+ struct slab *slab, void *object) { return 0; }
-
--static inline int free_debug_processing(
-+static inline void free_debug_processing(
- struct kmem_cache *s, struct slab *slab,
- void *head, void *tail, int bulk_cnt,
-- unsigned long addr) { return 0; }
-+ unsigned long addr) {}
-
- static inline void slab_pad_check(struct kmem_cache *s, struct slab *slab) {}
- static inline int check_object(struct kmem_cache *s, struct slab *slab,
- void *object, u8 val) { return 1; }
-+static inline void set_track(struct kmem_cache *s, void *object,
-+ enum track_item alloc, unsigned long addr) {}
- static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
- struct slab *slab) {}
- static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
-@@ -1919,11 +1918,13 @@ static struct slab *allocate_slab(struct
- */
- slab = alloc_slab_page(alloc_gfp, node, oo);
- if (unlikely(!slab))
-- goto out;
-+ return NULL;
- stat(s, ORDER_FALLBACK);
- }
-
- slab->objects = oo_objects(oo);
-+ slab->inuse = 0;
-+ slab->frozen = 0;
-
- account_slab(slab, oo_order(oo), s, flags);
-
-@@ -1950,15 +1951,6 @@ static struct slab *allocate_slab(struct
- set_freepointer(s, p, NULL);
- }
-
-- slab->inuse = slab->objects;
-- slab->frozen = 1;
--
--out:
-- if (!slab)
-- return NULL;
--
-- inc_slabs_node(s, slab_nid(slab), slab->objects);
--
- return slab;
- }
-
-@@ -2046,6 +2038,75 @@ static inline void remove_partial(struct
- }
-
- /*
-+ * Called only for kmem_cache_debug() caches instead of acquire_slab(), with a
-+ * slab from the n->partial list. Remove only a single object from the slab, do
-+ * the alloc_debug_processing() checks and leave the slab on the list, or move
-+ * it to full list if it was the last free object.
-+ */
-+static void *alloc_single_from_partial(struct kmem_cache *s,
-+ struct kmem_cache_node *n, struct slab *slab)
-+{
-+ void *object;
-+
-+ lockdep_assert_held(&n->list_lock);
-+
-+ object = slab->freelist;
-+ slab->freelist = get_freepointer(s, object);
-+ slab->inuse++;
-+
-+ if (!alloc_debug_processing(s, slab, object)) {
-+ remove_partial(n, slab);
-+ return NULL;
-+ }
-+
-+ if (slab->inuse == slab->objects) {
-+ remove_partial(n, slab);
-+ add_full(s, n, slab);
-+ }
-+
-+ return object;
-+}
-+
-+/*
-+ * Called only for kmem_cache_debug() caches to allocate from a freshly
-+ * allocated slab. Allocate a single object instead of whole freelist
-+ * and put the slab to the partial (or full) list.
-+ */
-+static void *alloc_single_from_new_slab(struct kmem_cache *s,
-+ struct slab *slab)
-+{
-+ int nid = slab_nid(slab);
-+ struct kmem_cache_node *n = get_node(s, nid);
-+ unsigned long flags;
-+ void *object;
-+
-+
-+ object = slab->freelist;
-+ slab->freelist = get_freepointer(s, object);
-+ slab->inuse = 1;
-+
-+ if (!alloc_debug_processing(s, slab, object))
-+ /*
-+ * It's not really expected that this would fail on a
-+ * freshly allocated slab, but a concurrent memory
-+ * corruption in theory could cause that.
-+ */
-+ return NULL;
-+
-+ spin_lock_irqsave(&n->list_lock, flags);
-+
-+ if (slab->inuse == slab->objects)
-+ add_full(s, n, slab);
-+ else
-+ add_partial(n, slab, DEACTIVATE_TO_HEAD);
-+
-+ inc_slabs_node(s, nid, slab->objects);
-+ spin_unlock_irqrestore(&n->list_lock, flags);
-+
-+ return object;
-+}
-+
-+/*
- * Remove slab from the partial list, freeze it and
- * return the pointer to the freelist.
- *
-@@ -2125,6 +2186,13 @@ static void *get_partial_node(struct kme
- if (!pfmemalloc_match(slab, gfpflags))
- continue;
-
-+ if (kmem_cache_debug(s)) {
-+ object = alloc_single_from_partial(s, n, slab);
-+ if (object)
-+ break;
-+ continue;
-+ }
-+
- t = acquire_slab(s, n, slab, object == NULL);
- if (!t)
- break;
-@@ -2733,31 +2801,39 @@ static inline unsigned long node_nr_objs
- }
-
- /* Supports checking bulk free of a constructed freelist */
--static noinline int free_debug_processing(
-+static noinline void free_debug_processing(
- struct kmem_cache *s, struct slab *slab,
- void *head, void *tail, int bulk_cnt,
- unsigned long addr)
- {
- struct kmem_cache_node *n = get_node(s, slab_nid(slab));
-+ struct slab *slab_free = NULL;
- void *object = head;
- int cnt = 0;
-- unsigned long flags, flags2;
-- int ret = 0;
-+ unsigned long flags;
-+ bool checks_ok = false;
- depot_stack_handle_t handle = 0;
-
- if (s->flags & SLAB_STORE_USER)
- handle = set_track_prepare();
-
- spin_lock_irqsave(&n->list_lock, flags);
-- slab_lock(slab, &flags2);
-
- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!check_slab(s, slab))
- goto out;
- }
-
-+ if (slab->inuse < bulk_cnt) {
-+ slab_err(s, slab, "Slab has %d allocated objects but %d are to be freed\n",
-+ slab->inuse, bulk_cnt);
-+ goto out;
-+ }
-+
- next_object:
-- cnt++;
-+
-+ if (++cnt > bulk_cnt)
-+ goto out_cnt;
-
- if (s->flags & SLAB_CONSISTENCY_CHECKS) {
- if (!free_consistency_checks(s, slab, object, addr))
-@@ -2775,18 +2851,56 @@ static noinline int free_debug_processin
- object = get_freepointer(s, object);
- goto next_object;
- }
-- ret = 1;
-+ checks_ok = true;
-
--out:
-+out_cnt:
- if (cnt != bulk_cnt)
-- slab_err(s, slab, "Bulk freelist count(%d) invalid(%d)\n",
-+ slab_err(s, slab, "Bulk free expected %d objects but found %d\n",
- bulk_cnt, cnt);
-
-- slab_unlock(slab, &flags2);
-+out:
-+ if (checks_ok) {
-+ void *prior = slab->freelist;
-+
-+ /* Perform the actual freeing while we still hold the locks */
-+ slab->inuse -= cnt;
-+ set_freepointer(s, tail, prior);
-+ slab->freelist = head;
-+
-+ /* Do we need to remove the slab from full or partial list? */
-+ if (!prior) {
-+ remove_full(s, n, slab);
-+ } else if (slab->inuse == 0) {
-+ remove_partial(n, slab);
-+ stat(s, FREE_REMOVE_PARTIAL);
-+ }
-+
-+ /* Do we need to discard the slab or add to partial list? */
-+ if (slab->inuse == 0) {
-+ slab_free = slab;
-+ } else if (!prior) {
-+ add_partial(n, slab, DEACTIVATE_TO_TAIL);
-+ stat(s, FREE_ADD_PARTIAL);
-+ }
-+ }
-+
-+ if (slab_free) {
-+ /*
-+ * Update the counters while still holding n->list_lock to
-+ * prevent spurious validation warnings
-+ */
-+ dec_slabs_node(s, slab_nid(slab_free), slab_free->objects);
-+ }
-+
- spin_unlock_irqrestore(&n->list_lock, flags);
-- if (!ret)
-+
-+ if (!checks_ok)
- slab_fix(s, "Object at 0x%p not freed", object);
-- return ret;
-+
-+ if (slab_free) {
-+ stat(s, FREE_SLAB);
-+ free_slab(s, slab_free);
-+ }
- }
- #endif /* CONFIG_SLUB_DEBUG */
-
-@@ -3036,36 +3150,52 @@ static void *___slab_alloc(struct kmem_c
- return NULL;
- }
-
-+ stat(s, ALLOC_SLAB);
-+
-+ if (kmem_cache_debug(s)) {
-+ freelist = alloc_single_from_new_slab(s, slab);
-+
-+ if (unlikely(!freelist))
-+ goto new_objects;
-+
-+ if (s->flags & SLAB_STORE_USER)
-+ set_track(s, freelist, TRACK_ALLOC, addr);
-+
-+ return freelist;
-+ }
-+
- /*
- * No other reference to the slab yet so we can
- * muck around with it freely without cmpxchg
- */
- freelist = slab->freelist;
- slab->freelist = NULL;
-+ slab->inuse = slab->objects;
-+ slab->frozen = 1;
-
-- stat(s, ALLOC_SLAB);
-+ inc_slabs_node(s, slab_nid(slab), slab->objects);
-
- check_new_slab:
-
- if (kmem_cache_debug(s)) {
-- if (!alloc_debug_processing(s, slab, freelist, addr)) {
-- /* Slab failed checks. Next slab needed */
-- goto new_slab;
-- } else {
-- /*
-- * For debug case, we don't load freelist so that all
-- * allocations go through alloc_debug_processing()
-- */
-- goto return_single;
-- }
-+ /*
-+ * For debug caches here we had to go through
-+ * alloc_single_from_partial() so just store the tracking info
-+ * and return the object
-+ */
-+ if (s->flags & SLAB_STORE_USER)
-+ set_track(s, freelist, TRACK_ALLOC, addr);
-+ return freelist;
- }
-
-- if (unlikely(!pfmemalloc_match(slab, gfpflags)))
-+ if (unlikely(!pfmemalloc_match(slab, gfpflags))) {
- /*
- * For !pfmemalloc_match() case we don't load freelist so that
- * we don't make further mismatched allocations easier.
- */
-- goto return_single;
-+ deactivate_slab(s, slab, get_freepointer(s, freelist));
-+ return freelist;
-+ }
-
- retry_load_slab:
-
-@@ -3089,11 +3219,6 @@ static void *___slab_alloc(struct kmem_c
- c->slab = slab;
-
- goto load_freelist;
--
--return_single:
--
-- deactivate_slab(s, slab, get_freepointer(s, freelist));
-- return freelist;
- }
-
- /*
-@@ -3341,9 +3466,10 @@ static void __slab_free(struct kmem_cach
- if (kfence_free(head))
- return;
-
-- if (kmem_cache_debug(s) &&
-- !free_debug_processing(s, slab, head, tail, cnt, addr))
-+ if (kmem_cache_debug(s)) {
-+ free_debug_processing(s, slab, head, tail, cnt, addr);
- return;
-+ }
-
- do {
- if (unlikely(n)) {
-@@ -3936,6 +4062,7 @@ static void early_kmem_cache_node_alloc(
- slab = new_slab(kmem_cache_node, GFP_NOWAIT, node);
-
- BUG_ON(!slab);
-+ inc_slabs_node(kmem_cache_node, slab_nid(slab), slab->objects);
- if (slab_nid(slab) != node) {
- pr_err("SLUB: Unable to allocate memory from node %d\n", node);
- pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
-@@ -3950,7 +4077,6 @@ static void early_kmem_cache_node_alloc(
- n = kasan_slab_alloc(kmem_cache_node, n, GFP_KERNEL, false);
- slab->freelist = get_freepointer(kmem_cache_node, n);
- slab->inuse = 1;
-- slab->frozen = 0;
- kmem_cache_node->node[node] = n;
- init_kmem_cache_node(n);
- inc_slabs_node(kmem_cache_node, node, slab->objects);
-@@ -4611,6 +4737,7 @@ static int __kmem_cache_do_shrink(struct
- if (free == slab->objects) {
- list_move(&slab->slab_list, &discard);
- n->nr_partial--;
-+ dec_slabs_node(s, node, slab->objects);
- } else if (free <= SHRINK_PROMOTE_MAX)
- list_move(&slab->slab_list, promote + free - 1);
- }
-@@ -4626,7 +4753,7 @@ static int __kmem_cache_do_shrink(struct
-
- /* Release empty slabs */
- list_for_each_entry_safe(slab, t, &discard, slab_list)
-- discard_slab(s, slab);
-+ free_slab(s, slab);
-
- if (slabs_node(s, node))
- ret = 1;
-@@ -5601,7 +5728,7 @@ static ssize_t validate_store(struct kme
- {
- int ret = -EINVAL;
-
-- if (buf[0] == '1') {
-+ if (buf[0] == '1' && kmem_cache_debug(s)) {
- ret = validate_slab_cache(s);
- if (ret >= 0)
- ret = length;
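The locking rule this patch establishes, reduced to a sketch:
every alloc/free on a debug cache happens under n->list_lock, the same
lock validation holds, so validation never observes a half-updated slab.
debug_cache_alloc_one() below is hypothetical and heavily simplified from
alloc_single_from_partial() above:

static void *debug_cache_alloc_one(struct kmem_cache *s,
				   struct kmem_cache_node *n,
				   struct slab *slab)
{
	unsigned long flags;
	void *object;

	spin_lock_irqsave(&n->list_lock, flags);
	/* Take a single object; debug caches have no percpu freelists. */
	object = slab->freelist;
	slab->freelist = get_freepointer(s, object);
	slab->inuse++;
	spin_unlock_irqrestore(&n->list_lock, flags);

	return object;
}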
diff --git a/patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch b/patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
index 3cd9ab9fad7b..7cc26235c3d1 100644
--- a/patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+++ b/patches/0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
@@ -1,6 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Aug 2022 16:15:44 +0200
-Subject: [PATCH 2/4] net: Remove the obsolte u64_stats_fetch_*_irq() users (part one).
+Subject: [PATCH 2/4] net: Remove the obsolte u64_stats_fetch_*_irq() users
+ (drivers).
Now that the 32bit UP oddity is gone and 32bit always uses a sequence
count, there is no need for the fetch_irq() variants anymore.
@@ -57,6 +58,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
drivers/net/ethernet/ti/am65-cpsw-nuss.c | 4 +-
drivers/net/ethernet/ti/netcp_core.c | 8 ++--
drivers/net/ethernet/via/via-rhine.c | 8 ++--
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 8 ++--
drivers/net/hyperv/netvsc_drv.c | 32 +++++++++----------
drivers/net/ifb.c | 12 +++----
drivers/net/ipvlan/ipvlan_main.c | 4 +-
@@ -73,7 +75,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
drivers/net/vxlan/vxlan_vnifilter.c | 4 +-
drivers/net/wwan/mhi_wwan_mbim.c | 8 ++--
drivers/net/xen-netfront.c | 8 ++--
- 62 files changed, 270 insertions(+), 270 deletions(-)
+ 63 files changed, 274 insertions(+), 274 deletions(-)
--- a/drivers/net/ethernet/alacritech/slic.h
+++ b/drivers/net/ethernet/alacritech/slic.h
@@ -113,7 +115,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
-@@ -3270,10 +3270,10 @@ static void ena_get_stats64(struct net_d
+@@ -3268,10 +3268,10 @@ static void ena_get_stats64(struct net_d
tx_ring = &adapter->tx_ring[i];
do {
@@ -126,7 +128,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->tx_packets += packets;
stats->tx_bytes += bytes;
-@@ -3281,20 +3281,20 @@ static void ena_get_stats64(struct net_d
+@@ -3279,20 +3279,20 @@ static void ena_get_stats64(struct net_d
rx_ring = &adapter->rx_ring[i];
do {
@@ -554,7 +556,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
start));
s->tx_packets += packets;
s->tx_bytes += bytes;
-@@ -1274,9 +1274,9 @@ void gve_handle_report_stats(struct gve_
+@@ -1273,9 +1273,9 @@ void gve_handle_report_stats(struct gve_
}
do {
@@ -685,7 +687,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
*data += size;
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
-@@ -418,10 +418,10 @@ static void i40e_get_netdev_stats_struct
+@@ -419,10 +419,10 @@ static void i40e_get_netdev_stats_struct
unsigned int start;
do {
@@ -698,7 +700,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->tx_packets += packets;
stats->tx_bytes += bytes;
-@@ -471,10 +471,10 @@ static void i40e_get_netdev_stats_struct
+@@ -472,10 +472,10 @@ static void i40e_get_netdev_stats_struct
if (!ring)
continue;
do {
@@ -711,7 +713,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->rx_packets += packets;
stats->rx_bytes += bytes;
-@@ -896,10 +896,10 @@ static void i40e_update_vsi_stats(struct
+@@ -897,10 +897,10 @@ static void i40e_update_vsi_stats(struct
continue;
do {
@@ -724,7 +726,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
tx_b += bytes;
tx_p += packets;
tx_restart += p->tx_stats.restart_queue;
-@@ -914,10 +914,10 @@ static void i40e_update_vsi_stats(struct
+@@ -915,10 +915,10 @@ static void i40e_update_vsi_stats(struct
continue;
do {
@@ -737,7 +739,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
rx_b += bytes;
rx_p += packets;
rx_buf += p->rx_stats.alloc_buff_failed;
-@@ -934,10 +934,10 @@ static void i40e_update_vsi_stats(struct
+@@ -935,10 +935,10 @@ static void i40e_update_vsi_stats(struct
continue;
do {
@@ -781,7 +783,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
*data += size;
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
-@@ -6297,10 +6297,10 @@ ice_fetch_u64_stats_per_ring(struct u64_
+@@ -6370,10 +6370,10 @@ ice_fetch_u64_stats_per_ring(struct u64_
unsigned int start;
do {
@@ -834,7 +836,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_unlock(&adapter->stats64_lock);
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
-@@ -6633,10 +6633,10 @@ void igb_update_stats(struct igb_adapter
+@@ -6632,10 +6632,10 @@ void igb_update_stats(struct igb_adapter
}
do {
@@ -847,7 +849,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
bytes += _bytes;
packets += _packets;
}
-@@ -6649,10 +6649,10 @@ void igb_update_stats(struct igb_adapter
+@@ -6648,10 +6648,10 @@ void igb_update_stats(struct igb_adapter
for (i = 0; i < adapter->num_tx_queues; i++) {
struct igb_ring *ring = adapter->tx_ring[i];
do {
@@ -900,7 +902,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_unlock(&adapter->stats64_lock);
--- a/drivers/net/ethernet/intel/igc/igc_main.c
+++ b/drivers/net/ethernet/intel/igc/igc_main.c
-@@ -4645,10 +4645,10 @@ void igc_update_stats(struct igc_adapter
+@@ -4682,10 +4682,10 @@ void igc_update_stats(struct igc_adapter
}
do {
@@ -913,7 +915,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
bytes += _bytes;
packets += _packets;
}
-@@ -4662,10 +4662,10 @@ void igc_update_stats(struct igc_adapter
+@@ -4699,10 +4699,10 @@ void igc_update_stats(struct igc_adapter
struct igc_ring *ring = adapter->tx_ring[i];
do {
@@ -1151,7 +1153,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->tx_bytes = _bytes;
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
-@@ -853,7 +853,7 @@ static void mtk_get_stats64(struct net_d
+@@ -865,7 +865,7 @@ static void mtk_get_stats64(struct net_d
}
do {
@@ -1160,7 +1162,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
storage->rx_packets = hw_stats->rx_packets;
storage->tx_packets = hw_stats->tx_packets;
storage->rx_bytes = hw_stats->rx_bytes;
-@@ -865,7 +865,7 @@ static void mtk_get_stats64(struct net_d
+@@ -877,7 +877,7 @@ static void mtk_get_stats64(struct net_d
storage->rx_crc_errors = hw_stats->rx_fcs_errors;
storage->rx_errors = hw_stats->rx_checksum_errors;
storage->tx_aborted_errors = hw_stats->tx_skip;
@@ -1169,7 +1171,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
storage->tx_errors = dev->stats.tx_errors;
storage->rx_dropped = dev->stats.rx_dropped;
-@@ -3664,13 +3664,13 @@ static void mtk_get_ethtool_stats(struct
+@@ -3684,13 +3684,13 @@ static void mtk_get_ethtool_stats(struct
do {
data_dst = data;
@@ -1264,7 +1266,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
data[i++] = bytes;
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
-@@ -1630,21 +1630,21 @@ static void nfp_net_stat64(struct net_de
+@@ -1631,21 +1631,21 @@ static void nfp_net_stat64(struct net_de
unsigned int start;
do {
@@ -1292,7 +1294,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->tx_errors += data[2];
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
-@@ -649,7 +649,7 @@ static u64 *nfp_vnic_get_sw_stats(struct
+@@ -686,7 +686,7 @@ static u64 *nfp_vnic_get_sw_stats(struct
unsigned int start;
do {
@@ -1301,7 +1303,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
data[0] = nn->r_vecs[i].rx_pkts;
tmp[0] = nn->r_vecs[i].hw_csum_rx_ok;
tmp[1] = nn->r_vecs[i].hw_csum_rx_inner_ok;
-@@ -657,10 +657,10 @@ static u64 *nfp_vnic_get_sw_stats(struct
+@@ -694,10 +694,10 @@ static u64 *nfp_vnic_get_sw_stats(struct
tmp[3] = nn->r_vecs[i].hw_csum_rx_error;
tmp[4] = nn->r_vecs[i].rx_replace_buf_alloc_fail;
tmp[5] = nn->r_vecs[i].hw_tls_rx;
@@ -1314,7 +1316,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
data[1] = nn->r_vecs[i].tx_pkts;
data[2] = nn->r_vecs[i].tx_busy;
tmp[6] = nn->r_vecs[i].hw_csum_tx;
-@@ -670,7 +670,7 @@ static u64 *nfp_vnic_get_sw_stats(struct
+@@ -707,7 +707,7 @@ static u64 *nfp_vnic_get_sw_stats(struct
tmp[10] = nn->r_vecs[i].hw_tls_tx;
tmp[11] = nn->r_vecs[i].tls_tx_fallback;
tmp[12] = nn->r_vecs[i].tls_tx_no_fallback;
@@ -1434,7 +1436,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
stats->tx_errors = priv->stats_tx.errors;
--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
-@@ -1365,12 +1365,12 @@ static void am65_cpsw_nuss_ndo_get_stats
+@@ -1362,12 +1362,12 @@ static void am65_cpsw_nuss_ndo_get_stats
cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
do {
@@ -1495,6 +1497,29 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
}
static void rhine_set_rx_mode(struct net_device *dev)
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1305,16 +1305,16 @@ axienet_get_stats64(struct net_device *d
+ netdev_stats_to_stats64(stats, &dev->stats);
+
+ do {
+- start = u64_stats_fetch_begin_irq(&lp->rx_stat_sync);
++ start = u64_stats_fetch_begin(&lp->rx_stat_sync);
+ stats->rx_packets = u64_stats_read(&lp->rx_packets);
+ stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
+- } while (u64_stats_fetch_retry_irq(&lp->rx_stat_sync, start));
++ } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
+
+ do {
+- start = u64_stats_fetch_begin_irq(&lp->tx_stat_sync);
++ start = u64_stats_fetch_begin(&lp->tx_stat_sync);
+ stats->tx_packets = u64_stats_read(&lp->tx_packets);
+ stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
+- } while (u64_stats_fetch_retry_irq(&lp->tx_stat_sync, start));
++ } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
+ }
+
+ static const struct net_device_ops axienet_netdev_ops = {
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1264,12 +1264,12 @@ static void netvsc_get_vf_stats(struct n
@@ -1682,7 +1707,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
}
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
-@@ -2823,9 +2823,9 @@ static void get_rx_sc_stats(struct net_d
+@@ -2795,9 +2795,9 @@ static void get_rx_sc_stats(struct net_d
stats = per_cpu_ptr(rx_sc->stats, cpu);
do {
@@ -1694,7 +1719,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
sum->InOctetsValidated += tmp.InOctetsValidated;
sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
-@@ -2904,9 +2904,9 @@ static void get_tx_sc_stats(struct net_d
+@@ -2876,9 +2876,9 @@ static void get_tx_sc_stats(struct net_d
stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
do {
@@ -1706,7 +1731,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
sum->OutPktsProtected += tmp.OutPktsProtected;
sum->OutPktsEncrypted += tmp.OutPktsEncrypted;
-@@ -2960,9 +2960,9 @@ static void get_secy_stats(struct net_de
+@@ -2932,9 +2932,9 @@ static void get_secy_stats(struct net_de
stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
do {
@@ -1779,7 +1804,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
static int
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
-@@ -1853,13 +1853,13 @@ team_get_stats64(struct net_device *dev,
+@@ -1865,13 +1865,13 @@ team_get_stats64(struct net_device *dev,
for_each_possible_cpu(i) {
p = per_cpu_ptr(team->pcpu_stats, i);
do {
@@ -1859,7 +1884,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
result->xdp_packets += packets;
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
-@@ -2066,18 +2066,18 @@ static void virtnet_stats(struct net_dev
+@@ -2069,18 +2069,18 @@ static void virtnet_stats(struct net_dev
struct send_queue *sq = &vi->sq[i];
do {
@@ -1882,7 +1907,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
tot->rx_packets += rpackets;
tot->tx_packets += tpackets;
-@@ -2688,12 +2688,12 @@ static void virtnet_get_ethtool_stats(st
+@@ -2691,12 +2691,12 @@ static void virtnet_get_ethtool_stats(st
stats_base = (u8 *)&rq->stats;
do {
@@ -1897,7 +1922,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
idx += VIRTNET_RQ_STATS_LEN;
}
-@@ -2702,12 +2702,12 @@ static void virtnet_get_ethtool_stats(st
+@@ -2705,12 +2705,12 @@ static void virtnet_get_ethtool_stats(st
stats_base = (u8 *)&sq->stats;
do {
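After this conversion every reader in the drivers above follows the same
lock-free snapshot loop. A sketch with a hypothetical stats structure
(struct my_stats, my_stats_read()):

struct my_stats {
	struct u64_stats_sync syncp;
	u64_stats_t packets;
	u64_stats_t bytes;
};

static void my_stats_read(const struct my_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		/* No _irq variant needed: 32bit now always uses a seqcount. */
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->packets);
		*bytes = u64_stats_read(&s->bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}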
diff --git a/patches/0003-mm-slub-remove-slab_lock-usage-for-debug-operations.patch b/patches/0003-mm-slub-remove-slab_lock-usage-for-debug-operations.patch
deleted file mode 100644
index 0e434c194f1e..000000000000
--- a/patches/0003-mm-slub-remove-slab_lock-usage-for-debug-operations.patch
+++ /dev/null
@@ -1,97 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Tue, 23 Aug 2022 19:03:58 +0200
-Subject: [PATCH 3/5] mm/slub: remove slab_lock() usage for debug operations
-
-All alloc and free operations on debug caches are now serialized by
-n->list_lock, so we can remove slab_lock() usage in validate_slab()
-and list_slab_objects() as those also happen under n->list_lock.
-
-Note the usage in list_slab_objects() could happen even on non-debug
-caches, but only during cache shutdown time, so there should not be any
-parallel freeing activity anymore, except for buggy slab users. In that
-case the slab_lock() would not help against the common cmpxchg-based
-fast paths (in non-debug caches) anyway.
-
-Also adjust documentation comments accordingly.
-
-Suggested-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
-Acked-by: David Rientjes <rientjes@google.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slub.c | 19 ++++++++-----------
- 1 file changed, 8 insertions(+), 11 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -50,7 +50,7 @@
- * 1. slab_mutex (Global Mutex)
- * 2. node->list_lock (Spinlock)
- * 3. kmem_cache->cpu_slab->lock (Local lock)
-- * 4. slab_lock(slab) (Only on some arches or for debugging)
-+ * 4. slab_lock(slab) (Only on some arches)
- * 5. object_map_lock (Only for debugging)
- *
- * slab_mutex
-@@ -64,8 +64,9 @@
- * The slab_lock is a wrapper around the page lock, thus it is a bit
- * spinlock.
- *
-- * The slab_lock is only used for debugging and on arches that do not
-- * have the ability to do a cmpxchg_double. It only protects:
-+ * The slab_lock is only used on arches that do not have the ability
-+ * to do a cmpxchg_double. It only protects:
-+ *
- * A. slab->freelist -> List of free objects in a slab
- * B. slab->inuse -> Number of objects in use
- * C. slab->objects -> Number of objects in slab
-@@ -94,6 +95,9 @@
- * allocating a long series of objects that fill up slabs does not require
- * the list lock.
- *
-+ * For debug caches, all allocations are forced to go through a list_lock
-+ * protected region to serialize against concurrent validation.
-+ *
- * cpu_slab->lock local lock
- *
- * This locks protect slowpath manipulation of all kmem_cache_cpu fields
-@@ -4368,7 +4372,6 @@ static void list_slab_objects(struct kme
- void *p;
-
- slab_err(s, slab, text, s->name);
-- slab_lock(slab, &flags);
-
- map = get_map(s, slab);
- for_each_object(p, s, addr, slab->objects) {
-@@ -4379,7 +4382,6 @@ static void list_slab_objects(struct kme
- }
- }
- put_map(map);
-- slab_unlock(slab, &flags);
- #endif
- }
-
-@@ -5107,12 +5109,9 @@ static void validate_slab(struct kmem_ca
- {
- void *p;
- void *addr = slab_address(slab);
-- unsigned long flags;
--
-- slab_lock(slab, &flags);
-
- if (!check_slab(s, slab) || !on_freelist(s, slab, NULL))
-- goto unlock;
-+ return;
-
- /* Now we know that a valid freelist exists */
- __fill_map(obj_map, s, slab);
-@@ -5123,8 +5122,6 @@ static void validate_slab(struct kmem_ca
- if (!check_object(s, slab, p, val))
- break;
- }
--unlock:
-- slab_unlock(slab, &flags);
- }
-
- static int validate_slab_node(struct kmem_cache *s,
diff --git a/patches/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch b/patches/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch
deleted file mode 100644
index 8dda257c65b9..000000000000
--- a/patches/0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 25 Aug 2022 18:41:26 +0200
-Subject: [PATCH 3/8] mm/vmstat: Use preempt_[dis|en]able_nested()
-
-Replace the open coded CONFIG_PREEMPT_RT conditional
-preempt_enable/disable() pairs with the new helper functions which hide
-the underlying implementation details.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: linux-mm@kvack.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20220825164131.402717-4-bigeasy@linutronix.de
----
- mm/vmstat.c | 36 ++++++++++++------------------------
- 1 file changed, 12 insertions(+), 24 deletions(-)
-
---- a/mm/vmstat.c
-+++ b/mm/vmstat.c
-@@ -355,8 +355,7 @@ void __mod_zone_page_state(struct zone *
- * CPU migrations and preemption potentially corrupts a counter so
- * disable preemption.
- */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- x = delta + __this_cpu_read(*p);
-
-@@ -368,8 +367,7 @@ void __mod_zone_page_state(struct zone *
- }
- __this_cpu_write(*p, x);
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
- EXPORT_SYMBOL(__mod_zone_page_state);
-
-@@ -393,8 +391,7 @@ void __mod_node_page_state(struct pglist
- }
-
- /* See __mod_node_page_state */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- x = delta + __this_cpu_read(*p);
-
-@@ -406,8 +403,7 @@ void __mod_node_page_state(struct pglist
- }
- __this_cpu_write(*p, x);
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
- EXPORT_SYMBOL(__mod_node_page_state);
-
-@@ -441,8 +437,7 @@ void __inc_zone_state(struct zone *zone,
- s8 v, t;
-
- /* See __mod_node_page_state */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- v = __this_cpu_inc_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
-@@ -453,8 +448,7 @@ void __inc_zone_state(struct zone *zone,
- __this_cpu_write(*p, -overstep);
- }
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
-
- void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -466,8 +460,7 @@ void __inc_node_state(struct pglist_data
- VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
-
- /* See __mod_node_page_state */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- v = __this_cpu_inc_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
-@@ -478,8 +471,7 @@ void __inc_node_state(struct pglist_data
- __this_cpu_write(*p, -overstep);
- }
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
-
- void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -501,8 +493,7 @@ void __dec_zone_state(struct zone *zone,
- s8 v, t;
-
- /* See __mod_node_page_state */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- v = __this_cpu_dec_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
-@@ -513,8 +504,7 @@ void __dec_zone_state(struct zone *zone,
- __this_cpu_write(*p, overstep);
- }
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
-
- void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -526,8 +516,7 @@ void __dec_node_state(struct pglist_data
- VM_WARN_ON_ONCE(vmstat_item_in_bytes(item));
-
- /* See __mod_node_page_state */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
-
- v = __this_cpu_dec_return(*p);
- t = __this_cpu_read(pcp->stat_threshold);
-@@ -538,8 +527,7 @@ void __dec_node_state(struct pglist_data
- __this_cpu_write(*p, overstep);
- }
-
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-+ preempt_enable_nested();
- }
-
- void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
diff --git a/patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch b/patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
index 32eb23a2fc72..3e63311e9e3f 100644
--- a/patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+++ b/patches/0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
@@ -1,6 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 25 Aug 2022 16:17:37 +0200
-Subject: [PATCH 3/4] net: Remove the obsolte u64_stats_fetch_*_irq() users (part two).
+Subject: [PATCH 3/4] net: Remove the obsolte u64_stats_fetch_*_irq() users
+ (net).
Now that the 32bit UP oddity is gone and 32bit always uses a sequence
count, there is no need for the fetch_irq() variants anymore.
@@ -80,7 +81,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
u64_stats_add(&stats->rx_bytes, rxbytes);
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -10492,12 +10492,12 @@ void dev_fetch_sw_netstats(struct rtnl_l
+@@ -10477,12 +10477,12 @@ void dev_fetch_sw_netstats(struct rtnl_l
stats = per_cpu_ptr(netstats, cpu);
do {
@@ -97,7 +98,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
s->rx_bytes += rx_bytes;
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
-@@ -8268,10 +8268,10 @@ static void devlink_trap_stats_read(stru
+@@ -8304,10 +8304,10 @@ static void devlink_trap_stats_read(stru
cpu_stats = per_cpu_ptr(trap_stats, i);
do {
@@ -192,7 +193,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
static int
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
-@@ -934,12 +934,12 @@ static void dsa_slave_get_ethtool_stats(
+@@ -976,12 +976,12 @@ static void dsa_slave_get_ethtool_stats(
s = per_cpu_ptr(dev->tstats, i);
do {
@@ -209,7 +210,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
data[2] += rx_packets;
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
-@@ -1684,9 +1684,9 @@ u64 snmp_get_cpu_field64(void __percpu *
+@@ -1706,9 +1706,9 @@ u64 snmp_get_cpu_field64(void __percpu *
bhptr = per_cpu_ptr(mib, cpu);
syncp = (struct u64_stats_sync *)(bhptr + syncp_offset);
do {
@@ -223,7 +224,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
}
--- a/net/ipv6/seg6_local.c
+++ b/net/ipv6/seg6_local.c
-@@ -1508,13 +1508,13 @@ static int put_nla_counters(struct sk_bu
+@@ -1644,13 +1644,13 @@ static int put_nla_counters(struct sk_bu
pcounters = per_cpu_ptr(slwt->pcpu_counters, i);
do {
@@ -241,7 +242,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
counters.bytes += bytes;
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
-@@ -2316,9 +2316,9 @@ static inline u64 sta_get_tidstats_msdu(
+@@ -2396,9 +2396,9 @@ static inline u64 sta_get_tidstats_msdu(
u64 value;
do {
@@ -253,7 +254,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
return value;
}
-@@ -2384,9 +2384,9 @@ static inline u64 sta_get_stats_bytes(st
+@@ -2464,9 +2464,9 @@ static inline u64 sta_get_stats_bytes(st
u64 value;
do {
@@ -314,7 +315,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
}
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
-@@ -707,9 +707,9 @@ static void get_dp_stats(const struct da
+@@ -716,9 +716,9 @@ static void get_dp_stats(const struct da
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
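Every hunk in this renamed patch applies the same reader-side
substitution. A representative loop after the conversion (the stats
structure and field names here are illustrative, not from one specific
driver):

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		packets = u64_stats_read(&stats->rx_packets);
		bytes = u64_stats_read(&stats->rx_bytes);
	} while (u64_stats_fetch_retry(&stats->syncp, start));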
diff --git a/patches/0003-slub-Make-PREEMPT_RT-support-less-convoluted.patch b/patches/0003-slub-Make-PREEMPT_RT-support-less-convoluted.patch
deleted file mode 100644
index 8987582e719f..000000000000
--- a/patches/0003-slub-Make-PREEMPT_RT-support-less-convoluted.patch
+++ /dev/null
@@ -1,145 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 17 Aug 2022 18:26:55 +0200
-Subject: [PATCH 03/12] slub: Make PREEMPT_RT support less convoluted
-
-The slub code already has a few helpers depending on PREEMPT_RT. Add a few
-more and get rid of the CONFIG_PREEMPT_RT conditionals all over the place.
-
-No functional change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Andrew Morton <akpm@linux-foundation.org>
-Cc: Christoph Lameter <cl@linux.com>
-Cc: David Rientjes <rientjes@google.com>
-Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
-Cc: Pekka Enberg <penberg@kernel.org>
-Cc: Vlastimil Babka <vbabka@suse.cz>
-Cc: linux-mm@kvack.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/YwcqCCJM1oLREWZc@linutronix.de
----
- mm/slub.c | 56 ++++++++++++++++++++++++--------------------------------
- 1 file changed, 24 insertions(+), 32 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -104,9 +104,11 @@
- * except the stat counters. This is a percpu structure manipulated only by
- * the local cpu, so the lock protects against being preempted or interrupted
- * by an irq. Fast path operations rely on lockless operations instead.
-- * On PREEMPT_RT, the local lock does not actually disable irqs (and thus
-- * prevent the lockless operations), so fastpath operations also need to take
-- * the lock and are no longer lockless.
-+ *
-+ * On PREEMPT_RT, the local lock disables neither interrupts nor preemption,
-+ * which means the lockless fastpath cannot be used as it might interfere with
-+ * an in-progress slow path operation. In this case the local lock is always
-+ * taken but it still utilizes the freelist for the common operations.
- *
- * lockless fastpaths
- *
-@@ -167,8 +169,9 @@
- * function call even on !PREEMPT_RT, use inline preempt_disable() there.
- */
- #ifndef CONFIG_PREEMPT_RT
--#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
--#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
-+#define slub_get_cpu_ptr(var) get_cpu_ptr(var)
-+#define slub_put_cpu_ptr(var) put_cpu_ptr(var)
-+#define USE_LOCKLESS_FAST_PATH() (true)
- #else
- #define slub_get_cpu_ptr(var) \
- ({ \
-@@ -180,6 +183,7 @@ do { \
- (void)(var); \
- migrate_enable(); \
- } while (0)
-+#define USE_LOCKLESS_FAST_PATH() (false)
- #endif
-
- #ifdef CONFIG_SLUB_DEBUG
-@@ -474,7 +478,7 @@ static inline bool __cmpxchg_double_slab
- void *freelist_new, unsigned long counters_new,
- const char *n)
- {
-- if (!IS_ENABLED(CONFIG_PREEMPT_RT))
-+ if (USE_LOCKLESS_FAST_PATH())
- lockdep_assert_irqs_disabled();
- #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
- defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
-@@ -3287,14 +3291,8 @@ static __always_inline void *slab_alloc_
-
- object = c->freelist;
- slab = c->slab;
-- /*
-- * We cannot use the lockless fastpath on PREEMPT_RT because if a
-- * slowpath has taken the local_lock_irqsave(), it is not protected
-- * against a fast path operation in an irq handler. So we need to take
-- * the slow path which uses local_lock. It is still relatively fast if
-- * there is a suitable cpu freelist.
-- */
-- if (IS_ENABLED(CONFIG_PREEMPT_RT) ||
-+
-+ if (!USE_LOCKLESS_FAST_PATH() ||
- unlikely(!object || !slab || !node_match(slab, node))) {
- object = __slab_alloc(s, gfpflags, node, addr, c);
- } else {
-@@ -3554,6 +3552,7 @@ static __always_inline void do_slab_free
- void *tail_obj = tail ? : head;
- struct kmem_cache_cpu *c;
- unsigned long tid;
-+ void **freelist;
-
- redo:
- /*
-@@ -3568,9 +3567,13 @@ static __always_inline void do_slab_free
- /* Same with comment on barrier() in slab_alloc_node() */
- barrier();
-
-- if (likely(slab == c->slab)) {
--#ifndef CONFIG_PREEMPT_RT
-- void **freelist = READ_ONCE(c->freelist);
-+ if (unlikely(slab != c->slab)) {
-+ __slab_free(s, slab, head, tail_obj, cnt, addr);
-+ return;
-+ }
-+
-+ if (USE_LOCKLESS_FAST_PATH()) {
-+ freelist = READ_ONCE(c->freelist);
-
- set_freepointer(s, tail_obj, freelist);
-
-@@ -3582,16 +3585,8 @@ static __always_inline void do_slab_free
- note_cmpxchg_failure("slab_free", s, tid);
- goto redo;
- }
--#else /* CONFIG_PREEMPT_RT */
-- /*
-- * We cannot use the lockless fastpath on PREEMPT_RT because if
-- * a slowpath has taken the local_lock_irqsave(), it is not
-- * protected against a fast path operation in an irq handler. So
-- * we need to take the local_lock. We shouldn't simply defer to
-- * __slab_free() as that wouldn't use the cpu freelist at all.
-- */
-- void **freelist;
--
-+ } else {
-+ /* Update the free list under the local lock */
- local_lock(&s->cpu_slab->lock);
- c = this_cpu_ptr(s->cpu_slab);
- if (unlikely(slab != c->slab)) {
-@@ -3606,11 +3601,8 @@ static __always_inline void do_slab_free
- c->tid = next_tid(tid);
-
- local_unlock(&s->cpu_slab->lock);
--#endif
-- stat(s, FREE_FASTPATH);
-- } else
-- __slab_free(s, slab, head, tail_obj, cnt, addr);
--
-+ }
-+ stat(s, FREE_FASTPATH);
- }
-
- static __always_inline void slab_free(struct kmem_cache *s, struct slab *slab,
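The net effect of this dropped patch is that both the allocation and the
free path branch on the single USE_LOCKLESS_FAST_PATH() predicate defined
above instead of scattered CONFIG_PREEMPT_RT checks; schematically:

	if (USE_LOCKLESS_FAST_PATH()) {
		/* lockless: this_cpu cmpxchg on the per-cpu freelist */
	} else {
		/* take local_lock(&s->cpu_slab->lock), then update the
		 * freelist under the lock */
	}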
diff --git a/patches/0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch b/patches/0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
index 03c5a125ccf7..15ef168d6b0f 100644
--- a/patches/0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+++ b/patches/0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
@@ -28,7 +28,7 @@ Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
-@@ -2107,11 +2107,11 @@ static void bpf_prog_get_stats(const str
+@@ -2117,11 +2117,11 @@ static void bpf_prog_get_stats(const str
st = per_cpu_ptr(prog->stats, cpu);
do {
diff --git a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
index 374f6dfdabb5..271195a3507b 100644
--- a/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
+++ b/patches/0004-drm-i915-Don-t-disable-interrupts-on-PREEMPT_RT-duri.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
-@@ -522,7 +522,8 @@ void intel_pipe_update_start(struct inte
+@@ -521,7 +521,8 @@ void intel_pipe_update_start(struct inte
*/
intel_psr_wait_for_idle_locked(new_crtc_state);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
crtc->debug.min_vbl = min;
crtc->debug.max_vbl = max;
-@@ -547,11 +548,13 @@ void intel_pipe_update_start(struct inte
+@@ -546,11 +547,13 @@ void intel_pipe_update_start(struct inte
break;
}
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -584,7 +587,8 @@ void intel_pipe_update_start(struct inte
+@@ -583,7 +586,8 @@ void intel_pipe_update_start(struct inte
return;
irq_disable:
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_VBLANK_EVADE)
-@@ -685,7 +689,8 @@ void intel_pipe_update_end(struct intel_
+@@ -684,7 +688,8 @@ void intel_pipe_update_end(struct intel_
*/
intel_vrr_send_push(new_crtc_state);
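The hunks in this refresh keep the existing transformation: each
local_irq_disable()/local_irq_enable() pair around the vblank-evasion
window is made conditional on !PREEMPT_RT. The recurring shape,
simplified:

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_disable();

	/* ... timing-critical pipe update window ... */

	if (!IS_ENABLED(CONFIG_PREEMPT_RT))
		local_irq_enable();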
diff --git a/patches/0004-mm-slub-convert-object_map_lock-to-non-raw-spinlock.patch b/patches/0004-mm-slub-convert-object_map_lock-to-non-raw-spinlock.patch
deleted file mode 100644
index 2dacdc7f664b..000000000000
--- a/patches/0004-mm-slub-convert-object_map_lock-to-non-raw-spinlock.patch
+++ /dev/null
@@ -1,89 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Tue, 23 Aug 2022 19:03:59 +0200
-Subject: [PATCH 4/5] mm/slub: convert object_map_lock to non-raw spinlock
-
-The only remaining user of object_map_lock is list_slab_objects().
-Obtaining the lock there used to happen under slab_lock() which implied
-disabling irqs on PREEMPT_RT, thus it's a raw_spinlock. With the
-slab_lock() removed, we can convert it to a normal spinlock.
-
-Also remove the get_map()/put_map() wrappers as list_slab_objects()
-became their only remaining user.
-
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Acked-by: David Rientjes <rientjes@google.com>
-Reviewed-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slub.c | 36 ++++++------------------------------
- 1 file changed, 6 insertions(+), 30 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -565,7 +565,7 @@ static inline bool cmpxchg_double_slab(s
-
- #ifdef CONFIG_SLUB_DEBUG
- static unsigned long object_map[BITS_TO_LONGS(MAX_OBJS_PER_PAGE)];
--static DEFINE_RAW_SPINLOCK(object_map_lock);
-+static DEFINE_SPINLOCK(object_map_lock);
-
- static void __fill_map(unsigned long *obj_map, struct kmem_cache *s,
- struct slab *slab)
-@@ -599,30 +599,6 @@ static bool slab_add_kunit_errors(void)
- static inline bool slab_add_kunit_errors(void) { return false; }
- #endif
-
--/*
-- * Determine a map of objects in use in a slab.
-- *
-- * Node listlock must be held to guarantee that the slab does
-- * not vanish from under us.
-- */
--static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
-- __acquires(&object_map_lock)
--{
-- VM_BUG_ON(!irqs_disabled());
--
-- raw_spin_lock(&object_map_lock);
--
-- __fill_map(object_map, s, slab);
--
-- return object_map;
--}
--
--static void put_map(unsigned long *map) __releases(&object_map_lock)
--{
-- VM_BUG_ON(map != object_map);
-- raw_spin_unlock(&object_map_lock);
--}
--
- static inline unsigned int size_from_object(struct kmem_cache *s)
- {
- if (s->flags & SLAB_RED_ZONE)
-@@ -4367,21 +4343,21 @@ static void list_slab_objects(struct kme
- {
- #ifdef CONFIG_SLUB_DEBUG
- void *addr = slab_address(slab);
-- unsigned long flags;
-- unsigned long *map;
- void *p;
-
- slab_err(s, slab, text, s->name);
-
-- map = get_map(s, slab);
-+ spin_lock(&object_map_lock);
-+ __fill_map(object_map, s, slab);
-+
- for_each_object(p, s, addr, slab->objects) {
-
-- if (!test_bit(__obj_to_index(s, addr, p), map)) {
-+ if (!test_bit(__obj_to_index(s, addr, p), object_map)) {
- pr_err("Object 0x%p @offset=%tu\n", p, p - addr);
- print_tracking(s, p);
- }
- }
-- put_map(map);
-+ spin_unlock(&object_map_lock);
- #endif
- }
-
diff --git a/patches/0005-mm-slub-simplify-__cmpxchg_double_slab-and-slab_-un-.patch b/patches/0005-mm-slub-simplify-__cmpxchg_double_slab-and-slab_-un-.patch
deleted file mode 100644
index 1c1046ddbbaa..000000000000
--- a/patches/0005-mm-slub-simplify-__cmpxchg_double_slab-and-slab_-un-.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From: Vlastimil Babka <vbabka@suse.cz>
-Date: Tue, 23 Aug 2022 19:04:00 +0200
-Subject: [PATCH 5/5] mm/slub: simplify __cmpxchg_double_slab() and
- slab_[un]lock()
-
-The PREEMPT_RT specific disabling of irqs in __cmpxchg_double_slab()
-(through slab_[un]lock()) is unnecessary as bit_spin_lock() disables
-preemption and that's sufficient on RT where interrupts are threaded.
-
-That means we no longer need the slab_[un]lock() wrappers, so delete
-them and rename the current __slab_[un]lock() to slab_[un]lock().
-
-Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
-Acked-by: David Rientjes <rientjes@google.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/slub.c | 39 ++++++++++++---------------------------
- 1 file changed, 12 insertions(+), 27 deletions(-)
-
---- a/mm/slub.c
-+++ b/mm/slub.c
-@@ -446,7 +446,7 @@ slub_set_cpu_partial(struct kmem_cache *
- /*
- * Per slab locking using the pagelock
- */
--static __always_inline void __slab_lock(struct slab *slab)
-+static __always_inline void slab_lock(struct slab *slab)
- {
- struct page *page = slab_page(slab);
-
-@@ -454,7 +454,7 @@ static __always_inline void __slab_lock(
- bit_spin_lock(PG_locked, &page->flags);
- }
-
--static __always_inline void __slab_unlock(struct slab *slab)
-+static __always_inline void slab_unlock(struct slab *slab)
- {
- struct page *page = slab_page(slab);
-
-@@ -462,24 +462,12 @@ static __always_inline void __slab_unloc
- __bit_spin_unlock(PG_locked, &page->flags);
- }
-
--static __always_inline void slab_lock(struct slab *slab, unsigned long *flags)
--{
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- local_irq_save(*flags);
-- __slab_lock(slab);
--}
--
--static __always_inline void slab_unlock(struct slab *slab, unsigned long *flags)
--{
-- __slab_unlock(slab);
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- local_irq_restore(*flags);
--}
--
- /*
- * Interrupts must be disabled (for the fallback code to work right), typically
-- * by an _irqsave() lock variant. Except on PREEMPT_RT where locks are different
-- * so we disable interrupts as part of slab_[un]lock().
-+ * by an _irqsave() lock variant. Except on PREEMPT_RT where these variants do
-+ * not actually disable interrupts. On the other hand the migrate_disable()
-+ * done by bit_spin_lock() is sufficient on PREEMPT_RT thanks to its threaded
-+ * interrupts.
- */
- static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct slab *slab,
- void *freelist_old, unsigned long counters_old,
-@@ -498,18 +486,15 @@ static inline bool __cmpxchg_double_slab
- } else
- #endif
- {
-- /* init to 0 to prevent spurious warnings */
-- unsigned long flags = 0;
--
-- slab_lock(slab, &flags);
-+ slab_lock(slab);
- if (slab->freelist == freelist_old &&
- slab->counters == counters_old) {
- slab->freelist = freelist_new;
- slab->counters = counters_new;
-- slab_unlock(slab, &flags);
-+ slab_unlock(slab);
- return true;
- }
-- slab_unlock(slab, &flags);
-+ slab_unlock(slab);
- }
-
- cpu_relax();
-@@ -540,16 +525,16 @@ static inline bool cmpxchg_double_slab(s
- unsigned long flags;
-
- local_irq_save(flags);
-- __slab_lock(slab);
-+ slab_lock(slab);
- if (slab->freelist == freelist_old &&
- slab->counters == counters_old) {
- slab->freelist = freelist_new;
- slab->counters = counters_new;
-- __slab_unlock(slab);
-+ slab_unlock(slab);
- local_irq_restore(flags);
- return true;
- }
-- __slab_unlock(slab);
-+ slab_unlock(slab);
- local_irq_restore(flags);
- }
-
diff --git a/patches/0008-u64_stats-Streamline-the-implementation.patch b/patches/0008-u64_stats-Streamline-the-implementation.patch
deleted file mode 100644
index 73432aeb9814..000000000000
--- a/patches/0008-u64_stats-Streamline-the-implementation.patch
+++ /dev/null
@@ -1,262 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 25 Aug 2022 18:41:31 +0200
-Subject: [PATCH 8/8] u64_stats: Streamline the implementation
-
-The u64 stats code handles 3 different cases:
-
- - 32bit UP
- - 32bit SMP
- - 64bit
-
-with an unreadable #ifdef maze, which was recently expanded with PREEMPT_RT
-conditionals.
-
-Reduce it to two cases (32bit and 64bit) and drop the optimization for
-32bit UP as suggested by Linus.
-
-Use the new preempt_disable/enable_nested() helpers to get rid of the
-CONFIG_PREEMPT_RT conditionals.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: netdev@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Link: https://lore.kernel.org/r/20220825164131.402717-9-bigeasy@linutronix.de
----
- include/linux/u64_stats_sync.h | 145 ++++++++++++++++++-----------------------
- 1 file changed, 64 insertions(+), 81 deletions(-)
-
---- a/include/linux/u64_stats_sync.h
-+++ b/include/linux/u64_stats_sync.h
-@@ -8,7 +8,7 @@
- *
- * Key points :
- *
-- * - Use a seqcount on 32-bit SMP, only disable preemption for 32-bit UP.
-+ * - Use a seqcount on 32-bit
- * - The whole thing is a no-op on 64-bit architectures.
- *
- * Usage constraints:
-@@ -20,7 +20,8 @@
- * writer and also spin forever.
- *
- * 3) Write side must use the _irqsave() variant if other writers, or a reader,
-- * can be invoked from an IRQ context.
-+ * can be invoked from an IRQ context. On 64bit systems this variant does not
-+ * disable interrupts.
- *
- * 4) If reader fetches several counters, there is no guarantee the whole values
- * are consistent w.r.t. each other (remember point #2: seqcounts are not
-@@ -29,11 +30,6 @@
- * 5) Readers are allowed to sleep or be preempted/interrupted: they perform
- * pure reads.
- *
-- * 6) Readers must use both u64_stats_fetch_{begin,retry}_irq() if the stats
-- * might be updated from a hardirq or softirq context (remember point #1:
-- * seqcounts are not used for UP kernels). 32-bit UP stat readers could read
-- * corrupted 64-bit values otherwise.
-- *
- * Usage :
- *
- * Stats producer (writer) should use following template granted it already got
-@@ -66,7 +62,7 @@
- #include <linux/seqlock.h>
-
- struct u64_stats_sync {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-+#if BITS_PER_LONG == 32
- seqcount_t seq;
- #endif
- };
-@@ -98,7 +94,22 @@ static inline void u64_stats_inc(u64_sta
- local64_inc(&p->v);
- }
-
--#else
-+static inline void u64_stats_init(struct u64_stats_sync *syncp) { }
-+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp) { }
-+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp) { }
-+static inline unsigned long __u64_stats_irqsave(void) { return 0; }
-+static inline void __u64_stats_irqrestore(unsigned long flags) { }
-+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+{
-+ return 0;
-+}
-+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-+ unsigned int start)
-+{
-+ return false;
-+}
-+
-+#else /* 64 bit */
-
- typedef struct {
- u64 v;
-@@ -123,123 +134,95 @@ static inline void u64_stats_inc(u64_sta
- {
- p->v++;
- }
--#endif
-
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
--#define u64_stats_init(syncp) seqcount_init(&(syncp)->seq)
--#else
- static inline void u64_stats_init(struct u64_stats_sync *syncp)
- {
-+ seqcount_init(&syncp->seq);
- }
--#endif
-
--static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
-+static inline void __u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-+ preempt_disable_nested();
- write_seqcount_begin(&syncp->seq);
--#endif
- }
-
--static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
-+static inline void __u64_stats_update_end(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- write_seqcount_end(&syncp->seq);
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
--#endif
-+ preempt_enable_nested();
- }
-
--static inline unsigned long
--u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
-+static inline unsigned long __u64_stats_irqsave(void)
- {
-- unsigned long flags = 0;
-+ unsigned long flags;
-
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_disable();
-- else
-- local_irq_save(flags);
-- write_seqcount_begin(&syncp->seq);
--#endif
-+ local_irq_save(flags);
- return flags;
- }
-
--static inline void
--u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
-- unsigned long flags)
-+static inline void __u64_stats_irqrestore(unsigned long flags)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-- write_seqcount_end(&syncp->seq);
-- if (IS_ENABLED(CONFIG_PREEMPT_RT))
-- preempt_enable();
-- else
-- local_irq_restore(flags);
--#endif
-+ local_irq_restore(flags);
- }
-
- static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
- return read_seqcount_begin(&syncp->seq);
--#else
-- return 0;
--#endif
- }
-
--static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-+ unsigned int start)
- {
--#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-- preempt_disable();
--#endif
-- return __u64_stats_fetch_begin(syncp);
-+ return read_seqcount_retry(&syncp->seq, start);
- }
-+#endif /* !64 bit */
-
--static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
-- unsigned int start)
-+static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT))
-- return read_seqcount_retry(&syncp->seq, start);
--#else
-- return false;
--#endif
-+ __u64_stats_update_begin(syncp);
-+}
-+
-+static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
-+{
-+ __u64_stats_update_end(syncp);
-+}
-+
-+static inline unsigned long u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
-+{
-+ unsigned long flags = __u64_stats_irqsave();
-+
-+ __u64_stats_update_begin(syncp);
-+ return flags;
-+}
-+
-+static inline void u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
-+ unsigned long flags)
-+{
-+ __u64_stats_update_end(syncp);
-+ __u64_stats_irqrestore(flags);
-+}
-+
-+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
-+{
-+ return __u64_stats_fetch_begin(syncp);
- }
-
- static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
- unsigned int start)
- {
--#if BITS_PER_LONG == 32 && (!defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT))
-- preempt_enable();
--#endif
- return __u64_stats_fetch_retry(syncp, start);
- }
-
--/*
-- * In case irq handlers can update u64 counters, readers can use following helpers
-- * - SMP 32bit arches use seqcount protection, irq safe.
-- * - UP 32bit must disable irqs.
-- * - 64bit have no problem atomically reading u64 values, irq safe.
-- */
-+/* Obsolete interfaces */
- static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
- {
--#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-- preempt_disable();
--#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-- local_irq_disable();
--#endif
-- return __u64_stats_fetch_begin(syncp);
-+ return u64_stats_fetch_begin(syncp);
- }
-
- static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
- unsigned int start)
- {
--#if BITS_PER_LONG == 32 && defined(CONFIG_PREEMPT_RT)
-- preempt_enable();
--#elif BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
-- local_irq_enable();
--#endif
-- return __u64_stats_fetch_retry(syncp, start);
-+ return u64_stats_fetch_retry(syncp, start);
- }
-
- #endif /* _LINUX_U64_STATS_SYNC_H */
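After this streamlining a writer always uses one template regardless of
word size; on 64-bit both helpers compile to (nearly) nothing and, as the
updated comment notes, the _irqsave variant no longer disables interrupts
there. A typical update site (field names illustrative):

	unsigned long flags;

	flags = u64_stats_update_begin_irqsave(&stats->syncp);
	u64_stats_inc(&stats->rx_packets);
	u64_stats_add(&stats->rx_bytes, len);
	u64_stats_update_end_irqrestore(&stats->syncp, flags);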
diff --git a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
index 0f11b4604f14..3b029a9e95d1 100644
--- a/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
+++ b/patches/0009-drm-i915-gt-Use-spin_lock_irq-instead-of-local_irq_d.patch
@@ -27,7 +27,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
-@@ -1299,7 +1299,7 @@ static void execlists_dequeue(struct int
+@@ -1302,7 +1302,7 @@ static void execlists_dequeue(struct int
* and context switches) submission.
*/
@@ -36,7 +36,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
/*
* If the queue is higher priority than the last
-@@ -1399,7 +1399,7 @@ static void execlists_dequeue(struct int
+@@ -1402,7 +1402,7 @@ static void execlists_dequeue(struct int
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
@@ -45,7 +45,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
return;
}
}
-@@ -1425,7 +1425,7 @@ static void execlists_dequeue(struct int
+@@ -1428,7 +1428,7 @@ static void execlists_dequeue(struct int
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.sched_engine->lock);
@@ -54,7 +54,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
return; /* leave this for another sibling */
}
-@@ -1587,7 +1587,7 @@ static void execlists_dequeue(struct int
+@@ -1590,7 +1590,7 @@ static void execlists_dequeue(struct int
*/
sched_engine->queue_priority_hint = queue_prio(sched_engine);
i915_sched_engine_reset_on_empty(sched_engine);
@@ -63,7 +63,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
/*
* We can skip poking the HW if we ended up with exactly the same set
-@@ -1613,13 +1613,6 @@ static void execlists_dequeue(struct int
+@@ -1616,13 +1616,6 @@ static void execlists_dequeue(struct int
}
}
@@ -77,7 +77,7 @@ Reviewed-by: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
static void clear_ports(struct i915_request **ports, int count)
{
memset_p((void **)ports, NULL, count);
-@@ -2449,7 +2442,7 @@ static void execlists_submission_tasklet
+@@ -2468,7 +2461,7 @@ static void execlists_submission_tasklet
}
if (!engine->execlists.pending[0]) {
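Each hunk in this refresh is the same substitution in
execlists_dequeue(): a detached local_irq_disable() + spin_lock() pair
becomes one spin_lock_irq(), which keeps the irq state and the lock
paired so PREEMPT_RT can substitute a sleeping lock. Schematically (lock
name simplified from the surrounding context):

	/* before */
	local_irq_disable();
	spin_lock(&sched_engine->lock);
	/* ... */
	spin_unlock(&sched_engine->lock);
	local_irq_enable();

	/* after */
	spin_lock_irq(&sched_engine->lock);
	/* ... */
	spin_unlock_irq(&sched_engine->lock);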
diff --git a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
index 2ad5fd8c3c1b..0555cc62d6e4 100644
--- a/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
+++ b/patches/0016-printk-add-infrastucture-for-atomic-consoles.patch
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1574,6 +1574,10 @@ config PRINTK
+@@ -1581,6 +1581,10 @@ config PRINTK
very difficult to diagnose system problems, saying N here is
strongly discouraged.
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/sched/clock.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
-@@ -2063,19 +2064,28 @@ static int console_trylock_spinning(void
+@@ -2060,19 +2061,28 @@ static int console_trylock_spinning(void
* dropped, a dropped message will be written out first.
*/
static void call_console_driver(struct console *con, const char *text, size_t len,
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2429,6 +2439,76 @@ asmlinkage __visible int _printk(const c
+@@ -2426,6 +2436,76 @@ asmlinkage __visible int _printk(const c
}
EXPORT_SYMBOL(_printk);
@@ -227,10 +227,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+#endif /* CONFIG_HAVE_ATOMIC_CONSOLE */
+
+ static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
- static void printk_start_kthread(struct console *con);
-@@ -2443,6 +2523,8 @@ static void printk_start_kthread(struct
+@@ -2441,6 +2521,8 @@ static void printk_start_kthread(struct
#define prb_first_valid_seq(rb) 0
#define prb_next_seq(rb) 0
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 syslog_seq;
static size_t record_print_text(const struct printk_record *r,
-@@ -2461,7 +2543,7 @@ static ssize_t msg_print_ext_body(char *
+@@ -2459,7 +2541,7 @@ static ssize_t msg_print_ext_body(char *
static void console_lock_spinning_enable(void) { }
static int console_lock_spinning_disable_and_check(void) { return 0; }
static void call_console_driver(struct console *con, const char *text, size_t len,
@@ -248,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
static bool suppress_message_printing(int level) { return false; }
-@@ -2803,10 +2885,20 @@ static inline bool __console_is_usable(s
+@@ -2802,10 +2884,20 @@ static inline bool __console_is_usable(s
*
* Requires holding the console_lock.
*/
@@ -271,7 +271,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return __console_is_usable(con->flags);
}
-@@ -2831,6 +2923,66 @@ static void __console_unlock(void)
+@@ -2830,6 +2922,66 @@ static void __console_unlock(void)
up_console_sem();
}
@@ -338,7 +338,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print one record for the given console. The record printed is whatever
* record is the next available record for the given console.
-@@ -2843,6 +2995,8 @@ static void __console_unlock(void)
+@@ -2842,6 +2994,8 @@ static void __console_unlock(void)
* If dropped messages should be printed, @dropped_text is a buffer of size
* DROPPED_TEXT_MAX. Otherwise @dropped_text must be NULL.
*
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
* console_lock. Otherwise it is set to false. A NULL pointer may be provided
-@@ -2855,7 +3009,8 @@ static void __console_unlock(void)
+@@ -2854,7 +3008,8 @@ static void __console_unlock(void)
* Requires con->lock otherwise.
*/
static bool __console_emit_next_record(struct console *con, char *text, char *ext_text,
@@ -357,7 +357,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
static atomic_t panic_console_dropped = ATOMIC_INIT(0);
struct printk_info info;
-@@ -2863,18 +3018,22 @@ static bool __console_emit_next_record(s
+@@ -2862,18 +3017,22 @@ static bool __console_emit_next_record(s
unsigned long flags;
char *write_text;
size_t len;
@@ -384,7 +384,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (panic_in_progress() &&
atomic_fetch_inc_relaxed(&panic_console_dropped) > 10) {
suppress_panic_printk = 1;
-@@ -2884,7 +3043,7 @@ static bool __console_emit_next_record(s
+@@ -2883,7 +3042,7 @@ static bool __console_emit_next_record(s
/* Skip record that has level above the console loglevel. */
if (suppress_message_printing(r.info->level)) {
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto skip;
}
-@@ -2916,9 +3075,9 @@ static bool __console_emit_next_record(s
+@@ -2915,9 +3074,9 @@ static bool __console_emit_next_record(s
stop_critical_timings();
}
@@ -405,7 +405,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (handover) {
start_critical_timings();
-@@ -2950,7 +3109,7 @@ static bool console_emit_next_record_tra
+@@ -2949,7 +3108,7 @@ static bool console_emit_next_record_tra
handover = NULL;
}
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2998,7 +3157,7 @@ static bool console_flush_all(bool do_co
+@@ -2997,7 +3156,7 @@ static bool console_flush_all(bool do_co
for_each_console(con) {
bool progress;
@@ -423,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
any_usable = true;
-@@ -3033,6 +3192,68 @@ static bool console_flush_all(bool do_co
+@@ -3032,6 +3191,68 @@ static bool console_flush_all(bool do_co
return any_usable;
}
@@ -492,7 +492,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_unlock - unlock the console system
*
-@@ -3148,6 +3369,11 @@ void console_unblank(void)
+@@ -3147,6 +3368,11 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -504,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If someone else is holding the console lock, trylock will fail
* and may_schedule may be set. Ignore and proceed to unlock so
-@@ -3164,7 +3390,7 @@ void console_flush_on_panic(enum con_flu
+@@ -3163,7 +3389,7 @@ void console_flush_on_panic(enum con_flu
seq = prb_first_valid_seq(prb);
for_each_console(c)
@@ -513,9 +513,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
console_unlock();
}
-@@ -3407,19 +3633,22 @@ void register_console(struct console *ne
- if (newcon->flags & CON_EXTENDED)
- nr_ext_console_drivers++;
+@@ -3403,19 +3629,22 @@ void register_console(struct console *ne
+ console_drivers->next = newcon;
+ }
- newcon->dropped = 0;
+ atomic_long_set(&newcon->dropped, 0);
@@ -539,7 +539,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (printk_kthreads_available)
-@@ -3508,6 +3737,10 @@ int unregister_console(struct console *c
+@@ -3498,6 +3727,10 @@ int unregister_console(struct console *c
console_sysfs_notify();
@@ -550,7 +550,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console->exit)
res = console->exit(console);
-@@ -3638,7 +3871,7 @@ static bool __pr_flush(struct console *c
+@@ -3628,7 +3861,7 @@ static bool __pr_flush(struct console *c
for_each_console(c) {
if (con && con != c)
continue;
@@ -559,7 +559,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
printk_seq = c->seq;
if (printk_seq < seq)
-@@ -3728,9 +3961,10 @@ static void printk_fallback_preferred_di
+@@ -3717,9 +3950,10 @@ static void printk_fallback_preferred_di
* See __console_emit_next_record() for argument and return details.
*/
static bool console_emit_next_record(struct console *con, char *text, char *ext_text,
@@ -572,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static bool printer_should_wake(struct console *con, u64 seq)
-@@ -3771,6 +4005,11 @@ static int printk_kthread_func(void *dat
+@@ -3760,6 +3994,11 @@ static int printk_kthread_func(void *dat
char *text;
int error;
@@ -584,7 +584,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(CONSOLE_LOG_MAX, GFP_KERNEL);
if (!text) {
con_printk(KERN_ERR, con, "failed to allocate text buffer\n");
-@@ -3849,7 +4088,7 @@ static int printk_kthread_func(void *dat
+@@ -3837,7 +4076,7 @@ static int printk_kthread_func(void *dat
* which can conditionally invoke cond_resched().
*/
console_may_schedule = 0;
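Two details of the atomic-console infrastructure are visible in the hunks
above: consoles gain a write_atomic() callback (implemented for 8250 by
the following patch) and the per-console dropped counter becomes an
atomic_long_t, so the atomic write path and the printing kthreads can
account drops without a shared lock. A sketch of the accounting, with the
xchg idiom assumed rather than quoted from the patch:

	/* producer side: a record could not be printed */
	atomic_long_inc(&con->dropped);

	/* consumer side: fold the count into the next successful write */
	dropped = atomic_long_xchg_relaxed(&con->dropped, 0);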
diff --git a/patches/0017-serial-8250-implement-write_atomic.patch b/patches/0017-serial-8250-implement-write_atomic.patch
index c4b347b02c81..94912c9c2b6b 100644
--- a/patches/0017-serial-8250-implement-write_atomic.patch
+++ b/patches/0017-serial-8250-implement-write_atomic.patch
@@ -19,23 +19,23 @@ write_atomic() synchronizes per line.
Signed-off-by: John Ogness <john.ogness@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/tty/serial/8250/8250.h | 66 +++++++++++-
+ drivers/tty/serial/8250/8250.h | 41 ++++++-
drivers/tty/serial/8250/8250_aspeed_vuart.c | 2
drivers/tty/serial/8250/8250_bcm7271.c | 21 +++
- drivers/tty/serial/8250/8250_core.c | 24 ++++
+ drivers/tty/serial/8250/8250_core.c | 24 +++-
drivers/tty/serial/8250/8250_exar.c | 4
drivers/tty/serial/8250/8250_fsl.c | 3
drivers/tty/serial/8250/8250_ingenic.c | 3
drivers/tty/serial/8250/8250_mtk.c | 32 +++++
drivers/tty/serial/8250/8250_omap.c | 20 +--
- drivers/tty/serial/8250/8250_port.c | 151 +++++++++++++++++++---------
+ drivers/tty/serial/8250/8250_port.c | 158 ++++++++++++++++++++++------
drivers/tty/serial/8250/Kconfig | 1
include/linux/serial_8250.h | 5
- 12 files changed, 267 insertions(+), 65 deletions(-)
+ 12 files changed, 262 insertions(+), 52 deletions(-)
--- a/drivers/tty/serial/8250/8250.h
+++ b/drivers/tty/serial/8250/8250.h
-@@ -177,12 +177,74 @@ static inline void serial_dl_write(struc
+@@ -177,12 +177,49 @@ static inline void serial_dl_write(struc
up->dl_write(up, value);
}
@@ -76,31 +76,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ printk_cpu_sync_put_irqrestore(flags);
+}
+
-+static inline int serial8250_clear_IER(struct uart_8250_port *up)
-+{
-+ struct uart_port *port = &up->port;
-+ unsigned int clearval = 0;
-+ unsigned long flags;
-+ bool is_console;
-+ int prior;
-+
-+ is_console = uart_console(port);
-+
-+ if (up->capabilities & UART_CAP_UUE)
-+ clearval = UART_IER_UUE;
-+
-+ if (is_console)
-+ printk_cpu_sync_get_irqsave(flags);
-+
-+ prior = serial_in(up, UART_IER);
-+ serial_out(up, UART_IER, clearval);
-+
-+ if (is_console)
-+ printk_cpu_sync_put_irqrestore(flags);
-+
-+ return prior;
-+}
-+
static inline bool serial8250_set_THRI(struct uart_8250_port *up)
{
if (up->ier & UART_IER_THRI)
@@ -111,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return true;
}
-@@ -191,7 +253,7 @@ static inline bool serial8250_clear_THRI
+@@ -191,7 +228,7 @@ static inline bool serial8250_clear_THRI
if (!(up->ier & UART_IER_THRI))
return false;
up->ier &= ~UART_IER_THRI;
@@ -450,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
goto out;
}
-@@ -1148,12 +1148,12 @@ static void am654_8250_handle_rx_dma(str
+@@ -1146,12 +1146,12 @@ static void am654_8250_handle_rx_dma(str
* periodic timeouts, re-enable interrupts.
*/
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
@@ -476,7 +451,41 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (p->capabilities & UART_CAP_EFR) {
serial_out(p, UART_LCR, UART_LCR_CONF_MODE_B);
serial_out(p, UART_EFR, efr);
-@@ -1017,8 +1017,11 @@ static int broken_efr(struct uart_8250_p
+@@ -754,12 +754,29 @@ static void serial8250_set_sleep(struct
+ serial8250_rpm_put(p);
+ }
+
+-static void serial8250_clear_IER(struct uart_8250_port *up)
++static unsigned int serial8250_clear_IER(struct uart_8250_port *up)
+ {
++ struct uart_port *port = &up->port;
++ unsigned int clearval = 0;
++ unsigned long flags;
++ bool is_console;
++ unsigned int prior;
++
++ is_console = uart_console(port);
++
+ if (up->capabilities & UART_CAP_UUE)
+- serial_out(up, UART_IER, UART_IER_UUE);
+- else
+- serial_out(up, UART_IER, 0);
++ clearval = UART_IER_UUE;
++
++ if (is_console)
++ printk_cpu_sync_get_irqsave(flags);
++
++ prior = serial_in(up, UART_IER);
++ serial_out(up, UART_IER, clearval);
++
++ if (is_console)
++ printk_cpu_sync_put_irqrestore(flags);
++
++ return prior;
+ }
+
+ #ifdef CONFIG_SERIAL_8250_RSA
+@@ -1025,8 +1042,11 @@ static int broken_efr(struct uart_8250_p
*/
static void autoconfig_16550a(struct uart_8250_port *up)
{
@@ -488,7 +497,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
up->port.type = PORT_16550A;
up->capabilities |= UART_CAP_FIFO;
-@@ -1130,6 +1133,11 @@ static void autoconfig_16550a(struct uar
+@@ -1138,6 +1158,11 @@ static void autoconfig_16550a(struct uar
return;
}
@@ -500,7 +509,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Try writing and reading the UART_IER_UUE bit (b6).
* If it works, this is probably one of the Xscale platform's
-@@ -1165,6 +1173,9 @@ static void autoconfig_16550a(struct uar
+@@ -1173,6 +1198,9 @@ static void autoconfig_16550a(struct uar
}
serial_out(up, UART_IER, iersave);
@@ -510,7 +519,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We distinguish between 16550A and U6 16550A by counting
* how many bytes are in the FIFO.
-@@ -1187,8 +1198,10 @@ static void autoconfig(struct uart_8250_
+@@ -1195,8 +1223,10 @@ static void autoconfig(struct uart_8250_
unsigned char status1, scratch, scratch2, scratch3;
unsigned char save_lcr, save_mcr;
struct uart_port *port = &up->port;
@@ -521,7 +530,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!port->iobase && !port->mapbase && !port->membase)
return;
-@@ -1206,6 +1219,11 @@ static void autoconfig(struct uart_8250_
+@@ -1214,6 +1244,11 @@ static void autoconfig(struct uart_8250_
up->bugs = 0;
if (!(port->flags & UPF_BUGGY_UART)) {
@@ -533,7 +542,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Do a simple existence test first; if we fail this,
* there's no point trying anything else.
-@@ -1235,6 +1253,10 @@ static void autoconfig(struct uart_8250_
+@@ -1243,6 +1278,10 @@ static void autoconfig(struct uart_8250_
#endif
scratch3 = serial_in(up, UART_IER) & 0x0f;
serial_out(up, UART_IER, scratch);
@@ -544,19 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (scratch2 != 0 || scratch3 != 0x0F) {
/*
* We failed; there's nothing here
-@@ -1332,10 +1354,7 @@ static void autoconfig(struct uart_8250_
- serial8250_out_MCR(up, save_mcr);
- serial8250_clear_fifos(up);
- serial_in(up, UART_RX);
-- if (up->capabilities & UART_CAP_UUE)
-- serial_out(up, UART_IER, UART_IER_UUE);
-- else
-- serial_out(up, UART_IER, 0);
-+ serial8250_clear_IER(up);
-
- out_unlock:
- spin_unlock_irqrestore(&port->lock, flags);
-@@ -1361,7 +1380,9 @@ static void autoconfig_irq(struct uart_8
+@@ -1366,7 +1405,9 @@ static void autoconfig_irq(struct uart_8
unsigned char save_mcr, save_ier;
unsigned char save_ICP = 0;
unsigned int ICP = 0;
@@ -566,7 +563,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int irq;
if (port->flags & UPF_FOURPORT) {
-@@ -1371,8 +1392,12 @@ static void autoconfig_irq(struct uart_8
+@@ -1376,8 +1417,12 @@ static void autoconfig_irq(struct uart_8
inb_p(ICP);
}
@@ -580,7 +577,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* forget possible initially masked and pending IRQ */
probe_irq_off(probe_irq_on());
-@@ -1404,8 +1429,10 @@ static void autoconfig_irq(struct uart_8
+@@ -1409,8 +1454,10 @@ static void autoconfig_irq(struct uart_8
if (port->flags & UPF_FOURPORT)
outb_p(save_ICP, ICP);
@@ -592,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
port->irq = (irq > 0) ? irq : 0;
}
-@@ -1418,7 +1445,7 @@ static void serial8250_stop_rx(struct ua
+@@ -1423,7 +1470,7 @@ static void serial8250_stop_rx(struct ua
up->ier &= ~(UART_IER_RLSI | UART_IER_RDI);
up->port.read_status_mask &= ~UART_LSR_DR;
@@ -601,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -1448,7 +1475,7 @@ void serial8250_em485_stop_tx(struct uar
+@@ -1453,7 +1500,7 @@ void serial8250_em485_stop_tx(struct uar
serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
@@ -610,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(serial8250_em485_stop_tx);
-@@ -1697,7 +1724,7 @@ static void serial8250_disable_ms(struct
+@@ -1702,7 +1749,7 @@ static void serial8250_disable_ms(struct
mctrl_gpio_disable_ms(up->gpios);
up->ier &= ~UART_IER_MSI;
@@ -619,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void serial8250_enable_ms(struct uart_port *port)
-@@ -1713,7 +1740,7 @@ static void serial8250_enable_ms(struct
+@@ -1718,7 +1765,7 @@ static void serial8250_enable_ms(struct
up->ier |= UART_IER_MSI;
serial8250_rpm_get(up);
@@ -628,23 +625,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2144,14 +2171,7 @@ static void serial8250_put_poll_char(str
- struct uart_8250_port *up = up_to_u8250p(port);
-
- serial8250_rpm_get(up);
-- /*
-- * First save the IER then disable the interrupts
-- */
+@@ -2152,8 +2199,7 @@ static void serial8250_put_poll_char(str
+ /*
+ * First save the IER then disable the interrupts
+ */
- ier = serial_port_in(port, UART_IER);
-- if (up->capabilities & UART_CAP_UUE)
-- serial_port_out(port, UART_IER, UART_IER_UUE);
-- else
-- serial_port_out(port, UART_IER, 0);
+- serial8250_clear_IER(up);
+ ier = serial8250_clear_IER(up);
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
/*
-@@ -2164,7 +2184,7 @@ static void serial8250_put_poll_char(str
+@@ -2166,7 +2212,7 @@ static void serial8250_put_poll_char(str
* and restore the IER
*/
wait_for_xmitr(up, UART_LSR_BOTH_EMPTY);
@@ -653,7 +644,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial8250_rpm_put(up);
}
-@@ -2173,8 +2193,10 @@ static void serial8250_put_poll_char(str
+@@ -2175,8 +2221,10 @@ static void serial8250_put_poll_char(str
int serial8250_do_startup(struct uart_port *port)
{
struct uart_8250_port *up = up_to_u8250p(port);
@@ -664,7 +655,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int retval;
u16 lsr;
-@@ -2195,7 +2217,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2197,7 +2245,7 @@ int serial8250_do_startup(struct uart_po
up->acr = 0;
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
serial_port_out(port, UART_EFR, UART_EFR_ECB);
@@ -673,7 +664,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial_port_out(port, UART_LCR, 0);
serial_icr_write(up, UART_CSR, 0); /* Reset the UART */
serial_port_out(port, UART_LCR, UART_LCR_CONF_MODE_B);
-@@ -2205,7 +2227,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2207,7 +2255,7 @@ int serial8250_do_startup(struct uart_po
if (port->type == PORT_DA830) {
/* Reset the port */
@@ -682,7 +673,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
serial_port_out(port, UART_DA830_PWREMU_MGMT, 0);
mdelay(10);
-@@ -2304,6 +2326,8 @@ int serial8250_do_startup(struct uart_po
+@@ -2306,6 +2354,8 @@ int serial8250_do_startup(struct uart_po
if (retval)
goto out;
@@ -691,7 +682,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
-@@ -2320,6 +2344,9 @@ int serial8250_do_startup(struct uart_po
+@@ -2322,6 +2372,9 @@ int serial8250_do_startup(struct uart_po
*/
spin_lock_irqsave(&port->lock, flags);
@@ -701,7 +692,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
wait_for_xmitr(up, UART_LSR_THRE);
serial_port_out_sync(port, UART_IER, UART_IER_THRI);
udelay(1); /* allow THRE to set */
-@@ -2330,6 +2357,9 @@ int serial8250_do_startup(struct uart_po
+@@ -2332,6 +2385,9 @@ int serial8250_do_startup(struct uart_po
iir = serial_port_in(port, UART_IIR);
serial_port_out(port, UART_IER, 0);
@@ -711,7 +702,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
if (port->irqflags & IRQF_SHARED)
-@@ -2384,10 +2414,14 @@ int serial8250_do_startup(struct uart_po
+@@ -2386,10 +2442,14 @@ int serial8250_do_startup(struct uart_po
* Do a quick test to see if we receive an interrupt when we enable
* the TX irq.
*/
@@ -726,7 +717,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (lsr & UART_LSR_TEMT && iir & UART_IIR_NO_INT) {
if (!(up->bugs & UART_BUG_TXEN)) {
-@@ -2419,7 +2453,7 @@ int serial8250_do_startup(struct uart_po
+@@ -2421,7 +2481,7 @@ int serial8250_do_startup(struct uart_po
if (up->dma) {
const char *msg = NULL;
@@ -735,7 +726,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
msg = "forbid DMA for kernel console";
else if (serial8250_request_dma(up))
msg = "failed to request DMA";
-@@ -2470,7 +2504,7 @@ void serial8250_do_shutdown(struct uart_
+@@ -2472,7 +2532,7 @@ void serial8250_do_shutdown(struct uart_
*/
spin_lock_irqsave(&port->lock, flags);
up->ier = 0;
@@ -744,7 +735,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&port->lock, flags);
synchronize_irq(port->irq);
-@@ -2836,7 +2870,7 @@ serial8250_do_set_termios(struct uart_po
+@@ -2838,7 +2898,7 @@ serial8250_do_set_termios(struct uart_po
if (up->capabilities & UART_CAP_RTOIE)
up->ier |= UART_IER_RTOIE;
@@ -753,7 +744,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (up->capabilities & UART_CAP_EFR) {
unsigned char efr = 0;
-@@ -3301,7 +3335,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
+@@ -3303,7 +3363,7 @@ EXPORT_SYMBOL_GPL(serial8250_set_default
#ifdef CONFIG_SERIAL_8250_CONSOLE
@@ -762,7 +753,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct uart_8250_port *up = up_to_u8250p(port);
-@@ -3309,6 +3343,18 @@ static void serial8250_console_putchar(s
+@@ -3311,6 +3371,18 @@ static void serial8250_console_putchar(s
serial_port_out(port, UART_TX, ch);
}
@@ -781,7 +772,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Restore serial console when h/w power-off detected
*/
-@@ -3335,6 +3381,32 @@ static void serial8250_console_restore(s
+@@ -3337,6 +3409,32 @@ static void serial8250_console_restore(s
serial8250_out_MCR(up, up->mcr | UART_MCR_DTR | UART_MCR_RTS);
}
@@ -814,7 +805,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Print a string to the serial port using the device FIFO
*
-@@ -3380,24 +3452,12 @@ void serial8250_console_write(struct uar
+@@ -3382,20 +3480,15 @@ void serial8250_console_write(struct uar
struct uart_port *port = &up->port;
unsigned long flags;
unsigned int ier, use_fifo;
@@ -826,22 +817,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- locked = spin_trylock_irqsave(&port->lock, flags);
- else
- spin_lock_irqsave(&port->lock, flags);
--
-- /*
-- * First save the IER then disable the interrupts
-- */
-- ier = serial_port_in(port, UART_IER);
+ spin_lock_irqsave(&port->lock, flags);
-- if (up->capabilities & UART_CAP_UUE)
-- serial_port_out(port, UART_IER, UART_IER_UUE);
-- else
-- serial_port_out(port, UART_IER, 0);
+ /*
+ * First save the IER then disable the interrupts
+ */
+- ier = serial_port_in(port, UART_IER);
+- serial8250_clear_IER(up);
+ ier = serial8250_clear_IER(up);
/* check scratch reg to see if port powered off during system sleep */
if (up->canary && (up->canary != serial_port_in(port, UART_SCR))) {
-@@ -3431,10 +3491,12 @@ void serial8250_console_write(struct uar
+@@ -3429,10 +3522,12 @@ void serial8250_console_write(struct uar
*/
!(up->port.flags & UPF_CONS_FLOW);
@@ -854,7 +841,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Finally, wait for transmitter to become empty
-@@ -3447,8 +3509,7 @@ void serial8250_console_write(struct uar
+@@ -3445,8 +3540,7 @@ void serial8250_console_write(struct uar
if (em485->tx_stopped)
up->rs485_stop_tx(up);
}
@@ -864,7 +851,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The receive handling will happen properly because the
-@@ -3460,8 +3521,7 @@ void serial8250_console_write(struct uar
+@@ -3458,8 +3552,7 @@ void serial8250_console_write(struct uar
if (up->msr_saved_flags)
serial8250_modem_status(up);
@@ -874,7 +861,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned int probe_baud(struct uart_port *port)
-@@ -3481,6 +3541,7 @@ static unsigned int probe_baud(struct ua
+@@ -3479,6 +3572,7 @@ static unsigned int probe_baud(struct ua
int serial8250_console_setup(struct uart_port *port, char *options, bool probe)
{
@@ -882,7 +869,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int baud = 9600;
int bits = 8;
int parity = 'n';
-@@ -3490,6 +3551,8 @@ int serial8250_console_setup(struct uart
+@@ -3488,6 +3582,8 @@ int serial8250_console_setup(struct uart
if (!port->iobase && !port->membase)
return -ENODEV;
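The pattern repeated throughout this refresh: every write to UART_IER on
a port that is registered as a console is bracketed by the printk CPU
sync, so a write_atomic() call on another CPU cannot interleave with an
interrupt-enable update. Condensed from the serial8250_clear_IER() hunk
above:

	if (uart_console(port))
		printk_cpu_sync_get_irqsave(flags);

	prior = serial_in(up, UART_IER);
	serial_out(up, UART_IER, clearval);

	if (uart_console(port))
		printk_cpu_sync_put_irqrestore(flags);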
diff --git a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
index 645def427f10..11435485d460 100644
--- a/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
+++ b/patches/0018-printk-avoid-preempt_disable-for-PREEMPT_RT.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1984,6 +1984,7 @@ static int console_lock_spinning_disable
+@@ -1981,6 +1981,7 @@ static int console_lock_spinning_disable
return 1;
}
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_trylock_spinning - try to get console_lock by busy waiting
*
-@@ -2057,6 +2058,7 @@ static int console_trylock_spinning(void
+@@ -2054,6 +2055,7 @@ static int console_trylock_spinning(void
return 1;
}
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Call the specified console driver, asking it to write out the specified
-@@ -2396,6 +2398,18 @@ asmlinkage int vprintk_emit(int facility
+@@ -2393,6 +2395,18 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched && allow_direct_printing()) {
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The caller may be holding system-critical or
* timing-sensitive locks. Disable preemption during direct
-@@ -2413,6 +2427,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2410,6 +2424,7 @@ asmlinkage int vprintk_emit(int facility
if (console_trylock_spinning())
console_unlock();
preempt_enable();
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
wake_up_klogd();
-@@ -3103,8 +3118,12 @@ static bool console_emit_next_record_tra
+@@ -3102,8 +3117,12 @@ static bool console_emit_next_record_tra
/*
* Handovers are only supported if threaded printers are atomically
* blocked. The context taking over the console_lock may be atomic.
diff --git a/patches/ARM__Allow_to_enable_RT.patch b/patches/ARM__Allow_to_enable_RT.patch
index baf7f5516da7..faf7c48b9e33 100644
--- a/patches/ARM__Allow_to_enable_RT.patch
+++ b/patches/ARM__Allow_to_enable_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -33,6 +33,7 @@ config ARM
+@@ -32,6 +32,7 @@ config ARM
select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
@@ -24,11 +24,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
-@@ -129,6 +130,7 @@ config ARM
- select OLD_SIGSUSPEND3
- select PCI_SYSCALL if PCI
- select PERF_USE_VMALLOC
+@@ -114,6 +115,7 @@ config ARM
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_POSIX_CPU_TIMERS_TASK_WORK if !KVM
- select RTC_LIB
- select SYS_SUPPORTS_APM_EMULATION
- select THREAD_INFO_IN_TASK
+ select HAVE_PREEMPT_LAZY
+ select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
+ select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 7bc90935ffb7..0e6dbb0e3c10 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt1
diff --git a/patches/POWERPC__Allow_to_enable_RT.patch b/patches/POWERPC__Allow_to_enable_RT.patch
index 70b2e2d5a9fb..37b00f7c0426 100644
--- a/patches/POWERPC__Allow_to_enable_RT.patch
+++ b/patches/POWERPC__Allow_to_enable_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -149,6 +149,7 @@ config PPC
+@@ -150,6 +150,7 @@ config PPC
select ARCH_STACKWALK
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC_BOOK3S || PPC_8xx || 40x
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
-@@ -244,6 +245,7 @@ config PPC
+@@ -245,6 +246,7 @@ config PPC
select HAVE_PREEMPT_LAZY
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
diff --git a/patches/arm__Add_support_for_lazy_preemption.patch b/patches/arm__Add_support_for_lazy_preemption.patch
index a0ed87066591..d1e813f40e5b 100644
--- a/patches/arm__Add_support_for_lazy_preemption.patch
+++ b/patches/arm__Add_support_for_lazy_preemption.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -113,6 +113,7 @@ config ARM
+@@ -114,6 +114,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
index 76da29d9e612..3aa350c273e2 100644
--- a/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
+++ b/patches/drivers_block_zram__Replace_bit_spinlocks_with_rtmutex_for_-rt.patch
@@ -21,7 +21,7 @@ Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
---
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -60,6 +60,40 @@ static void zram_free_page(struct zram *
+@@ -57,6 +57,40 @@ static void zram_free_page(struct zram *
static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
u32 index, int offset, struct bio *bio);
@@ -62,7 +62,7 @@ Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
static int zram_slot_trylock(struct zram *zram, u32 index)
{
-@@ -75,6 +109,7 @@ static void zram_slot_unlock(struct zram
+@@ -72,6 +106,7 @@ static void zram_slot_unlock(struct zram
{
bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
@@ -70,7 +70,7 @@ Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
static inline bool init_done(struct zram *zram)
{
-@@ -1198,6 +1233,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1187,6 +1222,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
@@ -80,7 +80,7 @@ Link: https://lkml.kernel.org/r/YqIbMuHCPiQk+Ac2@linutronix.de
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -63,6 +63,9 @@ struct zram_table_entry {
+@@ -62,6 +62,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long flags;
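
The hunks above only shift context lines; as a reminder of what this patch does, here is a hedged sketch of the PREEMPT_RT side. The spinlock_t member name "lock" in zram_table_entry is an assumption for illustration, not code from the patch:

#ifdef CONFIG_PREEMPT_RT
/* A per-entry spinlock_t (an rtmutex on RT) replaces the bit spinlock.
 * bit_spin_lock() disables preemption and busy-waits, neither of which
 * is acceptable on PREEMPT_RT. */
static void zram_slot_lock(struct zram *zram, u32 index)
{
	spin_lock(&zram->table[index].lock);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	spin_unlock(&zram->table[index].lock);
}
#else
static void zram_slot_lock(struct zram *zram, u32 index)
{
	bit_spin_lock(ZRAM_LOCK, &zram->table[index].flags);
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	bit_spin_unlock(ZRAM_LOCK, &zram->table[index].flags);
}
#endif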
diff --git a/patches/genirq-Provide-generic_handle_domain_irq_safe.patch b/patches/genirq-Provide-generic_handle_domain_irq_safe.patch
deleted file mode 100644
index ddf71f0ffa33..000000000000
--- a/patches/genirq-Provide-generic_handle_domain_irq_safe.patch
+++ /dev/null
@@ -1,152 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 9 May 2022 16:04:08 +0200
-Subject: [PATCH] genirq: Provide generic_handle_domain_irq_safe().
-
-Provide generic_handle_domain_irq_safe() which can be used from any context.
-This is similar to commit
- 509853f9e1e7b ("genirq: Provide generic_handle_irq_safe()")
-
-but this time for the irq-domains interface. It has been reported for
-the amd-pinctrl driver via bugzilla
- https://bugzilla.kernel.org/show_bug.cgi?id=215954
-
-I looked around and added a few users so it is not just a single-user API :)
-Instead of generic_handle_irq(irq_find_mapping()) one can use
-generic_handle_domain_irq().
-
-The problem with generic_handle_domain_irq() is that with `threadirqs'
-it will trigger "WARN_ON_ONCE(!in_hardirq())". That interrupt handler
-can't be marked non-threaded because it is a shared handler (it is
-marked as such and I can't tell whether the interrupt can really be shared
-on the system).
-Ignoring the just-mentioned warning, on PREEMPT_RT the threaded handler
-is invoked with interrupts enabled, leading to other problems.
-
-Do we do this?
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/YnkfWFzvusFFktSt@linutronix.de
----
- drivers/bcma/driver_gpio.c | 2 +-
- drivers/gpio/gpio-mlxbf2.c | 6 ++----
- drivers/pinctrl/pinctrl-amd.c | 2 +-
- drivers/platform/x86/intel/int0002_vgpio.c | 3 +--
- drivers/ssb/driver_gpio.c | 6 ++++--
- include/linux/irqdesc.h | 1 +
- kernel/irq/irqdesc.c | 24 ++++++++++++++++++++++++
- 7 files changed, 34 insertions(+), 10 deletions(-)
-
---- a/drivers/bcma/driver_gpio.c
-+++ b/drivers/bcma/driver_gpio.c
-@@ -115,7 +115,7 @@ static irqreturn_t bcma_gpio_irq_handler
- return IRQ_NONE;
-
- for_each_set_bit(gpio, &irqs, gc->ngpio)
-- generic_handle_irq(irq_find_mapping(gc->irq.domain, gpio));
-+ generic_handle_domain_irq_safe(gc->irq.domain, gpio);
- bcma_chipco_gpio_polarity(cc, irqs, val & irqs);
-
- return IRQ_HANDLED;
---- a/drivers/gpio/gpio-mlxbf2.c
-+++ b/drivers/gpio/gpio-mlxbf2.c
-@@ -273,10 +273,8 @@ static irqreturn_t mlxbf2_gpio_irq_handl
- pending = readl(gs->gpio_io + YU_GPIO_CAUSE_OR_CAUSE_EVTEN0);
- writel(pending, gs->gpio_io + YU_GPIO_CAUSE_OR_CLRCAUSE);
-
-- for_each_set_bit(level, &pending, gc->ngpio) {
-- int gpio_irq = irq_find_mapping(gc->irq.domain, level);
-- generic_handle_irq(gpio_irq);
-- }
-+ for_each_set_bit(level, &pending, gc->ngpio)
-+ generic_handle_domain_irq_safe(gc->irq.domain, level);
-
- return IRQ_RETVAL(pending);
- }
---- a/drivers/pinctrl/pinctrl-amd.c
-+++ b/drivers/pinctrl/pinctrl-amd.c
-@@ -639,7 +639,7 @@ static bool do_amd_gpio_irq_handler(int
- if (!(regval & PIN_IRQ_PENDING) ||
- !(regval & BIT(INTERRUPT_MASK_OFF)))
- continue;
-- generic_handle_domain_irq(gc->irq.domain, irqnr + i);
-+ generic_handle_domain_irq_safe(gc->irq.domain, irqnr + i);
-
- /* Clear interrupt.
- * We must read the pin register again, in case the
---- a/drivers/platform/x86/intel/int0002_vgpio.c
-+++ b/drivers/platform/x86/intel/int0002_vgpio.c
-@@ -125,8 +125,7 @@ static irqreturn_t int0002_irq(int irq,
- if (!(gpe_sts_reg & GPE0A_PME_B0_STS_BIT))
- return IRQ_NONE;
-
-- generic_handle_irq(irq_find_mapping(chip->irq.domain,
-- GPE0A_PME_B0_VIRT_GPIO_PIN));
-+ generic_handle_domain_irq_safe(chip->irq.domain, GPE0A_PME_B0_VIRT_GPIO_PIN);
-
- pm_wakeup_hard_event(chip->parent);
-
---- a/drivers/ssb/driver_gpio.c
-+++ b/drivers/ssb/driver_gpio.c
-@@ -132,7 +132,8 @@ static irqreturn_t ssb_gpio_irq_chipco_h
- return IRQ_NONE;
-
- for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
-- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
-+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
-+
- ssb_chipco_gpio_polarity(chipco, irqs, val & irqs);
-
- return IRQ_HANDLED;
-@@ -330,7 +331,8 @@ static irqreturn_t ssb_gpio_irq_extif_ha
- return IRQ_NONE;
-
- for_each_set_bit(gpio, &irqs, bus->gpio.ngpio)
-- generic_handle_irq(ssb_gpio_to_irq(&bus->gpio, gpio));
-+ generic_handle_domain_irq_safe(bus->irq_domain, gpio);
-+
- ssb_extif_gpio_polarity(extif, irqs, val & irqs);
-
- return IRQ_HANDLED;
---- a/include/linux/irqdesc.h
-+++ b/include/linux/irqdesc.h
-@@ -169,6 +169,7 @@ int generic_handle_irq_safe(unsigned int
- * conversion failed.
- */
- int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq);
-+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq);
- int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq);
- #endif
-
---- a/kernel/irq/irqdesc.c
-+++ b/kernel/irq/irqdesc.c
-@@ -705,6 +705,30 @@ int generic_handle_domain_irq(struct irq
- }
- EXPORT_SYMBOL_GPL(generic_handle_domain_irq);
-
-+ /**
-+ * generic_handle_domain_irq_safe - Invoke the handler for a HW irq belonging
-+ * to a domain from any context.
-+ * @domain: The domain where to perform the lookup
-+ * @hwirq: The HW irq number to convert to a logical one
-+ *
-+ * Returns: 0 on success, a negative value on error.
-+ *
-+ * This function can be called from any context (IRQ or process context). It
-+ * will report an error if not invoked from IRQ context and the irq has been
-+ * marked to enforce IRQ-context only.
-+ */
-+int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq)
-+{
-+ unsigned long flags;
-+ int ret;
-+
-+ local_irq_save(flags);
-+ ret = handle_irq_desc(irq_resolve_mapping(domain, hwirq));
-+ local_irq_restore(flags);
-+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe);
-+
- /**
- * generic_handle_domain_nmi - Invoke the handler for a HW nmi belonging
- * to a domain.
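
For context on the interface this (now dropped) patch provided, a minimal sketch of the driver-side conversion follows. The handler, its dev_id layout and the example_read_pending() helper are illustrative assumptions, not code from the patch:

/* Demux a shared GPIO interrupt through an irq_domain. With `threadirqs'
 * or on PREEMPT_RT this handler may run in task context with interrupts
 * enabled, so the _safe variant disables interrupts around the demux. */
static irqreturn_t example_gpio_irq_handler(int irq, void *dev_id)
{
	struct gpio_chip *gc = dev_id;
	unsigned long pending = example_read_pending(gc);
	unsigned int bit;

	if (!pending)
		return IRQ_NONE;

	for_each_set_bit(bit, &pending, gc->ngpio)
		generic_handle_domain_irq_safe(gc->irq.domain, bit);

	return IRQ_HANDLED;
}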
diff --git a/patches/locking-Detect-includes-rwlock.h-outside-of-spinlock.patch b/patches/locking-Detect-includes-rwlock.h-outside-of-spinlock.patch
deleted file mode 100644
index 74edca18540a..000000000000
--- a/patches/locking-Detect-includes-rwlock.h-outside-of-spinlock.patch
+++ /dev/null
@@ -1,104 +0,0 @@
-From: Michael S. Tsirkin <mst@redhat.com>
-Date: Thu, 25 Aug 2022 17:30:49 +0200
-Subject: [PATCH] locking: Detect includes rwlock.h outside of spinlock.h
-
-The check for __LINUX_SPINLOCK_H within rwlock.h (and other files)
-detects the direct include of the header file if it is at the very
-beginning of the include section.
-If it is listed later then chances are high that spinlock.h was already
-included (including rwlock.h) and the additional listing of rwlock.h
-will not cause any failure.
-
-On PREEMPT_RT this additional rwlock.h will lead to compile failures
-since it uses a different rwlock implementation.
-
-Add __LINUX_INSIDE_SPINLOCK_H to spinlock.h and check for this instead
-of __LINUX_SPINLOCK_H to detect wrong includes. This will help detect
-direct includes of rwlock.h even without PREEMPT_RT enabled.
-
-[ bigeasy: add remaining __LINUX_SPINLOCK_H user and rewrite
- commit description. ]
-
-Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/YweemHxJx7O8rjBx@linutronix.de
----
- include/linux/rwlock.h | 2 +-
- include/linux/spinlock.h | 2 ++
- include/linux/spinlock_api_smp.h | 2 +-
- include/linux/spinlock_api_up.h | 2 +-
- include/linux/spinlock_rt.h | 2 +-
- include/linux/spinlock_up.h | 2 +-
- 6 files changed, 7 insertions(+), 5 deletions(-)
-
---- a/include/linux/rwlock.h
-+++ b/include/linux/rwlock.h
-@@ -1,7 +1,7 @@
- #ifndef __LINUX_RWLOCK_H
- #define __LINUX_RWLOCK_H
-
--#ifndef __LINUX_SPINLOCK_H
-+#ifndef __LINUX_INSIDE_SPINLOCK_H
- # error "please don't include this file directly"
- #endif
-
---- a/include/linux/spinlock.h
-+++ b/include/linux/spinlock.h
-@@ -1,6 +1,7 @@
- /* SPDX-License-Identifier: GPL-2.0 */
- #ifndef __LINUX_SPINLOCK_H
- #define __LINUX_SPINLOCK_H
-+#define __LINUX_INSIDE_SPINLOCK_H
-
- /*
- * include/linux/spinlock.h - generic spinlock/rwlock declarations
-@@ -492,4 +493,5 @@ int __alloc_bucket_spinlocks(spinlock_t
-
- void free_bucket_spinlocks(spinlock_t *locks);
-
-+#undef __LINUX_INSIDE_SPINLOCK_H
- #endif /* __LINUX_SPINLOCK_H */
---- a/include/linux/spinlock_api_smp.h
-+++ b/include/linux/spinlock_api_smp.h
-@@ -1,7 +1,7 @@
- #ifndef __LINUX_SPINLOCK_API_SMP_H
- #define __LINUX_SPINLOCK_API_SMP_H
-
--#ifndef __LINUX_SPINLOCK_H
-+#ifndef __LINUX_INSIDE_SPINLOCK_H
- # error "please don't include this file directly"
- #endif
-
---- a/include/linux/spinlock_api_up.h
-+++ b/include/linux/spinlock_api_up.h
-@@ -1,7 +1,7 @@
- #ifndef __LINUX_SPINLOCK_API_UP_H
- #define __LINUX_SPINLOCK_API_UP_H
-
--#ifndef __LINUX_SPINLOCK_H
-+#ifndef __LINUX_INSIDE_SPINLOCK_H
- # error "please don't include this file directly"
- #endif
-
---- a/include/linux/spinlock_rt.h
-+++ b/include/linux/spinlock_rt.h
-@@ -2,7 +2,7 @@
- #ifndef __LINUX_SPINLOCK_RT_H
- #define __LINUX_SPINLOCK_RT_H
-
--#ifndef __LINUX_SPINLOCK_H
-+#ifndef __LINUX_INSIDE_SPINLOCK_H
- #error Do not include directly. Use spinlock.h
- #endif
-
---- a/include/linux/spinlock_up.h
-+++ b/include/linux/spinlock_up.h
-@@ -1,7 +1,7 @@
- #ifndef __LINUX_SPINLOCK_UP_H
- #define __LINUX_SPINLOCK_UP_H
-
--#ifndef __LINUX_SPINLOCK_H
-+#ifndef __LINUX_INSIDE_SPINLOCK_H
- # error "please don't include this file directly"
- #endif
-
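
The guard pattern this (likewise dropped) patch introduced generalizes to any umbrella header; a minimal sketch with hypothetical header names:

/* umbrella.h - the only header users may include directly */
#ifndef EXAMPLE_UMBRELLA_H
#define EXAMPLE_UMBRELLA_H
#define EXAMPLE_INSIDE_UMBRELLA_H

#include "example_impl.h"

#undef EXAMPLE_INSIDE_UMBRELLA_H	/* marker is gone once we leave */
#endif /* EXAMPLE_UMBRELLA_H */

/* example_impl.h - only valid when pulled in via umbrella.h */
#ifndef EXAMPLE_IMPL_H
#define EXAMPLE_IMPL_H

#ifndef EXAMPLE_INSIDE_UMBRELLA_H
# error "please don't include this file directly"
#endif

/* ... implementation details ... */
#endif /* EXAMPLE_IMPL_H */

Because the marker is #undef'ed at the end of the umbrella header, a direct include errors out no matter where it appears in the include list, not only when it comes first.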
diff --git a/patches/mm-multi-gen-LRU-Move-lru_gen_add_mm-out-of-IRQ-off-.patch b/patches/mm-multi-gen-LRU-Move-lru_gen_add_mm-out-of-IRQ-off-.patch
new file mode 100644
index 000000000000..9dcf84be02f0
--- /dev/null
+++ b/patches/mm-multi-gen-LRU-Move-lru_gen_add_mm-out-of-IRQ-off-.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 26 Oct 2022 10:33:49 +0200
+Subject: [PATCH] mm: multi-gen LRU: Move lru_gen_add_mm() out of IRQ-off
+ region.
+
+lru_gen_add_mm() has been added within an IRQ-off region in the commit
+mentioned below. The other invocations of lru_gen_add_mm() are not within
+an IRQ-off region.
+The invocation within an IRQ-off region is problematic on PREEMPT_RT
+because the function is using a spin_lock_t which must not be used
+within IRQ-disabled regions.
+
+The other invocations of lru_gen_add_mm() occur while task_struct::alloc_lock
+is acquired.
+Move lru_gen_add_mm() after interrupts are enabled and before
+task_unlock().
+
+Fixes: bd74fdaea1460 ("mm: multi-gen LRU: support page table walks")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/20221026134830.711887-1-bigeasy@linutronix.de
+---
+ fs/exec.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1012,7 +1012,6 @@ static int exec_mmap(struct mm_struct *m
+ active_mm = tsk->active_mm;
+ tsk->active_mm = mm;
+ tsk->mm = mm;
+- lru_gen_add_mm(mm);
+ /*
+ * This prevents preemption while active_mm is being loaded and
+ * it and mm are being updated, which could cause problems for
+@@ -1025,6 +1024,7 @@ static int exec_mmap(struct mm_struct *m
+ activate_mm(active_mm, mm);
+ if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
+ local_irq_enable();
++ lru_gen_add_mm(mm);
+ task_unlock(tsk);
+ lru_gen_use_mm(mm);
+ if (old_mm) {
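
An illustrative sketch of the constraint being fixed, not code from the patch: on PREEMPT_RT a spin_lock_t is a sleeping rtmutex, so anything that takes one must not run with hard interrupts disabled.

/* Before: lru_gen_add_mm() ran inside the IRQ-off window. */
static void broken_on_rt(struct mm_struct *mm)
{
	local_irq_disable();
	lru_gen_add_mm(mm);	/* takes a spin_lock_t: may sleep with IRQs off */
	local_irq_enable();
}

/* After: the call is made once interrupts are back on, while
 * task_struct::alloc_lock (task_lock()) is still held. */
static void fixed_on_rt(struct task_struct *tsk, struct mm_struct *mm)
{
	local_irq_enable();
	lru_gen_add_mm(mm);
	task_unlock(tsk);
}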
diff --git a/patches/net-Avoid-the-IPI-to-free-the.patch b/patches/net-Avoid-the-IPI-to-free-the.patch
index 02245bc3bb41..4109b430ff4c 100644
--- a/patches/net-Avoid-the-IPI-to-free-the.patch
+++ b/patches/net-Avoid-the-IPI-to-free-the.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Check if this softnet_data structure is another cpu one
* If yes, queue it to our IPI list and return 1
-@@ -6661,6 +6652,30 @@ static void skb_defer_free_flush(struct
+@@ -6648,6 +6639,30 @@ static void skb_defer_free_flush(struct
}
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static __latent_entropy void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
-@@ -11412,7 +11427,11 @@ static int __init net_dev_init(void)
+@@ -11397,7 +11412,11 @@ static int __init net_dev_init(void)
INIT_CSD(&sd->csd, rps_trigger_softirq, sd);
sd->cpu = i;
#endif
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
init_gro_hash(&sd->backlog);
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -6555,6 +6555,11 @@ nodefer: __kfree_skb(skb);
+@@ -6658,6 +6658,11 @@ nodefer: __kfree_skb(skb);
/* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU
* if we are unlucky enough (this seems very unlikely).
*/
diff --git a/patches/powerpc__Add_support_for_lazy_preemption.patch b/patches/powerpc__Add_support_for_lazy_preemption.patch
index f0b172fa6a79..d68020e4f915 100644
--- a/patches/powerpc__Add_support_for_lazy_preemption.patch
+++ b/patches/powerpc__Add_support_for_lazy_preemption.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -241,6 +241,7 @@ config PPC
+@@ -242,6 +242,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/powerpc__traps__Use_PREEMPT_RT.patch b/patches/powerpc__traps__Use_PREEMPT_RT.patch
index 8864dc30e92a..0a7493943546 100644
--- a/patches/powerpc__traps__Use_PREEMPT_RT.patch
+++ b/patches/powerpc__traps__Use_PREEMPT_RT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
-@@ -260,12 +260,17 @@ static char *get_mmu_str(void)
+@@ -261,12 +261,17 @@ static char *get_mmu_str(void)
static int __die(const char *str, struct pt_regs *regs, long err)
{
diff --git a/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch b/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
index 943ebb6f1128..0e729e55dc50 100644
--- a/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
+++ b/patches/powerpc_kvm__Disable_in-kernel_MPIC_emulation_for_PREEMPT_RT.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -221,6 +221,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
- depends on KVM && E500
+ depends on KVM && PPC_E500
+ depends on !PREEMPT_RT
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQFD
diff --git a/patches/printk-Bring-back-the-RT-bits.patch b/patches/printk-Bring-back-the-RT-bits.patch
index 021096dd417b..e0d2d9c32d91 100644
--- a/patches/printk-Bring-back-the-RT-bits.patch
+++ b/patches/printk-Bring-back-the-RT-bits.patch
@@ -16,17 +16,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/tty/sysrq.c | 2
include/linux/console.h | 17 +
- include/linux/printk.h | 16 +
+ include/linux/printk.h | 15 +
kernel/hung_task.c | 11
kernel/panic.c | 6
kernel/printk/internal.h | 2
- kernel/printk/printk.c | 593 +++++++++++++++++++++++++++++++++++++++-----
+ kernel/printk/printk.c | 592 +++++++++++++++++++++++++++++++++++++++-----
kernel/printk/printk_safe.c | 32 ++
kernel/rcu/tree_stall.h | 2
kernel/reboot.c | 16 +
kernel/watchdog.c | 4
kernel/watchdog_hld.c | 4
- 12 files changed, 640 insertions(+), 65 deletions(-)
+ 12 files changed, 638 insertions(+), 65 deletions(-)
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -81,19 +81,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -169,7 +169,11 @@ extern void __printk_safe_exit(void);
+@@ -168,6 +168,9 @@ extern void __printk_safe_exit(void);
+ */
#define printk_deferred_enter __printk_safe_enter
#define printk_deferred_exit __printk_safe_exit
-
+extern void printk_prefer_direct_enter(void);
+extern void printk_prefer_direct_exit(void);
-+
- extern bool pr_flush(int timeout_ms, bool reset_on_progress);
+extern void try_block_console_kthreads(int timeout_ms);
/*
* Please don't use printk_ratelimit(), because it shares ratelimiting state
-@@ -221,11 +225,23 @@ static inline void printk_deferred_exit(
+@@ -219,6 +222,18 @@ static inline void printk_deferred_exit(
{
}
@@ -105,11 +103,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+}
+
- static inline bool pr_flush(int timeout_ms, bool reset_on_progress)
- {
- return true;
- }
-
+static inline void try_block_console_kthreads(int timeout_ms)
+{
+}
@@ -137,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
touch_nmi_watchdog();
-@@ -204,12 +208,17 @@ static void check_hung_uninterruptible_t
+@@ -212,12 +216,17 @@ static void check_hung_uninterruptible_t
}
unlock:
rcu_read_unlock();
@@ -174,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
crash_smp_send_stop();
}
-@@ -604,6 +606,8 @@ void __warn(const char *file, int line,
+@@ -601,6 +603,8 @@ void __warn(const char *file, int line,
{
disable_trace_on_warning();
@@ -183,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (file)
pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS\n",
raw_smp_processor_id(), current->pid, file, line,
-@@ -633,6 +637,8 @@ void __warn(const char *file, int line,
+@@ -630,6 +634,8 @@ void __warn(const char *file, int line,
/* Just a warning, don't kill lockdep. */
add_taint(taint, LOCKDEP_STILL_OK);
@@ -205,8 +198,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
const struct dev_printk_info *dev_info,
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -224,6 +224,36 @@ int devkmsg_sysctl_set_loglvl(struct ctl
- static int nr_ext_console_drivers;
+@@ -221,6 +221,36 @@ int devkmsg_sysctl_set_loglvl(struct ctl
+ #endif /* CONFIG_PRINTK && CONFIG_SYSCTL */
/*
+ * Used to synchronize printing kthreads against direct printing via
@@ -242,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Helper macros to handle lockdep when locking/unlocking console_sem. We use
* macros instead of functions so that _RET_IP_ contains useful information.
*/
-@@ -271,14 +301,49 @@ static bool panic_in_progress(void)
+@@ -268,14 +298,49 @@ static bool panic_in_progress(void)
}
/*
@@ -299,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Array of consoles built from command line options (console=)
-@@ -361,7 +426,75 @@ static int console_msg_format = MSG_FORM
+@@ -358,7 +423,75 @@ static int console_msg_format = MSG_FORM
/* syslog_lock protects syslog_* variables and write access to clear_seq. */
static DEFINE_MUTEX(syslog_lock);
@@ -375,7 +368,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -2252,10 +2385,10 @@ asmlinkage int vprintk_emit(int facility
+@@ -2249,10 +2382,10 @@ asmlinkage int vprintk_emit(int facility
printed_len = vprintk_store(facility, level, dev_info, fmt, args);
/* If called from the scheduler, we can not call up(). */
@@ -388,8 +381,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* printing of all remaining records to all consoles so that
* this context can return as soon as possible. Hopefully
* another printk() caller will take over the printing.
-@@ -2298,6 +2431,8 @@ EXPORT_SYMBOL(_printk);
-
+@@ -2296,6 +2429,8 @@ EXPORT_SYMBOL(_printk);
+ static bool pr_flush(int timeout_ms, bool reset_on_progress);
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress);
+static void printk_start_kthread(struct console *con);
@@ -397,16 +390,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* CONFIG_PRINTK */
#define CONSOLE_LOG_MAX 0
-@@ -2331,6 +2466,8 @@ static void call_console_driver(struct c
- }
+@@ -2330,6 +2465,8 @@ static void call_console_driver(struct c
static bool suppress_message_printing(int level) { return false; }
+ static bool pr_flush(int timeout_ms, bool reset_on_progress) { return true; }
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress) { return true; }
+static void printk_start_kthread(struct console *con) { }
+static bool allow_direct_printing(void) { return true; }
#endif /* CONFIG_PRINTK */
-@@ -2549,6 +2686,14 @@ static int console_cpu_notify(unsigned i
+@@ -2548,6 +2685,14 @@ static int console_cpu_notify(unsigned i
/* If trylock fails, someone else is doing the printing */
if (console_trylock())
console_unlock();
@@ -421,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return 0;
}
-@@ -2568,7 +2713,7 @@ void console_lock(void)
+@@ -2567,7 +2712,7 @@ void console_lock(void)
down_console_sem();
if (console_suspended)
return;
@@ -430,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_may_schedule = 1;
}
EXPORT_SYMBOL(console_lock);
-@@ -2589,15 +2734,30 @@ int console_trylock(void)
+@@ -2588,15 +2733,30 @@ int console_trylock(void)
up_console_sem();
return 0;
}
@@ -463,7 +456,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(is_console_locked);
-@@ -2620,18 +2780,9 @@ static bool abandon_console_lock_in_pani
+@@ -2619,18 +2779,9 @@ static bool abandon_console_lock_in_pani
return atomic_read(&panic_cpu) != raw_smp_processor_id();
}
@@ -484,7 +477,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
/*
-@@ -2640,15 +2791,43 @@ static inline bool console_is_usable(str
+@@ -2639,15 +2790,43 @@ static inline bool console_is_usable(str
* cope (CON_ANYTIME) don't call them until this CPU is officially up.
*/
if (!cpu_online(raw_smp_processor_id()) &&
@@ -530,7 +523,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
up_console_sem();
}
-@@ -2666,17 +2845,19 @@ static void __console_unlock(void)
+@@ -2665,17 +2844,19 @@ static void __console_unlock(void)
*
* @handover will be set to true if a printk waiter has taken over the
* console_lock, in which case the caller is no longer holding the
@@ -555,7 +548,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct printk_info info;
struct printk_record r;
unsigned long flags;
-@@ -2685,7 +2866,8 @@ static bool console_emit_next_record(str
+@@ -2684,7 +2865,8 @@ static bool console_emit_next_record(str
prb_rec_init_rd(&r, &info, text, CONSOLE_LOG_MAX);
@@ -565,7 +558,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!prb_read_valid(prb, con->seq, &r))
return false;
-@@ -2693,7 +2875,8 @@ static bool console_emit_next_record(str
+@@ -2692,7 +2874,8 @@ static bool console_emit_next_record(str
if (con->seq != r.info->seq) {
con->dropped += r.info->seq - con->seq;
con->seq = r.info->seq;
@@ -575,7 +568,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
suppress_panic_printk = 1;
pr_warn_once("Too many dropped messages. Suppress messages on non-panic CPUs to prevent livelock.\n");
}
-@@ -2715,32 +2898,62 @@ static bool console_emit_next_record(str
+@@ -2714,32 +2897,62 @@ static bool console_emit_next_record(str
len = record_print_text(&r, console_msg_format & MSG_FORMAT_SYSLOG, printk_time);
}
@@ -654,7 +647,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Print out all remaining records to all consoles.
*
* @do_cond_resched is set by the caller. It can be true only in schedulable
-@@ -2758,8 +2971,8 @@ static bool console_emit_next_record(str
+@@ -2757,8 +2970,8 @@ static bool console_emit_next_record(str
* were flushed to all usable consoles. A returned false informs the caller
* that everything was not flushed (either there were no usable consoles or
* another context has taken over printing or it is a panic situation and this
@@ -665,7 +658,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Requires the console_lock.
*/
-@@ -2776,6 +2989,10 @@ static bool console_flush_all(bool do_co
+@@ -2775,6 +2988,10 @@ static bool console_flush_all(bool do_co
*handover = false;
do {
@@ -676,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
any_progress = false;
for_each_console(con) {
-@@ -2787,13 +3004,11 @@ static bool console_flush_all(bool do_co
+@@ -2786,13 +3003,11 @@ static bool console_flush_all(bool do_co
if (con->flags & CON_EXTENDED) {
/* Extended consoles do not print "dropped messages". */
@@ -694,7 +687,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (*handover)
return false;
-@@ -2908,10 +3123,13 @@ void console_unblank(void)
+@@ -2907,10 +3122,13 @@ void console_unblank(void)
if (oops_in_progress) {
if (down_trylock_console_sem() != 0)
return;
@@ -709,8 +702,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_may_schedule = 0;
for_each_console(c)
if ((c->flags & CON_ENABLED) && c->unblank)
-@@ -3190,6 +3408,10 @@ void register_console(struct console *ne
- nr_ext_console_drivers++;
+@@ -3186,6 +3404,10 @@ void register_console(struct console *ne
+ }
newcon->dropped = 0;
+ newcon->thread = NULL;
@@ -720,7 +713,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (newcon->flags & CON_PRINTBUFFER) {
/* Get a consistent copy of @syslog_seq. */
mutex_lock(&syslog_lock);
-@@ -3199,6 +3421,10 @@ void register_console(struct console *ne
+@@ -3195,6 +3417,10 @@ void register_console(struct console *ne
/* Begin with next message. */
newcon->seq = prb_next_seq(prb);
}
@@ -731,7 +724,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
console_sysfs_notify();
-@@ -3225,6 +3451,7 @@ EXPORT_SYMBOL(register_console);
+@@ -3218,6 +3444,7 @@ EXPORT_SYMBOL(register_console);
int unregister_console(struct console *console)
{
@@ -739,7 +732,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct console *con;
int res;
-@@ -3265,7 +3492,20 @@ int unregister_console(struct console *c
+@@ -3255,7 +3482,20 @@ int unregister_console(struct console *c
console_drivers->flags |= CON_CONSDEV;
console->flags &= ~CON_ENABLED;
@@ -760,7 +753,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_sysfs_notify();
if (console->exit)
-@@ -3361,6 +3601,20 @@ static int __init printk_late_init(void)
+@@ -3351,6 +3591,20 @@ static int __init printk_late_init(void)
}
late_initcall(printk_late_init);
@@ -781,9 +774,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#if defined CONFIG_PRINTK
/* If @con is specified, only wait for that console. Otherwise wait for all. */
static bool __pr_flush(struct console *con, int timeout_ms, bool reset_on_progress)
-@@ -3444,11 +3698,209 @@ bool pr_flush(int timeout_ms, bool reset
+@@ -3433,11 +3687,208 @@ static bool pr_flush(int timeout_ms, boo
+ return __pr_flush(NULL, timeout_ms, reset_on_progress);
}
- EXPORT_SYMBOL(pr_flush);
+static void __printk_fallback_preferred_direct(void)
+{
@@ -882,7 +875,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ }
+
+ con_printk(KERN_INFO, con, "printing thread started\n");
-+
+ for (;;) {
+ /*
+ * Guarantee this task is visible on the waitqueue before
@@ -895,7 +887,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * This pairs with __wake_up_klogd:A.
+ */
+ error = wait_event_interruptible(log_wait,
-+ printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
++ printer_should_wake(con, seq)); /* LMM(printk_kthread_func:A) */
+
+ if (kthread_should_stop() || !printk_kthreads_available)
+ break;
@@ -993,7 +985,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(int, printk_pending);
-@@ -3456,10 +3908,14 @@ static void wake_up_klogd_work_func(stru
+@@ -3445,10 +3896,14 @@ static void wake_up_klogd_work_func(stru
{
int pending = this_cpu_xchg(printk_pending, 0);
@@ -1009,7 +1001,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (pending & PRINTK_PENDING_WAKEUP)
-@@ -3484,10 +3940,11 @@ static void __wake_up_klogd(int val)
+@@ -3473,10 +3928,11 @@ static void __wake_up_klogd(int val)
* prepare_to_wait_event(), which is called after ___wait_event() adds
* the waiter but before it has checked the wait condition.
*
@@ -1023,7 +1015,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
this_cpu_or(printk_pending, val);
irq_work_queue(this_cpu_ptr(&wake_up_klogd_work));
}
-@@ -3505,7 +3962,17 @@ void defer_console_output(void)
+@@ -3494,7 +3950,17 @@ void defer_console_output(void)
* New messages may have been added directly to the ringbuffer
* using vprintk_store(), so wake any waiters as well.
*/
@@ -1090,7 +1082,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
--- a/kernel/rcu/tree_stall.h
+++ b/kernel/rcu/tree_stall.h
-@@ -643,6 +643,7 @@ static void print_cpu_stall(unsigned lon
+@@ -642,6 +642,7 @@ static void print_cpu_stall(unsigned lon
* See Documentation/RCU/stallwarn.rst for info on how to debug
* RCU CPU stall warnings.
*/
@@ -1098,7 +1090,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_rcu_stall_warning(rcu_state.name, TPS("SelfDetected"));
pr_err("INFO: %s self-detected stall on CPU\n", rcu_state.name);
raw_spin_lock_irqsave_rcu_node(rdp->mynode, flags);
-@@ -677,6 +678,7 @@ static void print_cpu_stall(unsigned lon
+@@ -676,6 +677,7 @@ static void print_cpu_stall(unsigned lon
*/
set_tsk_need_resched(current);
set_preempt_need_resched();
@@ -1116,7 +1108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
usermodehelper_disable();
device_shutdown();
}
-@@ -270,6 +271,7 @@ static void kernel_shutdown_prepare(enum
+@@ -282,6 +283,7 @@ static void kernel_shutdown_prepare(enum
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT) ? SYS_HALT : SYS_POWER_OFF, NULL);
system_state = state;
@@ -1124,7 +1116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
usermodehelper_disable();
device_shutdown();
}
-@@ -819,9 +821,11 @@ static int __orderly_reboot(void)
+@@ -836,9 +838,11 @@ static int __orderly_reboot(void)
ret = run_cmd(reboot_cmd);
if (ret) {
@@ -1136,7 +1128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return ret;
-@@ -834,6 +838,7 @@ static int __orderly_poweroff(bool force
+@@ -851,6 +855,7 @@ static int __orderly_poweroff(bool force
ret = run_cmd(poweroff_cmd);
if (ret && force) {
@@ -1144,7 +1136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn("Failed to start orderly shutdown: forcing the issue\n");
/*
-@@ -843,6 +848,7 @@ static int __orderly_poweroff(bool force
+@@ -860,6 +865,7 @@ static int __orderly_poweroff(bool force
*/
emergency_sync();
kernel_power_off();
@@ -1152,7 +1144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return ret;
-@@ -900,6 +906,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
+@@ -917,6 +923,8 @@ EXPORT_SYMBOL_GPL(orderly_reboot);
*/
static void hw_failure_emergency_poweroff_func(struct work_struct *work)
{
@@ -1161,7 +1153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We have reached here after the emergency shutdown waiting period has
* expired. This means orderly_poweroff has not been able to shut off
-@@ -916,6 +924,8 @@ static void hw_failure_emergency_powerof
+@@ -933,6 +941,8 @@ static void hw_failure_emergency_powerof
*/
pr_emerg("Hardware protection shutdown failed. Trying emergency restart\n");
emergency_restart();
@@ -1170,7 +1162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static DECLARE_DELAYED_WORK(hw_failure_emergency_poweroff_work,
-@@ -954,11 +964,13 @@ void hw_protection_shutdown(const char *
+@@ -971,11 +981,13 @@ void hw_protection_shutdown(const char *
{
static atomic_t allow_proceed = ATOMIC_INIT(1);
@@ -1185,7 +1177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue a backup emergency shutdown in the event of
-@@ -966,6 +978,8 @@ void hw_protection_shutdown(const char *
+@@ -983,6 +995,8 @@ void hw_protection_shutdown(const char *
*/
hw_failure_emergency_poweroff(ms_until_forced);
orderly_poweroff(true);
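
Several hunks above bracket emergency paths (__warn(), panic, reboot) with the new prefer-direct interface. A sketch of the usage pattern, with an illustrative caller:

static void example_emergency_path(void)
{
	/* Make printk() print directly to the consoles instead of waking
	 * the per-console kthreads, so the output is visible even if the
	 * system never schedules again. */
	printk_prefer_direct_enter();

	pr_emerg("example: initiating emergency shutdown\n");
	/* ... emergency work whose messages must reach the console ... */

	printk_prefer_direct_exit();
}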
diff --git a/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch b/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
index 5a93b2b61f9b..c7151dcdea7e 100644
--- a/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
+++ b/patches/rcutorture-Also-force-sched-priority-to-timersd-on-b.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -2157,6 +2157,12 @@ static int rcutorture_booster_init(unsig
+@@ -2363,6 +2363,12 @@ static int rcutorture_booster_init(unsig
WARN_ON_ONCE(!t);
sp.sched_priority = 2;
sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
diff --git a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
index 11018a6d7fb5..30f45d9bc20c 100644
--- a/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
+++ b/patches/sched-Consider-task_struct-saved_state-in-wait_task_.patch
@@ -21,12 +21,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Reviewed-by: Valentin Schneider <vschneid@redhat.com>
Link: https://lkml.kernel.org/r/Yt%2FpQAFQ1xKNK0RY@linutronix.de
---
- kernel/sched/core.c | 76 ++++++++++++++++++++++++++++++++++++++++++++++++----
- 1 file changed, 71 insertions(+), 5 deletions(-)
+ kernel/sched/core.c | 81 ++++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 76 insertions(+), 5 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3251,6 +3251,70 @@ int migrate_swap(struct task_struct *cur
+@@ -3245,6 +3245,76 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -67,37 +67,43 @@ Link: https://lkml.kernel.org/r/Yt%2FpQAFQ1xKNK0RY@linutronix.de
+ bool mismatch;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
-+ mismatch = READ_ONCE(p->__state) != match_state &&
-+ READ_ONCE(p->saved_state) != match_state;
++ if (READ_ONCE(p->__state) & match_state)
++ mismatch = false;
++ else if (READ_ONCE(p->saved_state) & match_state)
++ mismatch = false;
++ else
++ mismatch = true;
++
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return mismatch;
+}
+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
+ bool *wait)
+{
-+ if (READ_ONCE(p->__state) == match_state)
++ if (READ_ONCE(p->__state) & match_state)
++ return true;
++ if (READ_ONCE(p->saved_state) & match_state) {
++ *wait = true;
+ return true;
-+ if (READ_ONCE(p->saved_state) != match_state)
-+ return false;
-+ *wait = true;
-+ return true;
++ }
++ return false;
+}
+#else
+static __always_inline bool state_mismatch(struct task_struct *p, unsigned int match_state)
+{
-+ return READ_ONCE(p->__state) != match_state;
++ return !(READ_ONCE(p->__state) & match_state);
+}
+static __always_inline bool state_match(struct task_struct *p, unsigned int match_state,
+ bool *wait)
+{
-+ return READ_ONCE(p->__state) == match_state;
++ return (READ_ONCE(p->__state) & match_state);
+}
+#endif
+
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -3269,7 +3333,7 @@ int migrate_swap(struct task_struct *cur
+@@ -3263,7 +3333,7 @@ int migrate_swap(struct task_struct *cur
*/
unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state)
{
@@ -106,31 +112,29 @@ Link: https://lkml.kernel.org/r/Yt%2FpQAFQ1xKNK0RY@linutronix.de
struct rq_flags rf;
unsigned long ncsw;
struct rq *rq;
-@@ -3295,7 +3359,7 @@ unsigned long wait_task_inactive(struct
+@@ -3289,7 +3359,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
- while (task_running(rq, p)) {
-- if (match_state && unlikely(READ_ONCE(p->__state) != match_state))
-+ if (match_state && state_mismatch(p, match_state))
+ while (task_on_cpu(rq, p)) {
+- if (!(READ_ONCE(p->__state) & match_state))
++ if (state_mismatch(p, match_state))
return 0;
cpu_relax();
}
-@@ -3308,10 +3372,12 @@ unsigned long wait_task_inactive(struct
+@@ -3302,9 +3372,10 @@ unsigned long wait_task_inactive(struct
rq = task_rq_lock(p, &rf);
trace_sched_wait_task(p);
- running = task_running(rq, p);
+ running = task_on_cpu(rq, p);
- queued = task_on_rq_queued(p);
+ wait = task_on_rq_queued(p);
ncsw = 0;
-- if (!match_state || READ_ONCE(p->__state) == match_state)
+- if (READ_ONCE(p->__state) & match_state)
+
-+ if (!match_state || state_match(p, match_state, &wait))
++ if (state_match(p, match_state, &wait))
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
-+
task_rq_unlock(rq, p, &rf);
- /*
-@@ -3340,7 +3406,7 @@ unsigned long wait_task_inactive(struct
+@@ -3334,7 +3405,7 @@ unsigned long wait_task_inactive(struct
* running right now), it's preempted, and we should
* yield - it could be a while.
*/
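
Note that upstream v6.1 turned match_state into a bitmask (visible in the "READ_ONCE(p->__state) & match_state" hunks above), and the RT helpers now follow suit. A hedged sketch of a caller, with illustrative naming:

static bool example_task_settled(struct task_struct *p)
{
	unsigned long ncsw;

	/* Accept either state. On PREEMPT_RT a task blocked on a sleeping
	 * spinlock keeps its original state in p->saved_state, which
	 * state_match()/state_mismatch() consult as well. */
	ncsw = wait_task_inactive(p, __TASK_STOPPED | __TASK_TRACED);

	return ncsw != 0;	/* 0: never seen off-CPU in a matching state */
}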
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index e6d4254ab797..74d11e514112 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2038,6 +2038,43 @@ static inline int test_tsk_need_resched(
+@@ -2059,6 +2059,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -252,7 +252,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
#define TRACE_EVENT_TYPE_MAX \
-@@ -158,9 +159,10 @@ static inline void tracing_generic_entry
+@@ -159,9 +160,10 @@ static inline void tracing_generic_entry
unsigned int trace_ctx)
{
entry->preempt_count = trace_ctx & 0xff;
@@ -264,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
-@@ -171,7 +173,13 @@ enum trace_flag_type {
+@@ -172,7 +174,13 @@ enum trace_flag_type {
TRACE_FLAG_NEED_RESCHED = 0x04,
TRACE_FLAG_HARDIRQ = 0x08,
TRACE_FLAG_SOFTIRQ = 0x10,
@@ -294,7 +294,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1046,6 +1046,46 @@ void resched_curr(struct rq *rq)
+@@ -1040,6 +1040,46 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2227,6 +2267,7 @@ void migrate_disable(void)
+@@ -2221,6 +2261,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
@@ -349,7 +349,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -2258,6 +2299,7 @@ void migrate_enable(void)
+@@ -2252,6 +2293,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
@@ -357,7 +357,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4655,6 +4697,9 @@ int sched_fork(unsigned long clone_flags
+@@ -4668,6 +4710,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -367,7 +367,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -6523,6 +6568,7 @@ static void __sched notrace __schedule(u
+@@ -6537,6 +6582,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -375,7 +375,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6737,6 +6783,30 @@ static void __sched notrace preempt_sche
+@@ -6751,6 +6797,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6750,6 +6820,8 @@ asmlinkage __visible void __sched notrac
+@@ -6764,6 +6834,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6797,6 +6869,9 @@ asmlinkage __visible void __sched notrac
+@@ -6811,6 +6883,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -9054,7 +9129,9 @@ void __init init_idle(struct task_struct
+@@ -9068,7 +9143,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4576,7 +4576,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4756,7 +4756,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4600,7 +4600,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4780,7 +4780,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -456,7 +456,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -4746,7 +4746,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4926,7 +4926,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -465,7 +465,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4895,7 +4895,7 @@ static void __account_cfs_rq_runtime(str
+@@ -5075,7 +5075,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -474,7 +474,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5646,7 +5646,7 @@ static void hrtick_start_fair(struct rq
+@@ -5826,7 +5826,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (task_current(rq, p))
@@ -483,7 +483,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -7307,7 +7307,7 @@ static void check_preempt_wakeup(struct
+@@ -7473,7 +7473,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -11454,7 +11454,7 @@ static void task_fork_fair(struct task_s
+@@ -11622,7 +11622,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -501,7 +501,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -11481,7 +11481,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -11649,7 +11649,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (task_current(rq, p)) {
if (p->prio > oldprio)
@@ -524,7 +524,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -2356,6 +2356,15 @@ extern void reweight_task(struct task_st
+@@ -2350,6 +2350,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -542,7 +542,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2625,11 +2625,19 @@ unsigned int tracing_gen_ctx_irq_test(un
+@@ -2640,11 +2640,19 @@ unsigned int tracing_gen_ctx_irq_test(un
if (softirq_count() >> (SOFTIRQ_SHIFT + 1))
trace_flags |= TRACE_FLAG_BH_OFF;
@@ -564,7 +564,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(min_t(unsigned int, migration_disable_value(), 0xf)) << 4;
}
-@@ -4215,15 +4223,17 @@ unsigned long trace_total_entries(struct
+@@ -4230,15 +4238,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -591,7 +591,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -4257,14 +4267,16 @@ static void print_func_help_header_irq(s
+@@ -4272,14 +4282,16 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
diff --git a/patches/series b/patches/series
index f607d9f5625d..bd755620f7a1 100644
--- a/patches/series
+++ b/patches/series
@@ -9,40 +9,18 @@
###########################################################################
# Posted
###########################################################################
-genirq-Provide-generic_handle_domain_irq_safe.patch
-0001-lib-vsprintf-Remove-static_branch_likely-from-__ptr_.patch
-0002-lib-vsprintf-Initialize-vsprintf-s-pointer-hash-once.patch
-locking-Detect-includes-rwlock.h-outside-of-spinlock.patch
vduse-Remove-include-of-rwlock.h.patch
+mm-multi-gen-LRU-Move-lru_gen_add_mm-out-of-IRQ-off-.patch
# Hacks to get ptrace to work.
signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
sched-Consider-task_struct-saved_state-in-wait_task_.patch
-# Vlastimil Babka [PATCH v2 0/5] mm/slub: fix validation races and cleanup locking
-# 20220823170400.26546-1-vbabka@suse.cz
-0001-mm-slub-move-free_debug_processing-further.patch
-0002-mm-slub-restrict-sysfs-validation-to-debug-caches-an.patch
-0003-mm-slub-remove-slab_lock-usage-for-debug-operations.patch
-0004-mm-slub-convert-object_map_lock-to-non-raw-spinlock.patch
-0005-mm-slub-simplify-__cmpxchg_double_slab-and-slab_-un-.patch
-
# ifdef RT cleanups.
-# staged to slub
-0003-slub-Make-PREEMPT_RT-support-less-convoluted.patch
-# pending
-0001-preempt-Provide-preempt_-dis-en-able_nested.patch
-0002-dentry-Use-preempt_-dis-en-able_nested.patch
-0003-mm-vmstat-Use-preempt_-dis-en-able_nested.patch
-0004-mm-debug-Provide-VM_WARN_ON_IRQS_ENABLED.patch
-0005-mm-memcontrol-Replace-the-PREEMPT_RT-conditionals.patch
-0006-mm-compaction-Get-rid-of-RT-ifdeffery.patch
-0007-flex_proportions-Disable-preemption-entering-the-wri.patch
-0008-u64_stats-Streamline-the-implementation.patch
# Wait until after the previous patch is upstream.
0001-spi-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
-0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
-0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
+0002-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-d.patch
+0003-net-Remove-the-obsolte-u64_stats_fetch_-_irq-users-n.patch
0004-bpf-Remove-the-obsolte-u64_stats_fetch_-_irq-users.patch
# Wait until previous four are upstream.
u64_stat-Remove-the-obsolete-fetch_irq-variants.patch
diff --git a/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch b/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
index d09bd014e32f..54116505447e 100644
--- a/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
+++ b/patches/signal-Don-t-disable-preemption-in-ptrace_stop-on-PR.patch
@@ -39,7 +39,7 @@ Link: https://lkml.kernel.org/r/20220720154435.232749-2-bigeasy@linutronix.de
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2297,13 +2297,13 @@ static int ptrace_stop(int exit_code, in
+@@ -2298,13 +2298,13 @@ static int ptrace_stop(int exit_code, in
/*
* Don't want to allow preemption here, because
* sys_ptrace() needs this task to be inactive.
@@ -54,6 +54,6 @@ Link: https://lkml.kernel.org/r/20220720154435.232749-2-bigeasy@linutronix.de
- preempt_enable_no_resched();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable_no_resched();
- freezable_schedule();
+ schedule();
cgroup_leave_frozen(true);
diff --git a/patches/sysfs__Add__sys_kernel_realtime_entry.patch b/patches/sysfs__Add__sys_kernel_realtime_entry.patch
index 7fd249eadabf..35e7a3ee8b90 100644
--- a/patches/sysfs__Add__sys_kernel_realtime_entry.patch
+++ b/patches/sysfs__Add__sys_kernel_realtime_entry.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
-@@ -137,6 +137,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
+@@ -142,6 +142,15 @@ KERNEL_ATTR_RO(vmcoreinfo);
#endif /* CONFIG_CRASH_CORE */
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* whether file capabilities are enabled */
static ssize_t fscaps_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
-@@ -228,6 +237,9 @@ static struct attribute * kernel_attrs[]
+@@ -233,6 +242,9 @@ static struct attribute * kernel_attrs[]
&rcu_expedited_attr.attr,
&rcu_normal_attr.attr,
#endif
diff --git a/patches/tty_serial_omap__Make_the_locking_RT_aware.patch b/patches/tty_serial_omap__Make_the_locking_RT_aware.patch
index d11c31af4688..0170136bc1c2 100644
--- a/patches/tty_serial_omap__Make_the_locking_RT_aware.patch
+++ b/patches/tty_serial_omap__Make_the_locking_RT_aware.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
-@@ -1240,13 +1240,10 @@ serial_omap_console_write(struct console
+@@ -1241,13 +1241,10 @@ serial_omap_console_write(struct console
unsigned int ier;
int locked = 1;
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the IER then disable the interrupts
-@@ -1273,8 +1270,7 @@ serial_omap_console_write(struct console
+@@ -1274,8 +1271,7 @@ serial_omap_console_write(struct console
check_modem_status(up);
if (locked)
diff --git a/patches/vduse-Remove-include-of-rwlock.h.patch b/patches/vduse-Remove-include-of-rwlock.h.patch
index 7458578a780c..2168e3afbbaa 100644
--- a/patches/vduse-Remove-include-of-rwlock.h.patch
+++ b/patches/vduse-Remove-include-of-rwlock.h.patch
@@ -8,7 +8,8 @@ should be included. Including it directly will break the RT build.
Remove the rwlock.h include.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lkml.kernel.org/r/20220816074816.173227-1-bigeasy@linutronix.de
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lkml.kernel.org/r/20221026134407.711768-1-bigeasy@linutronix.de
---
drivers/vdpa/vdpa_user/iova_domain.h | 1 -
1 file changed, 1 deletion(-)
diff --git a/patches/x86__Enable_RT_also_on_32bit.patch b/patches/x86__Enable_RT_also_on_32bit.patch
index f52dde183193..32ab22667525 100644
--- a/patches/x86__Enable_RT_also_on_32bit.patch
+++ b/patches/x86__Enable_RT_also_on_32bit.patch
@@ -22,8 +22,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -110,6 +109,7 @@ config X86
- select ARCH_SUPPORTS_KMAP_LOCAL_FORCE_MAP if NR_CPUS <= 4096
+@@ -113,6 +112,7 @@ config X86
+ select ARCH_USES_CFI_TRAPS if X86_64 && CFI_CLANG
select ARCH_SUPPORTS_LTO_CLANG
select ARCH_SUPPORTS_LTO_CLANG_THIN
+ select ARCH_SUPPORTS_RT
diff --git a/patches/x86__Support_for_lazy_preemption.patch b/patches/x86__Support_for_lazy_preemption.patch
index 8c8752c8e6f1..90bbb19950c3 100644
--- a/patches/x86__Support_for_lazy_preemption.patch
+++ b/patches/x86__Support_for_lazy_preemption.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -244,6 +244,7 @@ config X86
+@@ -250,6 +250,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -153,7 +153,7 @@ static unsigned long exit_to_user_mode_l
+@@ -155,7 +155,7 @@ static unsigned long exit_to_user_mode_l
local_irq_enable_exit_to_user(ti_work);
diff --git a/patches/x86_entry__Use_should_resched_in_idtentry_exit_cond_resched.patch b/patches/x86_entry__Use_should_resched_in_idtentry_exit_cond_resched.patch
index d2068a6f3bd0..6ffdc838bfcd 100644
--- a/patches/x86_entry__Use_should_resched_in_idtentry_exit_cond_resched.patch
+++ b/patches/x86_entry__Use_should_resched_in_idtentry_exit_cond_resched.patch
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -381,7 +381,7 @@ void raw_irqentry_exit_cond_resched(void
+@@ -385,7 +385,7 @@ void raw_irqentry_exit_cond_resched(void
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());