author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-16 10:41:20 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-16 10:41:20 +0200
commit     7909bbc59ec2a39c42b7b71d0ba1bbfb837c79e5 (patch)
tree       63731933734f80646abb0263282bcabff684d6c9
parent     1e5923812d3108f98464a1d9515c1b9c1550fd62 (diff)
download   linux-rt-7909bbc59ec2a39c42b7b71d0ba1bbfb837c79e5.tar.gz
[ANNOUNCE] v5.14-rc6-rt10 (tag: v5.14-rc6-rt10-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rc6-rt10 patch set.

Changes since v5.14-rc6-rt9:

  - The locking bits have been updated by Thomas Gleixner. They have been
    posted as v5 to the list.

  - The patches for locking's API selftest have been updated. As a result
    the locking selftest can be enabled.

Known issues

  - netconsole triggers WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
    still not clear if the RCU related change is correct.

  - Clark Williams reported issues in i915 (execlists_dequeue_irq())

  - Clark Williams reported issues with kcov enabled.

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/

  - PPC64 does not build (due to atomic header includes).

  - Clark Williams reported a crash in the SLUB memory allocator. It does
    not look like a recent regression and might have been there since the
    SLUB rework started in v5.13-rt1.

The delta patch against v5.14-rc6-rt9 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc6-rt9-rt10.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc6-rt10

The RT patch against v5.14-rc6 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc6-rt10.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc6-rt10.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch | 2
-rw-r--r--  patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch | 27
-rw-r--r--  patches/0002-io-wq-Don-t-mix-raw_spinlock_irq-spin_lock_irq.patch | 6
-rw-r--r--  patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch | 6
-rw-r--r--  patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch | 10
-rw-r--r--  patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch | 47
-rw-r--r--  patches/0004-mm-slub-don-t-disable-irq-for-debug_check_no_locks_f.patch | 2
-rw-r--r--  patches/0004-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch | 115
-rw-r--r--  patches/0005-lockdep-Make-it-RT-aware.patch (renamed from patches/lockdep__Make_it_RT_aware.patch) | 20
-rw-r--r--  patches/0005-mm-slub-remove-redundant-unfreeze_partials-from-put_.patch | 2
-rw-r--r--  patches/0006-lockdep-selftests-Add-rtmutex-to-the-last-column.patch | 23
-rw-r--r--  patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch | 82
-rw-r--r--  patches/0007-mm-slub-extract-get_partial-from-new_slab_objects.patch | 6
-rw-r--r--  patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch | 217
-rw-r--r--  patches/0008-mm-slub-dissolve-new_slab_objects-into-___slab_alloc.patch | 6
-rw-r--r--  patches/0009-lockdep-selftests-Use-correct-depmap-for-local_lock-.patch | 30
-rw-r--r--  patches/0009-mm-slub-return-slab-page-from-get_partial-and-set-c-.patch | 16
-rw-r--r--  patches/0010-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch | 251
-rw-r--r--  patches/0010-mm-slub-restructure-new-page-checks-in-___slab_alloc.patch | 2
-rw-r--r--  patches/0011-mm-slub-simplify-kmem_cache_cpu-and-tid-setup.patch | 4
-rw-r--r--  patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch | 20
-rw-r--r--  patches/0013-mm-slub-do-initial-checks-in-___slab_alloc-with-irqs.patch | 12
-rw-r--r--  patches/0014-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch | 10
-rw-r--r--  patches/0015-mm-slub-restore-irqs-around-calling-new_slab.patch | 6
-rw-r--r--  patches/0016-mm-slub-validate-slab-from-partial-list-or-page-allo.patch | 8
-rw-r--r--  patches/0017-mm-slub-check-new-pages-with-restored-irqs.patch | 10
-rw-r--r--  patches/0018-mm-slub-stop-disabling-irqs-around-get_partial.patch | 10
-rw-r--r--  patches/0019-mm-slub-move-reset-of-c-page-and-freelist-out-of-dea.patch | 10
-rw-r--r--  patches/0020-mm-slub-make-locking-in-deactivate_slab-irq-safe.patch | 8
-rw-r--r--  patches/0021-mm-slub-call-deactivate_slab-without-disabling-irqs.patch | 4
-rw-r--r--  patches/0022-mm-slub-move-irq-control-into-unfreeze_partials.patch | 8
-rw-r--r--  patches/0023-mm-slub-discard-slabs-in-unfreeze_partials-without-i.patch | 4
-rw-r--r--  patches/0024-mm-slub-detach-whole-partial-list-at-once-in-unfreez.patch | 2
-rw-r--r--  patches/0025-mm-slub-separate-detaching-of-partial-list-in-unfree.patch | 10
-rw-r--r--  patches/0026-mm-slub-only-disable-irq-with-spin_lock-in-__unfreez.patch | 6
-rw-r--r--  patches/0027-mm-slub-don-t-disable-irqs-in-slub_cpu_dead.patch | 2
-rw-r--r--  patches/0028-mm-slab-make-flush_slab-possible-to-call-with-irqs-e.patch | 6
-rw-r--r--  patches/0029-mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch | 10
-rw-r--r--  patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch | 8
-rw-r--r--  patches/0033-mm-slub-protect-put_cpu_partial-with-disabled-irqs-i.patch | 6
-rw-r--r--  patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch | 14
-rw-r--r--  patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch | 38
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch | 2
-rw-r--r--  patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch | 6
-rw-r--r--  patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch | 2
-rw-r--r--  patches/lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch | 115
-rw-r--r--  patches/lockdep__disable_self-test.patch | 30
-rw-r--r--  patches/lockdep__selftest__Only_do_hardirq_context_test_for_raw_spinlock.patch | 59
-rw-r--r--  patches/lockdep__selftest__fix_warnings_due_to_missing_PREEMPT_RT_conditionals.patch | 148
-rw-r--r--  patches/locking-local_lock--Add-PREEMPT_RT-support.patch | 79
-rw-r--r--  patches/locking-local_lock--Add-missing-owner-initialization.patch | 87
-rw-r--r--  patches/locking-rtmutex--Set-proper-wait-context-for-lockdep.patch | 57
-rw-r--r--  patches/locking-spinlock-rt--Prepare-for-RT-local_lock.patch | 110
-rw-r--r--  patches/locking_RT__Add_might_sleeping_annotation..patch | 24
-rw-r--r--  patches/locking__dont_check_for___LINUX_SPINLOCK_TYPES_H_on_-RT_archs.patch | 147
-rw-r--r--  patches/locking_local_lock__Add_RT_support.patch | 87
-rw-r--r--  patches/locking_local_lock__Prepare_for_RT_support.patch | 76
-rw-r--r--  patches/locking_mutex__Consolidate_core_headers.patch | 35
-rw-r--r--  patches/locking_rtmutex__Add_mutex_variant_for_RT.patch | 21
-rw-r--r--  patches/locking_rtmutex__Include_only_rbtree_types.patch | 6
-rw-r--r--  patches/locking_rtmutex__Prevent_future_include_recursion_hell.patch | 6
-rw-r--r--  patches/locking_rwlock__Provide_RT_variant.patch | 61
-rw-r--r--  patches/locking_rwsem__Add_rtmutex_based_R_W_semaphore_implementation.patch | 37
-rw-r--r--  patches/locking_spinlock__Provide_RT_variant.patch | 11
-rw-r--r--  patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch | 4
-rw-r--r--  patches/rtmutex--Provide-rt_mutex_base_is_locked--.patch | 10
-rw-r--r--  patches/rtmutex--Remove-rt_mutex_is_locked--.patch | 2
-rw-r--r--  patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch | 42
-rw-r--r--  patches/rtmutex__Split_API_and_implementation.patch | 4
-rw-r--r--  patches/sched__Add_support_for_lazy_preemption.patch | 14
-rw-r--r--  patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch | 2
-rw-r--r--  patches/sched__Move_mmdrop_to_RCU_on_RT.patch | 4
-rw-r--r--  patches/series | 24
-rw-r--r--  patches/softirq__Disable_softirq_stacks_for_RT.patch | 2
74 files changed, 1569 insertions, 831 deletions
diff --git a/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch b/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
index 8ed00bc011f2..1e0a3c0e60c0 100644
--- a/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
+++ b/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -5822,9 +5822,6 @@ static int slab_debug_trace_open(struct
+@@ -5825,9 +5825,6 @@ static int slab_debug_trace_open(struct
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
return -ENOMEM;
diff --git a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
new file mode 100644
index 000000000000..b595324950fd
--- /dev/null
+++ b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 14:40:05 +0200
+Subject: [PATCH 01/10] sched: Trigger warning if ->migration_disabled counter
+ underflows.
+
+If migrate_enable() is used more often than its counterpart then it
+remains undetected and rq::nr_pinned will underflow, too.
+
+Add a warning if migrate_enable() is attempted without a matching
+migrate_disable().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -2119,6 +2119,8 @@ void migrate_enable(void)
+ if (p->migration_disabled > 1) {
+ p->migration_disabled--;
+ return;
++ } else if (WARN_ON_ONCE(p->migration_disabled == 0)) {
++ return;
+ }
+
+ /*
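For illustration only (not part of the queue): a minimal sketch of the imbalance
the new WARN_ON_ONCE() catches, assuming the usual migrate_disable()/migrate_enable()
pairing declared in <linux/preempt.h>.

	#include <linux/preempt.h>

	static void migrate_pin_demo(void)
	{
		migrate_disable();	/* pin the task to this CPU */
		/* ... touch per-CPU state ... */
		migrate_enable();	/* balanced: counter back to 0 */
		migrate_enable();	/* unbalanced: previously a silent underflow of
					 * ->migration_disabled and rq::nr_pinned,
					 * now caught by the warning above */
	}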
diff --git a/patches/0002-io-wq-Don-t-mix-raw_spinlock_irq-spin_lock_irq.patch b/patches/0002-io-wq-Don-t-mix-raw_spinlock_irq-spin_lock_irq.patch
index 84d9ebdea7d6..168dd41d81d6 100644
--- a/patches/0002-io-wq-Don-t-mix-raw_spinlock_irq-spin_lock_irq.patch
+++ b/patches/0002-io-wq-Don-t-mix-raw_spinlock_irq-spin_lock_irq.patch
@@ -10,7 +10,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
-@@ -414,7 +414,7 @@ static void io_wait_on_hash(struct io_wq
+@@ -413,7 +413,7 @@ static void io_wait_on_hash(struct io_wq
{
struct io_wq *wq = wqe->wq;
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (list_empty(&wqe->wait.entry)) {
__add_wait_queue(&wq->hash->wait, &wqe->wait);
if (!test_bit(hash, &wq->hash->map)) {
-@@ -422,7 +422,7 @@ static void io_wait_on_hash(struct io_wq
+@@ -421,7 +421,7 @@ static void io_wait_on_hash(struct io_wq
list_del_init(&wqe->wait.entry);
}
}
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
-@@ -460,9 +460,9 @@ static struct io_wq_work *io_get_next_wo
+@@ -459,9 +459,9 @@ static struct io_wq_work *io_get_next_wo
}
if (stall_hash != -1U) {
diff --git a/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch b/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
index 70023867233b..1d9cf5525b15 100644
--- a/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
+++ b/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object_map;
}
-@@ -4876,17 +4882,17 @@ static int add_location(struct loc_track
+@@ -4879,17 +4885,17 @@ static int add_location(struct loc_track
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SLUB_DEBUG */
-@@ -5813,14 +5819,21 @@ static int slab_debug_trace_open(struct
+@@ -5816,14 +5822,21 @@ static int slab_debug_trace_open(struct
struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
sizeof(struct loc_track));
struct kmem_cache *s = file_inode(filep)->i_private;
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
-@@ -5831,12 +5844,13 @@ static int slab_debug_trace_open(struct
+@@ -5834,12 +5847,13 @@ static int slab_debug_trace_open(struct
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
diff --git a/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch b/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
index afaa6338e103..dd9c26775a55 100644
--- a/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
+++ b/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -4676,11 +4676,11 @@ static int count_total(struct page *page
+@@ -4679,11 +4679,11 @@ static int count_total(struct page *page
#endif
#ifdef CONFIG_SLUB_DEBUG
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_lock(page);
-@@ -4688,21 +4688,20 @@ static void validate_slab(struct kmem_ca
+@@ -4691,21 +4691,20 @@ static void validate_slab(struct kmem_ca
goto unlock;
/* Now we know that a valid freelist exists */
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long count = 0;
struct page *page;
-@@ -4711,7 +4710,7 @@ static int validate_slab_node(struct kme
+@@ -4714,7 +4713,7 @@ static int validate_slab_node(struct kme
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list) {
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
count++;
}
if (count != n->nr_partial) {
-@@ -4724,7 +4723,7 @@ static int validate_slab_node(struct kme
+@@ -4727,7 +4726,7 @@ static int validate_slab_node(struct kme
goto out;
list_for_each_entry(page, &n->full, slab_list) {
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
count++;
}
if (count != atomic_long_read(&n->nr_slabs)) {
-@@ -4743,10 +4742,17 @@ long validate_slab_cache(struct kmem_cac
+@@ -4746,10 +4745,17 @@ long validate_slab_cache(struct kmem_cac
int node;
unsigned long count = 0;
struct kmem_cache_node *n;
diff --git a/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch b/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
new file mode 100644
index 000000000000..bd8827d7e3f6
--- /dev/null
+++ b/patches/0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
@@ -0,0 +1,47 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 13 Aug 2021 12:40:49 +0200
+Subject: [PATCH 03/10] rtmutex: Add a special case for ww-mutex handling.
+
+The lockdep selftest for ww-mutex assumes in a few cases that
+ww_ctx->contending_lock has been assigned via __ww_mutex_check_kill(),
+which does not happen if the rtmutex detects the deadlock early.
+
+The testcase passes if the deadlock handling here is removed. This means
+that it will work if multiple threads/tasks are involved and not just a
+single one.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1059,8 +1059,26 @@ static int __sched task_blocks_on_rt_mut
+ * which is wrong, as the other waiter is not in a deadlock
+ * situation.
+ */
+- if (owner == task)
++ if (owner == task) {
++#if defined(DEBUG_WW_MUTEXES) && defined(CONFIG_DEBUG_LOCKING_API_SELFTESTS)
++ /*
++ * The lockdep selftest for ww-mutex assumes in a few cases
++ * the ww_ctx->contending_lock assignment via
++ * __ww_mutex_check_kill() which does not happen if the rtmutex
++ * detects the deadlock early.
++ */
++ if (build_ww_mutex() && ww_ctx) {
++ struct rt_mutex *rtm;
++
++ /* Check whether the waiter should backout immediately */
++ rtm = container_of(lock, struct rt_mutex, rtmutex);
++
++ __ww_mutex_add_waiter(waiter, rtm, ww_ctx);
++ __ww_mutex_check_kill(rtm, waiter, ww_ctx);
++ }
++#endif
+ return -EDEADLK;
++ }
+
+ raw_spin_lock(&task->pi_lock);
+ waiter->task = task;
diff --git a/patches/0004-mm-slub-don-t-disable-irq-for-debug_check_no_locks_f.patch b/patches/0004-mm-slub-don-t-disable-irq-for-debug_check_no_locks_f.patch
index 4ee22a57a75a..aebe358c82ba 100644
--- a/patches/0004-mm-slub-don-t-disable-irq-for-debug_check_no_locks_f.patch
+++ b/patches/0004-mm-slub-don-t-disable-irq-for-debug_check_no_locks_f.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1588,20 +1588,8 @@ static __always_inline bool slab_free_ho
+@@ -1591,20 +1591,8 @@ static __always_inline bool slab_free_ho
{
kmemleak_free_recursive(x, s->flags);
diff --git a/patches/0004-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch b/patches/0004-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch
new file mode 100644
index 000000000000..7fe32d9d3d4a
--- /dev/null
+++ b/patches/0004-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch
@@ -0,0 +1,115 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 13 Aug 2021 13:49:49 +0200
+Subject: [PATCH 04/10] rtmutex: Add rt_mutex_lock_nest_lock() and
+ rt_mutex_lock_killable().
+
+The locking selftest for ww-mutex expects to operate directly on the
+base-mutex which becomes an rtmutex on PREEMPT_RT.
+
+Add rt_mutex_lock_nest_lock(), the rtmutex counterpart of
+mutex_lock_nest_lock().
+Add rt_mutex_lock_killable(), the rtmutex counterpart of mutex_lock_killable().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 9 +++++++++
+ kernel/locking/rtmutex_api.c | 30 ++++++++++++++++++++++++++----
+ 2 files changed, 35 insertions(+), 4 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mu
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
++extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
+ #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
++#define rt_mutex_lock_nest_lock(lock, nest_lock) \
++ do { \
++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \
++ _rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \
++ } while (0)
++
+ #else
+ extern void rt_mutex_lock(struct rt_mutex *lock);
+ #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
++#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
+ #endif
+
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
++extern int rt_mutex_lock_killable(struct rt_mutex *lock);
+ extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+ extern void rt_mutex_unlock(struct rt_mutex *lock);
+--- a/kernel/locking/rtmutex_api.c
++++ b/kernel/locking/rtmutex_api.c
+@@ -21,12 +21,13 @@ int max_lock_depth = 1024;
+ */
+ static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
+ unsigned int state,
++ struct lockdep_map *nest_lock,
+ unsigned int subclass)
+ {
+ int ret;
+
+ might_sleep();
+- mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
+ ret = __rt_mutex_lock(&lock->rtmutex, state);
+ if (ret)
+ mutex_release(&lock->dep_map, _RET_IP_);
+@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init);
+ */
+ void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
+ {
+- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+
++void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
++{
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
++}
++EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
++
+ #else /* !CONFIG_DEBUG_LOCK_ALLOC */
+
+ /**
+@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
+ */
+ void __sched rt_mutex_lock(struct rt_mutex *lock)
+ {
+- __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
++ __rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ #endif
+@@ -77,11 +84,26 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ {
+- return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
++ return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+ /**
++ * rt_mutex_lock_killable - lock a rt_mutex killable
++ *
++ * @lock: the rt_mutex to be locked
++ *
++ * Returns:
++ * 0 on success
++ * -EINTR when interrupted by a signal
++ */
++int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
++{
++ return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
++}
++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
++
++/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
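A short usage sketch of the two additions (illustrative only; the lock name and
the nested-lock scenario in the comment are made up):

	#include <linux/rtmutex.h>

	static DEFINE_RT_MUTEX(demo_lock);

	/* rt_mutex_lock_killable(): 0 on success, -EINTR on a fatal signal */
	static int demo_killable(void)
	{
		if (rt_mutex_lock_killable(&demo_lock))
			return -EINTR;
		/* ... critical section ... */
		rt_mutex_unlock(&demo_lock);
		return 0;
	}

	/*
	 * rt_mutex_lock_nest_lock() follows mutex_lock_nest_lock(): it tells
	 * lockdep that taking several locks of one class is serialized by an
	 * outer lock that is already held, e.g.:
	 *
	 *	rt_mutex_lock(&parent_lock);
	 *	rt_mutex_lock_nest_lock(&child_lock, &parent_lock);
	 */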
diff --git a/patches/lockdep__Make_it_RT_aware.patch b/patches/0005-lockdep-Make-it-RT-aware.patch
index 0bb19b65a6a2..833ded1f6536 100644
--- a/patches/lockdep__Make_it_RT_aware.patch
+++ b/patches/0005-lockdep-Make-it-RT-aware.patch
@@ -1,19 +1,25 @@
-Subject: lockdep: Make it RT aware
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun Jul 17 18:51:23 2011 +0200
+Date: Sun, 17 Jul 2011 18:51:23 +0200
+Subject: [PATCH 05/10] lockdep: Make it RT aware
-From: Thomas Gleixner <tglx@linutronix.de>
-
-teach lockdep that we don't really do softirqs on -RT.
+There is not really a softirq context on PREEMPT_RT.
+Softirqs on PREEMPT_RT are always invoked within the context of a threaded
+interrupt handler or within ksoftirqd. The "in-softirq" context is preemptible
+and is protected by a per-CPU lock to ensure mutual exclusion.
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+There is no difference on PREEMPT_RT between spin_lock_irq() and spin_lock()
+because the former does not disable interrupts. Therefore if a lock is used
+in_softirq() and locked once with spin_lock_irq() then lockdep will report this
+with "inconsistent {SOFTIRQ-ON-W} -> {IN-SOFTIRQ-W} usage".
+Teach lockdep that we don't really do softirqs on -RT.
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/irqflags.h | 23 +++++++++++++++--------
kernel/locking/lockdep.c | 2 ++
2 files changed, 17 insertions(+), 8 deletions(-)
----
+
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -71,14 +71,6 @@ do { \
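To make the quoted report concrete, a hypothetical example (not from the tree) of
the pattern that lockdep would flag on PREEMPT_RT without this change:

	#include <linux/spinlock.h>
	#include <linux/timer.h>

	static DEFINE_SPINLOCK(demo_lock);

	static void demo_task_context(void)
	{
		spin_lock_irq(&demo_lock);	/* on PREEMPT_RT: does not disable irqs */
		/* ... */
		spin_unlock_irq(&demo_lock);
	}

	static void demo_timer_cb(struct timer_list *unused)	/* runs in_softirq() */
	{
		spin_lock(&demo_lock);
		/* ... */
		spin_unlock(&demo_lock);
	}

	/*
	 * Without the patch, lockdep on PREEMPT_RT sees demo_lock taken with
	 * softirqs enabled (SOFTIRQ-ON-W) and also taken in softirq context
	 * (IN-SOFTIRQ-W) and reports the inconsistency, although both paths
	 * are preemptible and serialized by the sleeping lock itself.
	 */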
diff --git a/patches/0005-mm-slub-remove-redundant-unfreeze_partials-from-put_.patch b/patches/0005-mm-slub-remove-redundant-unfreeze_partials-from-put_.patch
index d29148f4c923..e9adac1c5455 100644
--- a/patches/0005-mm-slub-remove-redundant-unfreeze_partials-from-put_.patch
+++ b/patches/0005-mm-slub-remove-redundant-unfreeze_partials-from-put_.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2463,13 +2463,6 @@ static void put_cpu_partial(struct kmem_
+@@ -2466,13 +2466,6 @@ static void put_cpu_partial(struct kmem_
} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
!= oldpage);
diff --git a/patches/0006-lockdep-selftests-Add-rtmutex-to-the-last-column.patch b/patches/0006-lockdep-selftests-Add-rtmutex-to-the-last-column.patch
new file mode 100644
index 000000000000..5c8f7e1e0575
--- /dev/null
+++ b/patches/0006-lockdep-selftests-Add-rtmutex-to-the-last-column.patch
@@ -0,0 +1,23 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 16:16:54 +0200
+Subject: [PATCH 06/10] lockdep/selftests: Add rtmutex to the last column
+
+The last column contains the results for the rtmutex tests.
+Add it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -2812,7 +2812,7 @@ void locking_selftest(void)
+ printk("------------------------\n");
+ printk("| Locking API testsuite:\n");
+ printk("----------------------------------------------------------------------------\n");
+- printk(" | spin |wlock |rlock |mutex | wsem | rsem |\n");
++ printk(" | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
+ printk(" --------------------------------------------------------------------------\n");
+
+ init_shared_classes();
diff --git a/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch b/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
new file mode 100644
index 000000000000..6837b520496b
--- /dev/null
+++ b/patches/0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
@@ -0,0 +1,82 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 14:25:38 +0200
+Subject: [PATCH 07/10] lockdep/selftests: Unbalanced migrate_disable() &
+ rcu_read_lock()
+
+The tests with unbalanced lock() + unlock() operation leave a modified
+preemption counter behind which is then reset to its original value
+after the test.
+
+The spin_lock() function on PREEMPT_RT does not include a
+preempt_disable() statement but migrate_disable() and rcu_read_lock().
+As a consequence both counters never get back to their original values and
+the system explodes later after the selftest.
+In the double-unlock case on PREEMPT_RT, the migrate_disable() and RCU
+code will trigger, which should be avoided. These counters should not be
+decremented below their initial value.
+
+Save both counters and bring them back to their original values after the
+test.
+In the double-unlock case, increment both counters in advance so they
+become balanced after the double unlock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 26 +++++++++++++++++++++++++-
+ 1 file changed, 25 insertions(+), 1 deletion(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -712,12 +712,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
+
+ #undef E
+
++#ifdef CONFIG_PREEMPT_RT
++# define RT_PREPARE_DBL_UNLOCK() { migrate_disable(); rcu_read_lock(); }
++#else
++# define RT_PREPARE_DBL_UNLOCK()
++#endif
+ /*
+ * Double unlock:
+ */
+ #define E() \
+ \
+ LOCK(A); \
++ RT_PREPARE_DBL_UNLOCK(); \
+ UNLOCK(A); \
+ UNLOCK(A); /* fail */
+
+@@ -1398,7 +1404,13 @@ static int unexpected_testcase_failures;
+
+ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
+ {
+- unsigned long saved_preempt_count = preempt_count();
++ int saved_preempt_count = preempt_count();
++#ifdef CONFIG_PREEMPT_RT
++#ifdef CONFIG_SMP
++ int saved_mgd_count = current->migration_disabled;
++#endif
++ int saved_rcu_count = current->rcu_read_lock_nesting;
++#endif
+
+ WARN_ON(irqs_disabled());
+
+@@ -1432,6 +1444,18 @@ static void dotest(void (*testcase_fn)(v
+ * count, so restore it:
+ */
+ preempt_count_set(saved_preempt_count);
++
++#ifdef CONFIG_PREEMPT_RT
++#ifdef CONFIG_SMP
++ while (current->migration_disabled > saved_mgd_count)
++ migrate_enable();
++#endif
++
++ while (current->rcu_read_lock_nesting > saved_rcu_count)
++ rcu_read_unlock();
++ WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
++#endif
++
+ #ifdef CONFIG_TRACE_IRQFLAGS
+ if (softirq_count())
+ current->softirqs_enabled = 0;
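Why both counters are affected, as a simplified sketch (rough shape only, not the
literal PREEMPT_RT implementation; rtlock_acquire_sketch()/rtlock_release_sketch()
are placeholders for the real rtmutex-based lock operation): spin_lock() on
PREEMPT_RT pins the task and enters an RCU read-side section instead of disabling
preemption, so an unlock that never runs, or runs twice, leaves the counters
unbalanced.

	/* spin_lock() on PREEMPT_RT, roughly: */
	rtlock_acquire_sketch(lock);	/* placeholder for the real lock op */
	rcu_read_lock();
	migrate_disable();

	/* spin_unlock() on PREEMPT_RT, roughly the reverse: */
	migrate_enable();
	rcu_read_unlock();
	rtlock_release_sketch(lock);	/* placeholder */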
diff --git a/patches/0007-mm-slub-extract-get_partial-from-new_slab_objects.patch b/patches/0007-mm-slub-extract-get_partial-from-new_slab_objects.patch
index a4199e1d5825..38665c4767ff 100644
--- a/patches/0007-mm-slub-extract-get_partial-from-new_slab_objects.patch
+++ b/patches/0007-mm-slub-extract-get_partial-from-new_slab_objects.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2596,17 +2596,12 @@ slab_out_of_memory(struct kmem_cache *s,
+@@ -2599,17 +2599,12 @@ slab_out_of_memory(struct kmem_cache *s,
static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
int node, struct kmem_cache_cpu **pc)
{
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = new_slab(s, flags, node);
if (page) {
c = raw_cpu_ptr(s->cpu_slab);
-@@ -2770,6 +2765,10 @@ static void *___slab_alloc(struct kmem_c
+@@ -2773,6 +2768,10 @@ static void *___slab_alloc(struct kmem_c
goto redo;
}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
freelist = new_slab_objects(s, gfpflags, node, &c);
if (unlikely(!freelist)) {
-@@ -2777,6 +2776,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2780,6 +2779,7 @@ static void *___slab_alloc(struct kmem_c
return NULL;
}
diff --git a/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch b/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
new file mode 100644
index 000000000000..e458c4ac98d8
--- /dev/null
+++ b/patches/0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
@@ -0,0 +1,217 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 16:02:29 +0200
+Subject: [PATCH 08/10] lockdep/selftests: Skip the softirq related tests on
+ PREEMPT_RT
+
+The softirq context on PREEMPT_RT is different compared to !PREEMPT_RT.
+As such lockdep_softirq_enter() is a nop and all the "softirq safe"
+tests fail on PREEMPT_RT because there is no difference.
+
+Skip the softirq context tests on PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 38 +++++++++++++++++++++++++++++++-------
+ 1 file changed, 31 insertions(+), 7 deletions(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -26,6 +26,12 @@
+ #include <linux/rtmutex.h>
+ #include <linux/local_lock.h>
+
++#ifdef CONFIG_PREEMPT_RT
++# define NON_RT(...)
++#else
++# define NON_RT(...) __VA_ARGS__
++#endif
++
+ /*
+ * Change this to 1 if you want to see the failure printouts:
+ */
+@@ -808,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
+ #include "locking-selftest-wlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-spin-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
+
+@@ -816,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
+
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+
++#ifndef CONFIG_PREEMPT_RT
+ /*
+ * Enabling hardirqs with a softirq-safe lock held:
+ */
+@@ -852,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
+ #undef E1
+ #undef E2
+
++#endif
++
+ /*
+ * Enabling irqs with an irq-safe lock held:
+ */
+@@ -881,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
+ #include "locking-selftest-wlock-hardirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-spin-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
+
+@@ -889,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
+
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -927,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
+ #include "locking-selftest-wlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-spin-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
+
+@@ -935,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
+
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -975,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
+ #include "locking-selftest-wlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-spin-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
+
+@@ -983,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
+
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -1037,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
+ #include "locking-selftest-wlock-hardirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-spin-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
+
+@@ -1045,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
+
+ #include "locking-selftest-wlock-softirq.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -1212,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-softirq.h"
+ #include "locking-selftest-rlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
+
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -1258,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-softirq.h"
+ #include "locking-selftest-rlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
+
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
++#endif
+
+ #undef E1
+ #undef E2
+@@ -1312,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
+
++#ifndef CONFIG_PREEMPT_RT
+ #include "locking-selftest-softirq.h"
+ #include "locking-selftest-rlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
+
+ #include "locking-selftest-wlock.h"
+ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
++#endif
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ # define I_SPINLOCK(x) lockdep_reset_lock(&lock_##x.dep_map)
+@@ -1523,7 +1548,7 @@ static inline void print_testname(const
+
+ #define DO_TESTCASE_2x2RW(desc, name, nr) \
+ DO_TESTCASE_2RW("hard-"desc, name##_hard, nr) \
+- DO_TESTCASE_2RW("soft-"desc, name##_soft, nr) \
++ NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)) \
+
+ #define DO_TESTCASE_6x2x2RW(desc, name) \
+ DO_TESTCASE_2x2RW(desc, name, 123); \
+@@ -1571,19 +1596,19 @@ static inline void print_testname(const
+
+ #define DO_TESTCASE_2I(desc, name, nr) \
+ DO_TESTCASE_1("hard-"desc, name##_hard, nr); \
+- DO_TESTCASE_1("soft-"desc, name##_soft, nr);
++ NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
+
+ #define DO_TESTCASE_2IB(desc, name, nr) \
+ DO_TESTCASE_1B("hard-"desc, name##_hard, nr); \
+- DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
++ NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
+
+ #define DO_TESTCASE_6I(desc, name, nr) \
+ DO_TESTCASE_3("hard-"desc, name##_hard, nr); \
+- DO_TESTCASE_3("soft-"desc, name##_soft, nr);
++ NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
+
+ #define DO_TESTCASE_6IRW(desc, name, nr) \
+ DO_TESTCASE_3RW("hard-"desc, name##_hard, nr); \
+- DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
++ NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
+
+ #define DO_TESTCASE_2x3(desc, name) \
+ DO_TESTCASE_3(desc, name, 12); \
+@@ -2909,12 +2934,11 @@ void locking_selftest(void)
+ DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
+
+ printk(" --------------------------------------------------------------------------\n");
+-
+ /*
+ * irq-context testcases:
+ */
+ DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
+- DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
++ NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
+ DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
+ DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
diff --git a/patches/0008-mm-slub-dissolve-new_slab_objects-into-___slab_alloc.patch b/patches/0008-mm-slub-dissolve-new_slab_objects-into-___slab_alloc.patch
index 74a65e980bdf..f9434d20cd3e 100644
--- a/patches/0008-mm-slub-dissolve-new_slab_objects-into-___slab_alloc.patch
+++ b/patches/0008-mm-slub-dissolve-new_slab_objects-into-___slab_alloc.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1868,6 +1868,8 @@ static struct page *new_slab(struct kmem
+@@ -1871,6 +1871,8 @@ static struct page *new_slab(struct kmem
if (unlikely(flags & GFP_SLAB_BUG_MASK))
flags = kmalloc_fix_flags(flags);
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return allocate_slab(s,
flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
-@@ -2593,36 +2595,6 @@ slab_out_of_memory(struct kmem_cache *s,
+@@ -2596,36 +2598,6 @@ slab_out_of_memory(struct kmem_cache *s,
#endif
}
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
{
if (unlikely(PageSlabPfmemalloc(page)))
-@@ -2769,13 +2741,27 @@ static void *___slab_alloc(struct kmem_c
+@@ -2772,13 +2744,27 @@ static void *___slab_alloc(struct kmem_c
if (freelist)
goto check_new_page;
diff --git a/patches/0009-lockdep-selftests-Use-correct-depmap-for-local_lock-.patch b/patches/0009-lockdep-selftests-Use-correct-depmap-for-local_lock-.patch
new file mode 100644
index 000000000000..060136340be8
--- /dev/null
+++ b/patches/0009-lockdep-selftests-Use-correct-depmap-for-local_lock-.patch
@@ -0,0 +1,30 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 16:28:28 +0200
+Subject: [PATCH 09/10] lockdep/selftests: Use correct depmap for local_lock on
+ RT
+
+The local_lock_t structure on PREEMPT_RT does not provide a dep_map
+member. The dep_map is available in the inner lock member.
+
+Use the lock.dep_map for local_lock on PREEMPT_RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -1351,7 +1351,11 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+ # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
+ # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
+ # define I_WW(x) lockdep_reset_lock(&x.dep_map)
+-# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
++# ifdef CONFIG_PREEMPT_RT
++# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.lock.dep_map))
++# else
++# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
++# endif
+ #ifdef CONFIG_RT_MUTEXES
+ # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
+ #endif
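Background for the new ifdef, as a rough sketch of the two layouts (members
abridged and only indicative; what the description above guarantees is that the
PREEMPT_RT variant keeps its lockdep map inside the inner lock member):

	typedef struct {
	#ifdef CONFIG_PREEMPT_RT
		spinlock_t		lock;	/* lockdep state: lock.dep_map */
	#elif defined(CONFIG_DEBUG_LOCK_ALLOC)
		struct lockdep_map	dep_map;
		/* owner tracking, etc. */
	#endif
	} local_lock_t;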
diff --git a/patches/0009-mm-slub-return-slab-page-from-get_partial-and-set-c-.patch b/patches/0009-mm-slub-return-slab-page-from-get_partial-and-set-c-.patch
index 3eaf86ad4570..2c06299abe75 100644
--- a/patches/0009-mm-slub-return-slab-page-from-get_partial-and-set-c-.patch
+++ b/patches/0009-mm-slub-return-slab-page-from-get_partial-and-set-c-.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2000,7 +2000,7 @@ static inline bool pfmemalloc_match(stru
+@@ -2003,7 +2003,7 @@ static inline bool pfmemalloc_match(stru
* Try to allocate a partial slab from a specific node.
*/
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct page *page, *page2;
void *object = NULL;
-@@ -2029,7 +2029,7 @@ static void *get_partial_node(struct kme
+@@ -2032,7 +2032,7 @@ static void *get_partial_node(struct kme
available += objects;
if (!object) {
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, ALLOC_FROM_PARTIAL);
object = t;
} else {
-@@ -2049,7 +2049,7 @@ static void *get_partial_node(struct kme
+@@ -2052,7 +2052,7 @@ static void *get_partial_node(struct kme
* Get a page from somewhere. Search in increasing NUMA distances.
*/
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
#ifdef CONFIG_NUMA
struct zonelist *zonelist;
-@@ -2091,7 +2091,7 @@ static void *get_any_partial(struct kmem
+@@ -2094,7 +2094,7 @@ static void *get_any_partial(struct kmem
if (n && cpuset_zone_allowed(zone, flags) &&
n->nr_partial > s->min_partial) {
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (object) {
/*
* Don't check read_mems_allowed_retry()
-@@ -2113,7 +2113,7 @@ static void *get_any_partial(struct kmem
+@@ -2116,7 +2116,7 @@ static void *get_any_partial(struct kmem
* Get a partial page, lock it and return it.
*/
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
void *object;
int searchnode = node;
-@@ -2121,11 +2121,11 @@ static void *get_partial(struct kmem_cac
+@@ -2124,11 +2124,11 @@ static void *get_partial(struct kmem_cac
if (node == NUMA_NO_NODE)
searchnode = numa_mem_id();
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_PREEMPTION
-@@ -2737,9 +2737,11 @@ static void *___slab_alloc(struct kmem_c
+@@ -2740,9 +2740,11 @@ static void *___slab_alloc(struct kmem_c
goto redo;
}
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = new_slab(s, gfpflags, node);
-@@ -2763,7 +2765,6 @@ static void *___slab_alloc(struct kmem_c
+@@ -2766,7 +2768,6 @@ static void *___slab_alloc(struct kmem_c
c->page = page;
check_new_page:
diff --git a/patches/0010-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch b/patches/0010-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
new file mode 100644
index 000000000000..7f39759421e5
--- /dev/null
+++ b/patches/0010-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
@@ -0,0 +1,251 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 12 Aug 2021 18:13:39 +0200
+Subject: [PATCH 10/10] lockdep/selftests: Adapt ww-tests for PREEMPT_RT
+
+The ww-mutex selftest operates directly on ww_mutex::base and assumes
+its type is struct mutex. This isn't true on PREEMPT_RT which turns the
+mutex into an rtmutex.
+
+Add a ww_mutex_base_ abstraction which maps to the relevant mutex_ or
+rt_mutex_ function.
+Change the CONFIG_DEBUG_MUTEXES ifdef to DEBUG_WW_MUTEXES. The latter is
+true for the MUTEX and RTMUTEX implementation of WW-MUTEX. The
+assignment is required in order to pass the tests.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 74 +++++++++++++++++++++++++++++--------------------
+ 1 file changed, 44 insertions(+), 30 deletions(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -1704,6 +1704,20 @@ static void ww_test_fail_acquire(void)
+ #endif
+ }
+
++#ifdef CONFIG_PREEMPT_RT
++#define ww_mutex_base_lock(b) rt_mutex_lock(b)
++#define ww_mutex_base_lock_nest_lock(b, b2) rt_mutex_lock_nest_lock(b, b2)
++#define ww_mutex_base_lock_interruptible(b) rt_mutex_lock_interruptible(b)
++#define ww_mutex_base_lock_killable(b) rt_mutex_lock_killable(b)
++#define ww_mutex_base_unlock(b) rt_mutex_unlock(b)
++#else
++#define ww_mutex_base_lock(b) mutex_lock(b)
++#define ww_mutex_base_lock_nest_lock(b, b2) mutex_lock_nest_lock(b, b2)
++#define ww_mutex_base_lock_interruptible(b) mutex_lock_interruptible(b)
++#define ww_mutex_base_lock_killable(b) mutex_lock_killable(b)
++#define ww_mutex_base_unlock(b) mutex_unlock(b)
++#endif
++
+ static void ww_test_normal(void)
+ {
+ int ret;
+@@ -1718,50 +1732,50 @@ static void ww_test_normal(void)
+
+ /* mutex_lock (and indirectly, mutex_lock_nested) */
+ o.ctx = (void *)~0UL;
+- mutex_lock(&o.base);
+- mutex_unlock(&o.base);
++ ww_mutex_base_lock(&o.base);
++ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* mutex_lock_interruptible (and *_nested) */
+ o.ctx = (void *)~0UL;
+- ret = mutex_lock_interruptible(&o.base);
++ ret = ww_mutex_base_lock_interruptible(&o.base);
+ if (!ret)
+- mutex_unlock(&o.base);
++ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* mutex_lock_killable (and *_nested) */
+ o.ctx = (void *)~0UL;
+- ret = mutex_lock_killable(&o.base);
++ ret = ww_mutex_base_lock_killable(&o.base);
+ if (!ret)
+- mutex_unlock(&o.base);
++ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* trylock, succeeding */
+ o.ctx = (void *)~0UL;
+- ret = mutex_trylock(&o.base);
++ ret = ww_mutex_base_trylock(&o.base);
+ WARN_ON(!ret);
+ if (ret)
+- mutex_unlock(&o.base);
++ ww_mutex_base_unlock(&o.base);
+ else
+ WARN_ON(1);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* trylock, failing */
+ o.ctx = (void *)~0UL;
+- mutex_lock(&o.base);
+- ret = mutex_trylock(&o.base);
++ ww_mutex_base_lock(&o.base);
++ ret = ww_mutex_base_trylock(&o.base);
+ WARN_ON(ret);
+- mutex_unlock(&o.base);
++ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+
+ /* nest_lock */
+ o.ctx = (void *)~0UL;
+- mutex_lock_nest_lock(&o.base, &t);
+- mutex_unlock(&o.base);
++ ww_mutex_base_lock_nest_lock(&o.base, &t);
++ ww_mutex_base_unlock(&o.base);
+ WARN_ON(o.ctx != (void *)~0UL);
+ }
+
+@@ -1774,7 +1788,7 @@ static void ww_test_two_contexts(void)
+ static void ww_test_diff_class(void)
+ {
+ WWAI(&t);
+-#ifdef CONFIG_DEBUG_MUTEXES
++#ifdef DEBUG_WW_MUTEXES
+ t.ww_class = NULL;
+ #endif
+ WWL(&o, &t);
+@@ -1838,7 +1852,7 @@ static void ww_test_edeadlk_normal(void)
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ o2.ctx = &t2;
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+
+@@ -1854,7 +1868,7 @@ static void ww_test_edeadlk_normal(void)
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+- mutex_unlock(&o2.base);
++ ww_mutex_base_unlock(&o2.base);
+ WWU(&o);
+
+ WWL(&o2, &t);
+@@ -1864,7 +1878,7 @@ static void ww_test_edeadlk_normal_slow(
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+@@ -1880,7 +1894,7 @@ static void ww_test_edeadlk_normal_slow(
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+- mutex_unlock(&o2.base);
++ ww_mutex_base_unlock(&o2.base);
+ WWU(&o);
+
+ ww_mutex_lock_slow(&o2, &t);
+@@ -1890,7 +1904,7 @@ static void ww_test_edeadlk_no_unlock(vo
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ o2.ctx = &t2;
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+
+@@ -1906,7 +1920,7 @@ static void ww_test_edeadlk_no_unlock(vo
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+- mutex_unlock(&o2.base);
++ ww_mutex_base_unlock(&o2.base);
+
+ WWL(&o2, &t);
+ }
+@@ -1915,7 +1929,7 @@ static void ww_test_edeadlk_no_unlock_sl
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+@@ -1931,7 +1945,7 @@ static void ww_test_edeadlk_no_unlock_sl
+
+ o2.ctx = NULL;
+ mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
+- mutex_unlock(&o2.base);
++ ww_mutex_base_unlock(&o2.base);
+
+ ww_mutex_lock_slow(&o2, &t);
+ }
+@@ -1940,7 +1954,7 @@ static void ww_test_edeadlk_acquire_more
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+@@ -1961,7 +1975,7 @@ static void ww_test_edeadlk_acquire_more
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+@@ -1982,11 +1996,11 @@ static void ww_test_edeadlk_acquire_more
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+- mutex_lock(&o3.base);
++ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
+ o3.ctx = &t2;
+
+@@ -2008,11 +2022,11 @@ static void ww_test_edeadlk_acquire_more
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+- mutex_lock(&o3.base);
++ ww_mutex_base_lock(&o3.base);
+ mutex_release(&o3.base.dep_map, _THIS_IP_);
+ o3.ctx = &t2;
+
+@@ -2033,7 +2047,7 @@ static void ww_test_edeadlk_acquire_wron
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
+@@ -2058,7 +2072,7 @@ static void ww_test_edeadlk_acquire_wron
+ {
+ int ret;
+
+- mutex_lock(&o2.base);
++ ww_mutex_base_lock(&o2.base);
+ mutex_release(&o2.base.dep_map, _THIS_IP_);
+ o2.ctx = &t2;
+
diff --git a/patches/0010-mm-slub-restructure-new-page-checks-in-___slab_alloc.patch b/patches/0010-mm-slub-restructure-new-page-checks-in-___slab_alloc.patch
index 1f837a510862..b5511a0f418e 100644
--- a/patches/0010-mm-slub-restructure-new-page-checks-in-___slab_alloc.patch
+++ b/patches/0010-mm-slub-restructure-new-page-checks-in-___slab_alloc.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2765,13 +2765,29 @@ static void *___slab_alloc(struct kmem_c
+@@ -2768,13 +2768,29 @@ static void *___slab_alloc(struct kmem_c
c->page = page;
check_new_page:
diff --git a/patches/0011-mm-slub-simplify-kmem_cache_cpu-and-tid-setup.patch b/patches/0011-mm-slub-simplify-kmem_cache_cpu-and-tid-setup.patch
index cf20487fa6f0..37f53c3424ae 100644
--- a/patches/0011-mm-slub-simplify-kmem_cache_cpu-and-tid-setup.patch
+++ b/patches/0011-mm-slub-simplify-kmem_cache_cpu-and-tid-setup.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2865,15 +2865,14 @@ static __always_inline void *slab_alloc_
+@@ -2868,15 +2868,14 @@ static __always_inline void *slab_alloc_
* reading from one cpu area. That does not matter as long
* as we end up on the original cpu again when doing the cmpxchg.
*
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Irqless object alloc/free algorithm used here depends on sequence
-@@ -3147,11 +3146,8 @@ static __always_inline void do_slab_free
+@@ -3150,11 +3149,8 @@ static __always_inline void do_slab_free
* data is retrieved via this pointer. If we are on the same cpu
* during the cmpxchg then the free will succeed.
*/
diff --git a/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch b/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
index c833fcf3b0e7..73ca5d4db92c 100644
--- a/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
+++ b/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2653,7 +2653,7 @@ static inline void *get_freelist(struct
+@@ -2656,7 +2656,7 @@ static inline void *get_freelist(struct
* we need to allocate a new slab. This is the slowest path since it involves
* a call to the page allocator and the setup of a new slab.
*
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
-@@ -2661,9 +2661,11 @@ static void *___slab_alloc(struct kmem_c
+@@ -2664,9 +2664,11 @@ static void *___slab_alloc(struct kmem_c
{
void *freelist;
struct page *page;
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = c->page;
if (!page) {
/*
-@@ -2726,6 +2728,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2729,6 +2731,7 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return freelist;
new_slab:
-@@ -2743,14 +2746,16 @@ static void *___slab_alloc(struct kmem_c
+@@ -2746,14 +2749,16 @@ static void *___slab_alloc(struct kmem_c
goto check_new_page;
}
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (c->page)
flush_slab(s, c);
-@@ -2790,31 +2795,33 @@ static void *___slab_alloc(struct kmem_c
+@@ -2793,31 +2798,33 @@ static void *___slab_alloc(struct kmem_c
return_single:
deactivate_slab(s, page, get_freepointer(s, freelist), c);
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return p;
}
-@@ -3342,8 +3349,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3345,8 +3352,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* IRQs, which protects against PREEMPT and interrupts
* handlers invoking normal fastpath.
*/
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < size; i++) {
void *object = kfence_alloc(s, s->object_size, flags);
-@@ -3364,6 +3371,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3367,6 +3374,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
*/
c->tid = next_tid(c->tid);
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Invoking slow path likely have side-effect
* of re-populating per CPU c->freelist
-@@ -3376,6 +3385,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3379,6 +3388,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
@@ -161,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
-@@ -3384,6 +3395,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3387,6 +3398,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* memcg and kmem_cache debug support and memory initialization.
-@@ -3393,7 +3405,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3396,7 +3408,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
slab_want_init_on_alloc(flags, s));
return i;
error:
diff --git a/patches/0013-mm-slub-do-initial-checks-in-___slab_alloc-with-irqs.patch b/patches/0013-mm-slub-do-initial-checks-in-___slab_alloc-with-irqs.patch
index ae03bd959839..06584e4fd355 100644
--- a/patches/0013-mm-slub-do-initial-checks-in-___slab_alloc-with-irqs.patch
+++ b/patches/0013-mm-slub-do-initial-checks-in-___slab_alloc-with-irqs.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2665,8 +2665,9 @@ static void *___slab_alloc(struct kmem_c
+@@ -2668,8 +2668,9 @@ static void *___slab_alloc(struct kmem_c
stat(s, ALLOC_SLOWPATH);
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!page) {
/*
* if the node is not online or has no normal memory, just
-@@ -2675,6 +2676,11 @@ static void *___slab_alloc(struct kmem_c
+@@ -2678,6 +2679,11 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(node != NUMA_NO_NODE &&
!node_isset(node, slab_nodes)))
node = NUMA_NO_NODE;
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto new_slab;
}
redo:
-@@ -2689,8 +2695,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2692,8 +2698,7 @@ static void *___slab_alloc(struct kmem_c
goto redo;
} else {
stat(s, ALLOC_NODE_MISMATCH);
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2699,12 +2704,15 @@ static void *___slab_alloc(struct kmem_c
+@@ -2702,12 +2707,15 @@ static void *___slab_alloc(struct kmem_c
* PFMEMALLOC but right now, we are losing the pfmemalloc
* information when the page leaves the per-cpu allocator
*/
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
freelist = c->freelist;
if (freelist)
goto load_freelist;
-@@ -2720,6 +2728,9 @@ static void *___slab_alloc(struct kmem_c
+@@ -2723,6 +2731,9 @@ static void *___slab_alloc(struct kmem_c
stat(s, ALLOC_REFILL);
load_freelist:
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* freelist is pointing to the list of objects to be used.
* page is pointing to the page from which the objects are obtained.
-@@ -2731,11 +2742,23 @@ static void *___slab_alloc(struct kmem_c
+@@ -2734,11 +2745,23 @@ static void *___slab_alloc(struct kmem_c
local_irq_restore(flags);
return freelist;
diff --git a/patches/0014-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch b/patches/0014-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch
index 32003c814ae8..12c3971fc931 100644
--- a/patches/0014-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch
+++ b/patches/0014-mm-slub-move-disabling-irqs-closer-to-get_partial-in.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2676,11 +2676,6 @@ static void *___slab_alloc(struct kmem_c
+@@ -2679,11 +2679,6 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(node != NUMA_NO_NODE &&
!node_isset(node, slab_nodes)))
node = NUMA_NO_NODE;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto new_slab;
}
redo:
-@@ -2721,6 +2716,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2724,6 +2719,7 @@ static void *___slab_alloc(struct kmem_c
if (!freelist) {
c->page = NULL;
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
}
-@@ -2750,12 +2746,19 @@ static void *___slab_alloc(struct kmem_c
+@@ -2753,12 +2749,19 @@ static void *___slab_alloc(struct kmem_c
goto reread_page;
}
deactivate_slab(s, page, c->freelist, c);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = c->page = slub_percpu_partial(c);
slub_set_percpu_partial(c, page);
local_irq_restore(flags);
-@@ -2763,6 +2766,16 @@ static void *___slab_alloc(struct kmem_c
+@@ -2766,6 +2769,16 @@ static void *___slab_alloc(struct kmem_c
goto redo;
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
freelist = get_partial(s, gfpflags, node, &page);
if (freelist) {
c->page = page;
-@@ -2795,15 +2808,18 @@ static void *___slab_alloc(struct kmem_c
+@@ -2798,15 +2811,18 @@ static void *___slab_alloc(struct kmem_c
check_new_page:
if (kmem_cache_debug(s)) {
diff --git a/patches/0015-mm-slub-restore-irqs-around-calling-new_slab.patch b/patches/0015-mm-slub-restore-irqs-around-calling-new_slab.patch
index b34ac13ec1c5..8b51e8bea23d 100644
--- a/patches/0015-mm-slub-restore-irqs-around-calling-new_slab.patch
+++ b/patches/0015-mm-slub-restore-irqs-around-calling-new_slab.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1792,9 +1792,6 @@ static struct page *allocate_slab(struct
+@@ -1795,9 +1795,6 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
flags |= s->allocflags;
/*
-@@ -1853,8 +1850,6 @@ static struct page *allocate_slab(struct
+@@ -1856,8 +1853,6 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!page)
return NULL;
-@@ -2782,16 +2777,17 @@ static void *___slab_alloc(struct kmem_c
+@@ -2785,16 +2780,17 @@ static void *___slab_alloc(struct kmem_c
goto check_new_page;
}
diff --git a/patches/0016-mm-slub-validate-slab-from-partial-list-or-page-allo.patch b/patches/0016-mm-slub-validate-slab-from-partial-list-or-page-allo.patch
index f6b3aa9ee348..deb450363a76 100644
--- a/patches/0016-mm-slub-validate-slab-from-partial-list-or-page-allo.patch
+++ b/patches/0016-mm-slub-validate-slab-from-partial-list-or-page-allo.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2772,10 +2772,8 @@ static void *___slab_alloc(struct kmem_c
+@@ -2775,10 +2775,8 @@ static void *___slab_alloc(struct kmem_c
lockdep_assert_irqs_disabled();
freelist = get_partial(s, gfpflags, node, &page);
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
put_cpu_ptr(s->cpu_slab);
-@@ -2788,9 +2786,6 @@ static void *___slab_alloc(struct kmem_c
+@@ -2791,9 +2789,6 @@ static void *___slab_alloc(struct kmem_c
}
local_irq_save(flags);
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
-@@ -2799,14 +2794,12 @@ static void *___slab_alloc(struct kmem_c
+@@ -2802,14 +2797,12 @@ static void *___slab_alloc(struct kmem_c
page->freelist = NULL;
stat(s, ALLOC_SLAB);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
goto new_slab;
} else {
-@@ -2825,10 +2818,18 @@ static void *___slab_alloc(struct kmem_c
+@@ -2828,10 +2821,18 @@ static void *___slab_alloc(struct kmem_c
*/
goto return_single;
diff --git a/patches/0017-mm-slub-check-new-pages-with-restored-irqs.patch b/patches/0017-mm-slub-check-new-pages-with-restored-irqs.patch
index e67ba840d6c3..b63ef4e8f914 100644
--- a/patches/0017-mm-slub-check-new-pages-with-restored-irqs.patch
+++ b/patches/0017-mm-slub-check-new-pages-with-restored-irqs.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!PageSlab(page)) {
slab_err(s, page, "Not a valid slab page");
return 0;
-@@ -2772,10 +2770,10 @@ static void *___slab_alloc(struct kmem_c
+@@ -2775,10 +2773,10 @@ static void *___slab_alloc(struct kmem_c
lockdep_assert_irqs_disabled();
freelist = get_partial(s, gfpflags, node, &page);
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
put_cpu_ptr(s->cpu_slab);
page = new_slab(s, gfpflags, node);
c = get_cpu_ptr(s->cpu_slab);
-@@ -2785,7 +2783,6 @@ static void *___slab_alloc(struct kmem_c
+@@ -2788,7 +2786,6 @@ static void *___slab_alloc(struct kmem_c
return NULL;
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No other reference to the page yet so we can
* muck around with it freely without cmpxchg
-@@ -2800,7 +2797,6 @@ static void *___slab_alloc(struct kmem_c
+@@ -2803,7 +2800,6 @@ static void *___slab_alloc(struct kmem_c
if (kmem_cache_debug(s)) {
if (!alloc_debug_processing(s, page, freelist, addr)) {
/* Slab failed checks. Next slab needed */
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto new_slab;
} else {
/*
-@@ -2818,6 +2814,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2821,6 +2817,7 @@ static void *___slab_alloc(struct kmem_c
*/
goto return_single;
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(c->page))
flush_slab(s, c);
c->page = page;
-@@ -2826,6 +2823,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2829,6 +2826,7 @@ static void *___slab_alloc(struct kmem_c
return_single:
diff --git a/patches/0018-mm-slub-stop-disabling-irqs-around-get_partial.patch b/patches/0018-mm-slub-stop-disabling-irqs-around-get_partial.patch
index 8937456ce871..36de2b10c616 100644
--- a/patches/0018-mm-slub-stop-disabling-irqs-around-get_partial.patch
+++ b/patches/0018-mm-slub-stop-disabling-irqs-around-get_partial.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1993,11 +1993,12 @@ static inline bool pfmemalloc_match(stru
+@@ -1996,11 +1996,12 @@ static inline bool pfmemalloc_match(stru
* Try to allocate a partial slab from a specific node.
*/
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int objects;
/*
-@@ -2009,11 +2010,11 @@ static void *get_partial_node(struct kme
+@@ -2012,11 +2013,11 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
t = acquire_slab(s, n, page, object == NULL, &objects);
-@@ -2034,7 +2035,7 @@ static void *get_partial_node(struct kme
+@@ -2037,7 +2038,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object;
}
-@@ -2749,8 +2750,10 @@ static void *___slab_alloc(struct kmem_c
+@@ -2752,8 +2753,10 @@ static void *___slab_alloc(struct kmem_c
local_irq_restore(flags);
goto reread_page;
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = c->page = slub_percpu_partial(c);
slub_set_percpu_partial(c, page);
-@@ -2759,18 +2762,9 @@ static void *___slab_alloc(struct kmem_c
+@@ -2762,18 +2765,9 @@ static void *___slab_alloc(struct kmem_c
goto redo;
}
diff --git a/patches/0019-mm-slub-move-reset-of-c-page-and-freelist-out-of-dea.patch b/patches/0019-mm-slub-move-reset-of-c-page-and-freelist-out-of-dea.patch
index 7a361ec2ddb0..791c9e50d519 100644
--- a/patches/0019-mm-slub-move-reset-of-c-page-and-freelist-out-of-dea.patch
+++ b/patches/0019-mm-slub-move-reset-of-c-page-and-freelist-out-of-dea.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2192,10 +2192,13 @@ static void init_kmem_cache_cpus(struct
+@@ -2195,10 +2195,13 @@ static void init_kmem_cache_cpus(struct
}
/*
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
-@@ -2324,9 +2327,6 @@ static void deactivate_slab(struct kmem_
+@@ -2327,9 +2330,6 @@ static void deactivate_slab(struct kmem_
discard_slab(s, page);
stat(s, FREE_SLAB);
}
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2451,10 +2451,16 @@ static void put_cpu_partial(struct kmem_
+@@ -2454,10 +2454,16 @@ static void put_cpu_partial(struct kmem_
static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2739,7 +2745,10 @@ static void *___slab_alloc(struct kmem_c
+@@ -2742,7 +2748,10 @@ static void *___slab_alloc(struct kmem_c
local_irq_restore(flags);
goto reread_page;
}
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
new_slab:
-@@ -2818,11 +2827,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2821,11 +2830,7 @@ static void *___slab_alloc(struct kmem_c
return_single:
local_irq_save(flags);
diff --git a/patches/0020-mm-slub-make-locking-in-deactivate_slab-irq-safe.patch b/patches/0020-mm-slub-make-locking-in-deactivate_slab-irq-safe.patch
index 5774967749ea..678355c7d274 100644
--- a/patches/0020-mm-slub-make-locking-in-deactivate_slab-irq-safe.patch
+++ b/patches/0020-mm-slub-make-locking-in-deactivate_slab-irq-safe.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2206,6 +2206,7 @@ static void deactivate_slab(struct kmem_
+@@ -2209,6 +2209,7 @@ static void deactivate_slab(struct kmem_
enum slab_modes l = M_NONE, m = M_NONE;
void *nextfree, *freelist_iter, *freelist_tail;
int tail = DEACTIVATE_TO_HEAD;
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct page new;
struct page old;
-@@ -2281,7 +2282,7 @@ static void deactivate_slab(struct kmem_
+@@ -2284,7 +2285,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2292,7 +2293,7 @@ static void deactivate_slab(struct kmem_
+@@ -2295,7 +2296,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2309,14 +2310,14 @@ static void deactivate_slab(struct kmem_
+@@ -2312,14 +2313,14 @@ static void deactivate_slab(struct kmem_
}
l = m;
diff --git a/patches/0021-mm-slub-call-deactivate_slab-without-disabling-irqs.patch b/patches/0021-mm-slub-call-deactivate_slab-without-disabling-irqs.patch
index db649628420f..f1506c91c968 100644
--- a/patches/0021-mm-slub-call-deactivate_slab-without-disabling-irqs.patch
+++ b/patches/0021-mm-slub-call-deactivate_slab-without-disabling-irqs.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2749,8 +2749,8 @@ static void *___slab_alloc(struct kmem_c
+@@ -2752,8 +2752,8 @@ static void *___slab_alloc(struct kmem_c
freelist = c->freelist;
c->page = NULL;
c->freelist = NULL;
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
new_slab:
-@@ -2818,18 +2818,32 @@ static void *___slab_alloc(struct kmem_c
+@@ -2821,18 +2821,32 @@ static void *___slab_alloc(struct kmem_c
*/
goto return_single;
diff --git a/patches/0022-mm-slub-move-irq-control-into-unfreeze_partials.patch b/patches/0022-mm-slub-move-irq-control-into-unfreeze_partials.patch
index bb6998f56ce0..6a79f28bb712 100644
--- a/patches/0022-mm-slub-move-irq-control-into-unfreeze_partials.patch
+++ b/patches/0022-mm-slub-move-irq-control-into-unfreeze_partials.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2333,9 +2333,8 @@ static void deactivate_slab(struct kmem_
+@@ -2336,9 +2336,8 @@ static void deactivate_slab(struct kmem_
/*
* Unfreeze all the cpu partial slabs.
*
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static void unfreeze_partials(struct kmem_cache *s,
struct kmem_cache_cpu *c)
-@@ -2343,6 +2342,9 @@ static void unfreeze_partials(struct kme
+@@ -2346,6 +2345,9 @@ static void unfreeze_partials(struct kme
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while ((page = slub_percpu_partial(c))) {
struct page new;
-@@ -2395,6 +2397,8 @@ static void unfreeze_partials(struct kme
+@@ -2398,6 +2400,8 @@ static void unfreeze_partials(struct kme
discard_slab(s, page);
stat(s, FREE_SLAB);
}
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_SLUB_CPU_PARTIAL */
}
-@@ -2422,14 +2426,11 @@ static void put_cpu_partial(struct kmem_
+@@ -2425,14 +2429,11 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > slub_cpu_partial(s)) {
diff --git a/patches/0023-mm-slub-discard-slabs-in-unfreeze_partials-without-i.patch b/patches/0023-mm-slub-discard-slabs-in-unfreeze_partials-without-i.patch
index 01be57724761..58d0c6a18180 100644
--- a/patches/0023-mm-slub-discard-slabs-in-unfreeze_partials-without-i.patch
+++ b/patches/0023-mm-slub-discard-slabs-in-unfreeze_partials-without-i.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2389,6 +2389,8 @@ static void unfreeze_partials(struct kme
+@@ -2392,6 +2392,8 @@ static void unfreeze_partials(struct kme
if (n)
spin_unlock(&n->list_lock);
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (discard_page) {
page = discard_page;
discard_page = discard_page->next;
-@@ -2398,7 +2400,6 @@ static void unfreeze_partials(struct kme
+@@ -2401,7 +2403,6 @@ static void unfreeze_partials(struct kme
stat(s, FREE_SLAB);
}
diff --git a/patches/0024-mm-slub-detach-whole-partial-list-at-once-in-unfreez.patch b/patches/0024-mm-slub-detach-whole-partial-list-at-once-in-unfreez.patch
index e13d4d0af5c1..4b36f4376cb4 100644
--- a/patches/0024-mm-slub-detach-whole-partial-list-at-once-in-unfreez.patch
+++ b/patches/0024-mm-slub-detach-whole-partial-list-at-once-in-unfreez.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2341,16 +2341,20 @@ static void unfreeze_partials(struct kme
+@@ -2344,16 +2344,20 @@ static void unfreeze_partials(struct kme
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
struct kmem_cache_node *n = NULL, *n2 = NULL;
diff --git a/patches/0025-mm-slub-separate-detaching-of-partial-list-in-unfree.patch b/patches/0025-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
index bde2bb258cbc..363cfdd1c429 100644
--- a/patches/0025-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
+++ b/patches/0025-mm-slub-separate-detaching-of-partial-list-in-unfree.patch
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2330,25 +2330,15 @@ static void deactivate_slab(struct kmem_
+@@ -2333,25 +2333,15 @@ static void deactivate_slab(struct kmem_
}
}
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (partial_page) {
struct page new;
struct page old;
-@@ -2403,10 +2393,45 @@ static void unfreeze_partials(struct kme
+@@ -2406,10 +2396,45 @@ static void unfreeze_partials(struct kme
discard_slab(s, page);
stat(s, FREE_SLAB);
}
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Put a page that was just frozen (in __slab_free|get_partial_node) into a
* partial page slot if available.
-@@ -2435,7 +2460,7 @@ static void put_cpu_partial(struct kmem_
+@@ -2438,7 +2463,7 @@ static void put_cpu_partial(struct kmem_
* partial array is full. Move the existing
* set to the per node partial list.
*/
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2470,11 +2495,6 @@ static inline void flush_slab(struct kme
+@@ -2473,11 +2498,6 @@ static inline void flush_slab(struct kme
stat(s, CPUSLAB_FLUSH);
}
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
-@@ -2482,14 +2502,23 @@ static inline void __flush_cpu_slab(stru
+@@ -2485,14 +2505,23 @@ static inline void __flush_cpu_slab(stru
if (c->page)
flush_slab(s, c);
diff --git a/patches/0026-mm-slub-only-disable-irq-with-spin_lock-in-__unfreez.patch b/patches/0026-mm-slub-only-disable-irq-with-spin_lock-in-__unfreez.patch
index 074d47518f9b..6f52a063a54b 100644
--- a/patches/0026-mm-slub-only-disable-irq-with-spin_lock-in-__unfreez.patch
+++ b/patches/0026-mm-slub-only-disable-irq-with-spin_lock-in-__unfreez.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2335,9 +2335,7 @@ static void __unfreeze_partials(struct k
+@@ -2338,9 +2338,7 @@ static void __unfreeze_partials(struct k
{
struct kmem_cache_node *n = NULL, *n2 = NULL;
struct page *page, *discard_page = NULL;
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (partial_page) {
struct page new;
-@@ -2349,10 +2347,10 @@ static void __unfreeze_partials(struct k
+@@ -2352,10 +2350,10 @@ static void __unfreeze_partials(struct k
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
do {
-@@ -2381,9 +2379,7 @@ static void __unfreeze_partials(struct k
+@@ -2384,9 +2382,7 @@ static void __unfreeze_partials(struct k
}
if (n)
diff --git a/patches/0027-mm-slub-don-t-disable-irqs-in-slub_cpu_dead.patch b/patches/0027-mm-slub-don-t-disable-irqs-in-slub_cpu_dead.patch
index ea4a9f727f89..cf715091134f 100644
--- a/patches/0027-mm-slub-don-t-disable-irqs-in-slub_cpu_dead.patch
+++ b/patches/0027-mm-slub-don-t-disable-irqs-in-slub_cpu_dead.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2537,14 +2537,10 @@ static void flush_all(struct kmem_cache
+@@ -2540,14 +2540,10 @@ static void flush_all(struct kmem_cache
static int slub_cpu_dead(unsigned int cpu)
{
struct kmem_cache *s;
diff --git a/patches/0028-mm-slab-make-flush_slab-possible-to-call-with-irqs-e.patch b/patches/0028-mm-slab-make-flush_slab-possible-to-call-with-irqs-e.patch
index e239e13e4e25..dd475caaf163 100644
--- a/patches/0028-mm-slab-make-flush_slab-possible-to-call-with-irqs-e.patch
+++ b/patches/0028-mm-slab-make-flush_slab-possible-to-call-with-irqs-e.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2477,16 +2477,28 @@ static void put_cpu_partial(struct kmem_
+@@ -2480,16 +2480,28 @@ static void put_cpu_partial(struct kmem_
#endif /* CONFIG_SLUB_CPU_PARTIAL */
}
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, CPUSLAB_FLUSH);
}
-@@ -2496,7 +2508,7 @@ static inline void __flush_cpu_slab(stru
+@@ -2499,7 +2511,7 @@ static inline void __flush_cpu_slab(stru
struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
if (c->page)
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unfreeze_partials_cpu(s, c);
}
-@@ -2512,7 +2524,7 @@ static void flush_cpu_slab(void *d)
+@@ -2515,7 +2527,7 @@ static void flush_cpu_slab(void *d)
struct kmem_cache_cpu *c = this_cpu_ptr(s->cpu_slab);
if (c->page)
diff --git a/patches/0029-mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch b/patches/0029-mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
index b93bc5c96785..0907b8849131 100644
--- a/patches/0029-mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
+++ b/patches/0029-mm-slub-Move-flush_cpu_slab-invocations-__free_slab-.patch
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2513,33 +2513,79 @@ static inline void __flush_cpu_slab(stru
+@@ -2516,33 +2516,79 @@ static inline void __flush_cpu_slab(stru
unfreeze_partials_cpu(s, c);
}
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -4071,7 +4117,7 @@ int __kmem_cache_shutdown(struct kmem_ca
+@@ -4074,7 +4120,7 @@ int __kmem_cache_shutdown(struct kmem_ca
int node;
struct kmem_cache_node *n;
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Attempt to free all objects */
for_each_kmem_cache_node(s, node, n) {
free_partial(s, n);
-@@ -4347,7 +4393,7 @@ EXPORT_SYMBOL(kfree);
+@@ -4350,7 +4396,7 @@ EXPORT_SYMBOL(kfree);
* being allocated from last increasing the chance that the last objects
* are freed in them.
*/
@@ -145,7 +145,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int node;
int i;
-@@ -4359,7 +4405,6 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -4362,7 +4408,6 @@ int __kmem_cache_shrink(struct kmem_cach
unsigned long flags;
int ret = 0;
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_kmem_cache_node(s, node, n) {
INIT_LIST_HEAD(&discard);
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
-@@ -4409,13 +4454,21 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -4412,13 +4457,21 @@ int __kmem_cache_shrink(struct kmem_cach
return ret;
}
diff --git a/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch b/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
index 0be4892a6c11..20c099cc8277 100644
--- a/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
+++ b/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
-@@ -4054,9 +4066,10 @@ static void list_slab_objects(struct kme
+@@ -4057,9 +4069,10 @@ static void list_slab_objects(struct kme
void *addr = page_address(page);
unsigned long *map;
void *p;
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
-@@ -4067,7 +4080,7 @@ static void list_slab_objects(struct kme
+@@ -4070,7 +4083,7 @@ static void list_slab_objects(struct kme
}
}
put_map(map);
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -4799,8 +4812,9 @@ static void validate_slab(struct kmem_ca
+@@ -4802,8 +4815,9 @@ static void validate_slab(struct kmem_ca
{
void *p;
void *addr = page_address(page);
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!check_slab(s, page) || !on_freelist(s, page, NULL))
goto unlock;
-@@ -4815,7 +4829,7 @@ static void validate_slab(struct kmem_ca
+@@ -4818,7 +4832,7 @@ static void validate_slab(struct kmem_ca
break;
}
unlock:
diff --git a/patches/0033-mm-slub-protect-put_cpu_partial-with-disabled-irqs-i.patch b/patches/0033-mm-slub-protect-put_cpu_partial-with-disabled-irqs-i.patch
index d74723c0ac4a..7ecc48a04726 100644
--- a/patches/0033-mm-slub-protect-put_cpu_partial-with-disabled-irqs-i.patch
+++ b/patches/0033-mm-slub-protect-put_cpu_partial-with-disabled-irqs-i.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -2003,7 +2003,12 @@ static inline void *acquire_slab(struct
+@@ -2006,7 +2006,12 @@ static inline void *acquire_slab(struct
return freelist;
}
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
/*
-@@ -2437,14 +2442,6 @@ static void unfreeze_partials_cpu(struct
+@@ -2440,14 +2445,6 @@ static void unfreeze_partials_cpu(struct
__unfreeze_partials(s, partial_page);
}
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Put a page that was just frozen (in __slab_free|get_partial_node) into a
* partial page slot if available.
-@@ -2454,46 +2451,56 @@ static inline void unfreeze_partials_cpu
+@@ -2457,46 +2454,56 @@ static inline void unfreeze_partials_cpu
*/
static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
{
diff --git a/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch b/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
index d9058af75240..22c20bbbe03a 100644
--- a/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
+++ b/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DEFINE_STATIC_KEY_TRUE(slub_debug_enabled);
-@@ -2812,7 +2832,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2815,7 +2835,7 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(!pfmemalloc_match(page, gfpflags)))
goto deactivate_slab;
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_save(flags);
if (unlikely(page != c->page)) {
local_irq_restore(flags);
-@@ -2871,7 +2891,8 @@ static void *___slab_alloc(struct kmem_c
+@@ -2874,7 +2894,8 @@ static void *___slab_alloc(struct kmem_c
}
if (unlikely(!slub_percpu_partial(c))) {
local_irq_restore(flags);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
page = c->page = slub_percpu_partial(c);
-@@ -2887,9 +2908,9 @@ static void *___slab_alloc(struct kmem_c
+@@ -2890,9 +2911,9 @@ static void *___slab_alloc(struct kmem_c
if (freelist)
goto check_new_page;
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!page)) {
slab_out_of_memory(s, gfpflags, node);
-@@ -2972,12 +2993,12 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2975,12 +2996,12 @@ static void *__slab_alloc(struct kmem_ca
* cpu before disabling preemption. Need to reload cpu area
* pointer.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
return p;
}
-@@ -3506,7 +3527,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3509,7 +3530,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* IRQs, which protects against PREEMPT and interrupts
* handlers invoking normal fastpath.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_disable();
for (i = 0; i < size; i++) {
-@@ -3552,7 +3573,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3555,7 +3576,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* memcg and kmem_cache debug support and memory initialization.
-@@ -3562,7 +3583,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3565,7 +3586,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
slab_want_init_on_alloc(flags, s));
return i;
error:
diff --git a/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch b/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
index b58fa605569b..a976674cc793 100644
--- a/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
+++ b/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* SLUB assigns one slab for allocation to each processor.
* Allocations only occur from these slabs called cpu slabs.
-@@ -2228,9 +2266,13 @@ static inline void note_cmpxchg_failure(
+@@ -2231,9 +2269,13 @@ static inline void note_cmpxchg_failure(
static void init_kmem_cache_cpus(struct kmem_cache *s)
{
int cpu;
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2441,10 +2483,10 @@ static void unfreeze_partials(struct kme
+@@ -2444,10 +2486,10 @@ static void unfreeze_partials(struct kme
struct page *partial_page;
unsigned long flags;
@@ -165,7 +165,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (partial_page)
__unfreeze_partials(s, partial_page);
-@@ -2477,7 +2519,7 @@ static void put_cpu_partial(struct kmem_
+@@ -2480,7 +2522,7 @@ static void put_cpu_partial(struct kmem_
int pages = 0;
int pobjects = 0;
@@ -174,7 +174,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
oldpage = this_cpu_read(s->cpu_slab->partial);
-@@ -2505,7 +2547,7 @@ static void put_cpu_partial(struct kmem_
+@@ -2508,7 +2550,7 @@ static void put_cpu_partial(struct kmem_
this_cpu_write(s->cpu_slab->partial, page);
@@ -183,7 +183,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (page_to_unfreeze) {
__unfreeze_partials(s, page_to_unfreeze);
-@@ -2529,7 +2571,7 @@ static inline void flush_slab(struct kme
+@@ -2532,7 +2574,7 @@ static inline void flush_slab(struct kme
struct page *page;
if (lock)
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
freelist = c->freelist;
page = c->page;
-@@ -2539,7 +2581,7 @@ static inline void flush_slab(struct kme
+@@ -2542,7 +2584,7 @@ static inline void flush_slab(struct kme
c->tid = next_tid(c->tid);
if (lock)
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (page)
deactivate_slab(s, page, freelist);
-@@ -2833,9 +2875,9 @@ static void *___slab_alloc(struct kmem_c
+@@ -2836,9 +2878,9 @@ static void *___slab_alloc(struct kmem_c
goto deactivate_slab;
/* must check again c->page in case we got preempted and it changed */
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto reread_page;
}
freelist = c->freelist;
-@@ -2846,7 +2888,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2849,7 +2891,7 @@ static void *___slab_alloc(struct kmem_c
if (!freelist) {
c->page = NULL;
@@ -222,7 +222,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, DEACTIVATE_BYPASS);
goto new_slab;
}
-@@ -2855,7 +2897,11 @@ static void *___slab_alloc(struct kmem_c
+@@ -2858,7 +2900,11 @@ static void *___slab_alloc(struct kmem_c
load_freelist:
@@ -235,7 +235,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* freelist is pointing to the list of objects to be used.
-@@ -2865,39 +2911,39 @@ static void *___slab_alloc(struct kmem_c
+@@ -2868,39 +2914,39 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, CPU_PARTIAL_ALLOC);
goto redo;
}
-@@ -2950,7 +2996,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2953,7 +2999,7 @@ static void *___slab_alloc(struct kmem_c
retry_load_page:
@@ -292,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(c->page)) {
void *flush_freelist = c->freelist;
struct page *flush_page = c->page;
-@@ -2959,7 +3005,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2962,7 +3008,7 @@ static void *___slab_alloc(struct kmem_c
c->freelist = NULL;
c->tid = next_tid(c->tid);
@@ -301,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
deactivate_slab(s, flush_page, flush_freelist);
-@@ -3078,7 +3124,15 @@ static __always_inline void *slab_alloc_
+@@ -3081,7 +3127,15 @@ static __always_inline void *slab_alloc_
object = c->freelist;
page = c->page;
@@ -318,7 +318,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
object = __slab_alloc(s, gfpflags, node, addr, c);
} else {
void *next_object = get_freepointer_safe(s, object);
-@@ -3338,6 +3392,7 @@ static __always_inline void do_slab_free
+@@ -3341,6 +3395,7 @@ static __always_inline void do_slab_free
barrier();
if (likely(page == c->page)) {
@@ -326,7 +326,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void **freelist = READ_ONCE(c->freelist);
set_freepointer(s, tail_obj, freelist);
-@@ -3350,6 +3405,31 @@ static __always_inline void do_slab_free
+@@ -3353,6 +3408,31 @@ static __always_inline void do_slab_free
note_cmpxchg_failure("slab_free", s, tid);
goto redo;
}
@@ -358,7 +358,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, FREE_FASTPATH);
} else
__slab_free(s, page, head, tail_obj, cnt, addr);
-@@ -3528,7 +3608,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3531,7 +3611,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* handlers invoking normal fastpath.
*/
c = slub_get_cpu_ptr(s->cpu_slab);
@@ -367,7 +367,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < size; i++) {
void *object = kfence_alloc(s, s->object_size, flags);
-@@ -3549,7 +3629,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3552,7 +3632,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
*/
c->tid = next_tid(c->tid);
@@ -376,7 +376,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Invoking slow path likely have side-effect
-@@ -3563,7 +3643,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3566,7 +3646,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
@@ -385,7 +385,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue; /* goto for-loop */
}
-@@ -3572,7 +3652,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3575,7 +3655,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 8b36fd73f91a..6b1364508a7c 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt9
++-rt10
diff --git a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
index ea06ad3c3502..8858f108b70d 100644
--- a/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
+++ b/patches/KVM__arm_arm64__downgrade_preempt_disabled_region_to_migrate_disable.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
-@@ -768,7 +768,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -772,7 +772,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -817,7 +817,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -821,7 +821,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_user(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
continue;
}
-@@ -889,7 +889,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -893,7 +893,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, ret);
diff --git a/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch b/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
index cd938b8398a7..a05af414166a 100644
--- a/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
+++ b/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
task_numa_free(tsk, true);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4636,15 +4636,6 @@ static struct rq *finish_task_switch(str
+@@ -4638,15 +4638,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch b/patches/lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
new file mode 100644
index 000000000000..575ff85aca0e
--- /dev/null
+++ b/patches/lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
@@ -0,0 +1,115 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 13 Aug 2021 18:26:10 +0200
+Subject: [PATCH] lockdep/selftests: Avoid using
+ local_lock_{acquire|release}().
+
+The local_lock related functions
+ local_lock_acquire()
+ local_lock_release()
+
+are part of the internal implementation and should be avoided.
+Define the lock with DEFINE_PER_CPU() so the normal local_lock() function
+can be used.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ lib/locking-selftest.c | 30 +++++++++++++++---------------
+ 1 file changed, 15 insertions(+), 15 deletions(-)
+
+--- a/lib/locking-selftest.c
++++ b/lib/locking-selftest.c
+@@ -139,7 +139,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
+
+ #endif
+
+-static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
++static DEFINE_PER_CPU(local_lock_t, local_A);
+
+ /*
+ * non-inlined runtime initializers, to let separate locks share
+@@ -1320,7 +1320,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+ # define I_MUTEX(x) lockdep_reset_lock(&mutex_##x.dep_map)
+ # define I_RWSEM(x) lockdep_reset_lock(&rwsem_##x.dep_map)
+ # define I_WW(x) lockdep_reset_lock(&x.dep_map)
+-# define I_LOCAL_LOCK(x) lockdep_reset_lock(&local_##x.dep_map)
++# define I_LOCAL_LOCK(x) lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
+ #ifdef CONFIG_RT_MUTEXES
+ # define I_RTMUTEX(x) lockdep_reset_lock(&rtmutex_##x.dep_map)
+ #endif
+@@ -1380,7 +1380,7 @@ static void reset_locks(void)
+ init_shared_classes();
+ raw_spin_lock_init(&raw_lock_A);
+ raw_spin_lock_init(&raw_lock_B);
+- local_lock_init(&local_A);
++ local_lock_init(this_cpu_ptr(&local_A));
+
+ ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
+ memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
+@@ -2646,8 +2646,8 @@ static void wait_context_tests(void)
+
+ static void local_lock_2(void)
+ {
+- local_lock_acquire(&local_A); /* IRQ-ON */
+- local_lock_release(&local_A);
++ local_lock(&local_A); /* IRQ-ON */
++ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+@@ -2656,18 +2656,18 @@ static void local_lock_2(void)
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
+- local_lock_release(&local_A);
++ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle, false */
++ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+ }
+
+ static void local_lock_3A(void)
+ {
+- local_lock_acquire(&local_A); /* IRQ-ON */
++ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+- local_lock_release(&local_A);
++ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+@@ -2676,18 +2676,18 @@ static void local_lock_3A(void)
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+- local_lock_release(&local_A);
++ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
++ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+ }
+
+ static void local_lock_3B(void)
+ {
+- local_lock_acquire(&local_A); /* IRQ-ON */
++ local_lock(&local_A); /* IRQ-ON */
+ spin_lock(&lock_B); /* IRQ-ON */
+ spin_unlock(&lock_B);
+- local_lock_release(&local_A);
++ local_unlock(&local_A);
+
+ HARDIRQ_ENTER();
+ spin_lock(&lock_A); /* IN-IRQ */
+@@ -2696,8 +2696,8 @@ static void local_lock_3B(void)
+
+ HARDIRQ_DISABLE();
+ spin_lock(&lock_A);
+- local_lock_acquire(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+- local_lock_release(&local_A);
++ local_lock(&local_A); /* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
++ local_unlock(&local_A);
+ spin_unlock(&lock_A);
+ HARDIRQ_ENABLE();
+
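
For reference, the usage pattern the selftest is converted to looks roughly like
the sketch below (illustrative only, not part of the series; all identifiers are
made up): a per CPU local_lock_t declared with DEFINE_PER_CPU and taken through
the public local_lock()/local_unlock() API rather than the internal
local_lock_acquire()/local_lock_release() helpers.

#include <linux/local_lock.h>
#include <linux/percpu.h>

/* Hypothetical example data; with lockdep each CPU's copy would also be
 * set up via local_lock_init(), as the selftest does in reset_locks().
 */
static DEFINE_PER_CPU(local_lock_t, stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_count);

static void stats_inc(void)
{
        local_lock(&stats_lock);        /* instead of local_lock_acquire() */
        this_cpu_inc(stats_count);
        local_unlock(&stats_lock);      /* instead of local_lock_release() */
}
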
diff --git a/patches/lockdep__disable_self-test.patch b/patches/lockdep__disable_self-test.patch
deleted file mode 100644
index 797c503af62f..000000000000
--- a/patches/lockdep__disable_self-test.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-Subject: lockdep: disable self-test
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue Oct 17 16:36:18 2017 +0200
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-The self-test wasn't always 100% accurate for RT. We disabled a few
-tests which failed because they had a different semantic for RT. Some
-still reported false positives. Now the selftest locks up the system
-during boot and it needs to be investigated…
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- lib/Kconfig.debug | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
----
---- a/lib/Kconfig.debug
-+++ b/lib/Kconfig.debug
-@@ -1452,7 +1452,7 @@ config DEBUG_ATOMIC_SLEEP
-
- config DEBUG_LOCKING_API_SELFTESTS
- bool "Locking API boot-time self-tests"
-- depends on DEBUG_KERNEL
-+ depends on DEBUG_KERNEL && !PREEMPT_RT
- help
- Say Y here if you want the kernel to run a short self-test during
- bootup. The self-test checks whether common types of locking bugs
diff --git a/patches/lockdep__selftest__Only_do_hardirq_context_test_for_raw_spinlock.patch b/patches/lockdep__selftest__Only_do_hardirq_context_test_for_raw_spinlock.patch
deleted file mode 100644
index 6f214d1d35b0..000000000000
--- a/patches/lockdep__selftest__Only_do_hardirq_context_test_for_raw_spinlock.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-Subject: lockdep: selftest: Only do hardirq context test for raw spinlock
-From: Yong Zhang <yong.zhang@windriver.com>
-Date: Mon Apr 16 15:01:56 2012 +0800
-
-From: Yong Zhang <yong.zhang@windriver.com>
-
-On -rt there is no softirq context any more and rwlock is sleepable,
-disable softirq context test and rwlock+irq test.
-
-Signed-off-by: Yong Zhang <yong.zhang0@gmail.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Yong Zhang <yong.zhang@windriver.com>
-Link: http://lkml.kernel.org/r/1334559716-18447-3-git-send-email-yong.zhang0@gmail.com
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- lib/locking-selftest.c | 23 +++++++++++++++++++++++
- 1 file changed, 23 insertions(+)
----
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -2886,6 +2886,7 @@ void locking_selftest(void)
-
- printk(" --------------------------------------------------------------------------\n");
-
-+#ifndef CONFIG_PREEMPT_RT
- /*
- * irq-context testcases:
- */
-@@ -2900,6 +2901,28 @@ void locking_selftest(void)
- DO_TESTCASE_6x2x2RW("irq read-recursion #2", irq_read_recursion2);
- DO_TESTCASE_6x2x2RW("irq read-recursion #3", irq_read_recursion3);
-
-+#else
-+ /* On -rt, we only do hardirq context test for raw spinlock */
-+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12);
-+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21);
-+
-+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12);
-+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21);
-+
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321);
-+
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312);
-+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321);
-+#endif
- ww_tests();
-
- force_read_lock_recursive = 0;
diff --git a/patches/lockdep__selftest__fix_warnings_due_to_missing_PREEMPT_RT_conditionals.patch b/patches/lockdep__selftest__fix_warnings_due_to_missing_PREEMPT_RT_conditionals.patch
deleted file mode 100644
index 79d835aa2b4f..000000000000
--- a/patches/lockdep__selftest__fix_warnings_due_to_missing_PREEMPT_RT_conditionals.patch
+++ /dev/null
@@ -1,148 +0,0 @@
-Subject: lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals
-From: Josh Cartwright <josh.cartwright@ni.com>
-Date: Wed Jan 28 13:08:45 2015 -0600
-
-From: Josh Cartwright <josh.cartwright@ni.com>
-
-"lockdep: Selftest: Only do hardirq context test for raw spinlock"
-disabled the execution of certain tests with PREEMPT_RT, but did
-not prevent the tests from still being defined. This leads to warnings
-like:
-
- ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_12' defined but not used [-Wunused-function]
- ./linux/lib/locking-selftest.c:574:1: warning: 'irqsafe1_hard_rlock_21' defined but not used [-Wunused-function]
- ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_12' defined but not used [-Wunused-function]
- ./linux/lib/locking-selftest.c:577:1: warning: 'irqsafe1_hard_wlock_21' defined but not used [-Wunused-function]
- ./linux/lib/locking-selftest.c:580:1: warning: 'irqsafe1_soft_spin_12' defined but not used [-Wunused-function]
- ...
-
-Fixed by wrapping the test definitions in #ifndef CONFIG_PREEMPT_RT
-conditionals.
-
-
-Signed-off-by: Josh Cartwright <josh.cartwright@ni.com>
-Signed-off-by: Xander Huff <xander.huff@ni.com>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Gratian Crisan <gratian.crisan@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- lib/locking-selftest.c | 28 ++++++++++++++++++++++++++++
- 1 file changed, 28 insertions(+)
----
---- a/lib/locking-selftest.c
-+++ b/lib/locking-selftest.c
-@@ -796,6 +796,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-
-@@ -811,9 +813,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
-
-+#ifndef CONFIG_PREEMPT_RT
- /*
- * Enabling hardirqs with a softirq-safe lock held:
- */
-@@ -846,6 +851,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
- #undef E1
- #undef E2
-
-+#endif
-+
- /*
- * Enabling irqs with an irq-safe lock held:
- */
-@@ -869,6 +876,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-
-@@ -884,6 +893,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
-
-@@ -915,6 +926,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-
-@@ -930,6 +943,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
- #undef E3
-@@ -963,6 +978,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
- #include "locking-selftest-spin-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- #include "locking-selftest-rlock-hardirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-
-@@ -978,10 +995,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
- #include "locking-selftest-wlock-softirq.h"
- GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
-
-+#endif
-+
- #undef E1
- #undef E2
- #undef E3
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- /*
- * read-lock / write-lock irq inversion.
- *
-@@ -1171,6 +1192,11 @@ GENERATE_PERMUTATIONS_3_EVENTS(W1W2_R2R3
- #undef E1
- #undef E2
- #undef E3
-+
-+#endif
-+
-+#ifndef CONFIG_PREEMPT_RT
-+
- /*
- * read-lock / write-lock recursion that is actually safe.
- */
-@@ -1217,6 +1243,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
- #undef E2
- #undef E3
-
-+#endif
-+
- /*
- * read-lock / write-lock recursion that is unsafe.
- */
diff --git a/patches/locking-local_lock--Add-PREEMPT_RT-support.patch b/patches/locking-local_lock--Add-PREEMPT_RT-support.patch
new file mode 100644
index 000000000000..6e4bf800a988
--- /dev/null
+++ b/patches/locking-local_lock--Add-PREEMPT_RT-support.patch
@@ -0,0 +1,79 @@
+Subject: locking/local_lock: Add PREEMPT_RT support
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Aug 2021 10:35:01 +0200
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+On PREEMPT_RT enabled kernels local_lock maps to a per CPU 'sleeping'
+spinlock which protects the critical section while staying preemptible. CPU
+locality is established by disabling migration.
+
+Provide the necessary types and macros to substitute the non-RT variant.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+V5: New patch
+---
+ include/linux/local_lock_internal.h | 48 ++++++++++++++++++++++++++++++++++++
+ 1 file changed, 48 insertions(+)
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -6,6 +6,8 @@
+ #include <linux/percpu-defs.h>
+ #include <linux/lockdep.h>
+
++#ifndef CONFIG_PREEMPT_RT
++
+ typedef struct {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+@@ -95,3 +97,49 @@ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+ local_irq_restore(flags); \
+ } while (0)
++
++#else /* !CONFIG_PREEMPT_RT */
++
++/*
++ * On PREEMPT_RT local_lock maps to a per CPU spinlock which protects the
++ * critical section while staying preemptible.
++ */
++typedef struct {
++ spinlock_t lock;
++} local_lock_t;
++
++#define INIT_LOCAL_LOCK(lockname) { \
++ __LOCAL_SPIN_LOCK_UNLOCKED((lockname).lock) \
++ }
++
++#define __local_lock_init(l) \
++ do { \
++ local_spin_lock_init(&(l)->lock); \
++ } while (0)
++
++#define __local_lock(__lock) \
++ do { \
++ migrate_disable(); \
++ spin_lock(&(this_cpu_ptr((__lock)))->lock); \
++ } while (0)
++
++#define __local_lock_irq(lock) __local_lock(lock)
++
++#define __local_lock_irqsave(lock, flags) \
++ do { \
++ typecheck(unsigned long, flags); \
++ flags = 0; \
++ __local_lock(lock); \
++ } while (0)
++
++#define __local_unlock(__lock) \
++ do { \
++ spin_unlock(&(this_cpu_ptr((__lock)))->lock); \
++ migrate_enable(); \
++ } while (0)
++
++#define __local_unlock_irq(lock) __local_unlock(lock)
++
++#define __local_unlock_irqrestore(lock, flags) __local_unlock(lock)
++
++#endif /* CONFIG_PREEMPT_RT */
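
Call sites do not change with this; only the macro layer above differs between
the two configurations. A minimal sketch (made-up names, not part of the
series) of what that means for a user: on PREEMPT_RT the critical section
stays preemptible but CPU-local via migrate_disable() plus the per CPU
spinlock, and local_lock_irqsave() does not disable interrupts, it merely
sets flags to 0.

#include <linux/local_lock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(local_lock_t, counter_lock);
static DEFINE_PER_CPU(u64, counter);

static void counter_bump(void)
{
        unsigned long flags;

        /*
         * !PREEMPT_RT: local_irq_save() plus the lockdep annotation.
         * PREEMPT_RT:  migrate_disable() plus spin_lock() on this CPU's
         *              lock; flags is set to 0, interrupts stay enabled.
         */
        local_lock_irqsave(&counter_lock, flags);
        this_cpu_inc(counter);
        local_unlock_irqrestore(&counter_lock, flags);
}
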
diff --git a/patches/locking-local_lock--Add-missing-owner-initialization.patch b/patches/locking-local_lock--Add-missing-owner-initialization.patch
new file mode 100644
index 000000000000..a81ff1f5400a
--- /dev/null
+++ b/patches/locking-local_lock--Add-missing-owner-initialization.patch
@@ -0,0 +1,87 @@
+Subject: locking/local_lock: Add missing owner initialization
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Aug 2021 16:29:08 +0200
+
+If CONFIG_DEBUG_LOCK_ALLOC is enabled then local_lock_t has an 'owner'
+member which is checked for consistency, but nothing initializes it to
+zero explicitly.
+
+The static initializer does so implicitly, and the run time allocated per CPU
+storage is usually zero initialized as well, but relying on that is not
+really good practice.
+
+Fixes: 91710728d172 ("locking: Introduce local_lock()")
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+V5: New patch
+---
+ include/linux/local_lock_internal.h | 42 +++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 19 deletions(-)
+
+--- a/include/linux/local_lock_internal.h
++++ b/include/linux/local_lock_internal.h
+@@ -14,29 +14,14 @@ typedef struct {
+ } local_lock_t;
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define LL_DEP_MAP_INIT(lockname) \
++# define LOCAL_LOCK_DEBUG_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+- .lock_type = LD_LOCK_PERCPU, \
+- }
+-#else
+-# define LL_DEP_MAP_INIT(lockname)
+-#endif
++ .lock_type = LD_LOCK_PERCPU, \
++ }, \
++ .owner = NULL,
+
+-#define INIT_LOCAL_LOCK(lockname) { LL_DEP_MAP_INIT(lockname) }
+-
+-#define __local_lock_init(lock) \
+-do { \
+- static struct lock_class_key __key; \
+- \
+- debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
+- lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, 0, \
+- LD_WAIT_CONFIG, LD_WAIT_INV, \
+- LD_LOCK_PERCPU); \
+-} while (0)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ static inline void local_lock_acquire(local_lock_t *l)
+ {
+ lock_map_acquire(&l->dep_map);
+@@ -51,11 +36,30 @@ static inline void local_lock_release(lo
+ lock_map_release(&l->dep_map);
+ }
+
++static inline void local_lock_debug_init(local_lock_t *l)
++{
++ l->owner = NULL;
++}
+ #else /* CONFIG_DEBUG_LOCK_ALLOC */
++# define LOCAL_LOCK_DEBUG_INIT(lockname)
+ static inline void local_lock_acquire(local_lock_t *l) { }
+ static inline void local_lock_release(local_lock_t *l) { }
++static inline void local_lock_debug_init(local_lock_t *l) { }
+ #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
+
++#define INIT_LOCAL_LOCK(lockname) { LOCAL_LOCK_DEBUG_INIT(lockname) }
++
++#define __local_lock_init(lock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ debug_check_no_locks_freed((void *)lock, sizeof(*lock));\
++ lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \
++ 0, LD_WAIT_CONFIG, LD_WAIT_INV, \
++ LD_LOCK_PERCPU); \
++ local_lock_debug_init(lock); \
++} while (0)
++
+ #define __local_lock(lock) \
+ do { \
+ preempt_disable(); \
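
The case the changelog is about is per CPU storage set up at run time, which
is not covered by the static initializer. A small sketch (made-up names, not
part of the series): each CPU's copy goes through local_lock_init(), which
with this change also resets the owner explicitly instead of relying on the
allocation happening to be zeroed.

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct my_pcpu {
        local_lock_t lock;
        u64 count;
};

static struct my_pcpu __percpu *my_pcpu_alloc(void)
{
        struct my_pcpu __percpu *p = alloc_percpu(struct my_pcpu);
        int cpu;

        if (!p)
                return NULL;
        /* Explicit initialization of every CPU's lock. */
        for_each_possible_cpu(cpu)
                local_lock_init(&per_cpu_ptr(p, cpu)->lock);
        return p;
}
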
diff --git a/patches/locking-rtmutex--Set-proper-wait-context-for-lockdep.patch b/patches/locking-rtmutex--Set-proper-wait-context-for-lockdep.patch
new file mode 100644
index 000000000000..d50055af19e3
--- /dev/null
+++ b/patches/locking-rtmutex--Set-proper-wait-context-for-lockdep.patch
@@ -0,0 +1,57 @@
+Subject: locking/rtmutex: Set proper wait context for lockdep
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Aug 2021 23:59:51 +0200
+
+RT mutexes belong to the LD_WAIT_SLEEP class. Make them so.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+V5: New patch
+---
+ include/linux/rtmutex.h | 19 ++++++++++++-------
+ kernel/locking/rtmutex.c | 2 +-
+ 2 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -52,17 +52,22 @@ do { \
+ } while (0)
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+-#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+- , .dep_map = { .name = #mutexname }
++#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
++ .dep_map = { \
++ .name = #mutexname, \
++ .wait_type_inner = LD_WAIT_SLEEP, \
++ }
+ #else
+ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
+ #endif
+
+-#define __RT_MUTEX_INITIALIZER(mutexname) \
+- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+- , .waiters = RB_ROOT_CACHED \
+- , .owner = NULL \
+- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
++#define __RT_MUTEX_INITIALIZER(mutexname) \
++{ \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock), \
++ .waiters = RB_ROOT_CACHED, \
++ .owner = NULL, \
++ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
++}
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1556,7 +1556,7 @@ void __sched __rt_mutex_init(struct rt_m
+ struct lock_class_key *key)
+ {
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+- lockdep_init_map(&lock->dep_map, name, key, 0);
++ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
+
+ __rt_mutex_basic_init(lock);
+ }
diff --git a/patches/locking-spinlock-rt--Prepare-for-RT-local_lock.patch b/patches/locking-spinlock-rt--Prepare-for-RT-local_lock.patch
new file mode 100644
index 000000000000..55a83438a93c
--- /dev/null
+++ b/patches/locking-spinlock-rt--Prepare-for-RT-local_lock.patch
@@ -0,0 +1,110 @@
+Subject: locking/spinlock/rt: Prepare for RT local_lock
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 13 Aug 2021 17:00:22 +0200
+
+Add the static and runtime initializer mechanics to support the RT variant
+of local_lock, which requires the lock type in the lockdep map to be set
+to LD_LOCK_PERCPU.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+V5: New patch
+---
+ include/linux/spinlock_rt.h | 24 ++++++++++++++++--------
+ include/linux/spinlock_types.h | 6 ++++++
+ include/linux/spinlock_types_raw.h | 8 ++++++++
+ kernel/locking/spinlock_rt.c | 7 +++++--
+ 4 files changed, 35 insertions(+), 10 deletions(-)
+
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -8,20 +8,28 @@
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+- struct lock_class_key *key);
++ struct lock_class_key *key, bool percpu);
+ #else
+ static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+- struct lock_class_key *key)
++ struct lock_class_key *key, bool percpu)
+ {
+ }
+ #endif
+
+-#define spin_lock_init(slock) \
+-do { \
+- static struct lock_class_key __key; \
+- \
+- rt_mutex_base_init(&(slock)->lock); \
+- __rt_spin_lock_init(slock, #slock, &__key); \
++#define spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_base_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key, false); \
++} while (0)
++
++#define local_spin_lock_init(slock) \
++do { \
++ static struct lock_class_key __key; \
++ \
++ rt_mutex_base_init(&(slock)->lock); \
++ __rt_spin_lock_init(slock, #slock, &__key, true); \
+ } while (0)
+
+ extern void rt_spin_lock(spinlock_t *lock);
+--- a/include/linux/spinlock_types.h
++++ b/include/linux/spinlock_types.h
+@@ -60,6 +60,12 @@ typedef struct spinlock {
+ SPIN_DEP_MAP_INIT(name) \
+ }
+
++#define __LOCAL_SPIN_LOCK_UNLOCKED(name) \
++ { \
++ .lock = __RT_MUTEX_BASE_INITIALIZER(name.lock), \
++ LOCAL_SPIN_DEP_MAP_INIT(name) \
++ }
++
+ #define DEFINE_SPINLOCK(name) \
+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+--- a/include/linux/spinlock_types_raw.h
++++ b/include/linux/spinlock_types_raw.h
+@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
++
++# define LOCAL_SPIN_DEP_MAP_INIT(lockname) \
++ .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_CONFIG, \
++ .lock_type = LD_LOCK_PERCPU, \
++ }
+ #else
+ # define RAW_SPIN_DEP_MAP_INIT(lockname)
+ # define SPIN_DEP_MAP_INIT(lockname)
++# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
+ #endif
+
+ #ifdef CONFIG_DEBUG_SPINLOCK
+--- a/kernel/locking/spinlock_rt.c
++++ b/kernel/locking/spinlock_rt.c
+@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void __rt_spin_lock_init(spinlock_t *lock, const char *name,
+- struct lock_class_key *key)
++ struct lock_class_key *key, bool percpu)
+ {
++ u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
++
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+- lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
++ lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
++ LD_WAIT_INV, type);
+ }
+ EXPORT_SYMBOL(__rt_spin_lock_init);
+ #endif
diff --git a/patches/locking_RT__Add_might_sleeping_annotation..patch b/patches/locking_RT__Add_might_sleeping_annotation..patch
deleted file mode 100644
index f5a29aab6251..000000000000
--- a/patches/locking_RT__Add_might_sleeping_annotation..patch
+++ /dev/null
@@ -1,24 +0,0 @@
-Subject: locking/RT: Add might sleeping annotation.
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu May 20 18:09:38 2021 +0200
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- kernel/locking/spinlock_rt.c | 1 +
- 1 file changed, 1 insertion(+)
----
---- a/kernel/locking/spinlock_rt.c
-+++ b/kernel/locking/spinlock_rt.c
-@@ -32,6 +32,7 @@ static __always_inline void rtlock_lock(
-
- static __always_inline void __rt_spin_lock(spinlock_t *lock)
- {
-+ ___might_sleep(__FILE__, __LINE__, 0);
- rtlock_lock(&lock->lock);
- rcu_read_lock();
- migrate_disable();
diff --git a/patches/locking__dont_check_for___LINUX_SPINLOCK_TYPES_H_on_-RT_archs.patch b/patches/locking__dont_check_for___LINUX_SPINLOCK_TYPES_H_on_-RT_archs.patch
deleted file mode 100644
index 68c4156da2d0..000000000000
--- a/patches/locking__dont_check_for___LINUX_SPINLOCK_TYPES_H_on_-RT_archs.patch
+++ /dev/null
@@ -1,147 +0,0 @@
-Subject: locking: don't check for __LINUX_SPINLOCK_TYPES_H on -RT archs
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri Aug 4 17:40:42 2017 +0200
-
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
-Upstream uses arch_spinlock_t within spinlock_t and requests that
-spinlock_types.h header file is included first.
-On -RT we have the rt_mutex with its raw_lock wait_lock which needs
-architectures' spinlock_types.h header file for its definition. However
-we need rt_mutex first because it is used to build the spinlock_t so
-that check does not work for us.
-Therefore I am dropping that check.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- arch/alpha/include/asm/spinlock_types.h | 4 ----
- arch/arm/include/asm/spinlock_types.h | 4 ----
- arch/arm64/include/asm/spinlock_types.h | 4 ----
- arch/hexagon/include/asm/spinlock_types.h | 4 ----
- arch/ia64/include/asm/spinlock_types.h | 4 ----
- arch/powerpc/include/asm/spinlock_types.h | 4 ----
- arch/s390/include/asm/spinlock_types.h | 4 ----
- arch/sh/include/asm/spinlock_types.h | 4 ----
- arch/xtensa/include/asm/spinlock_types.h | 4 ----
- 9 files changed, 36 deletions(-)
----
---- a/arch/alpha/include/asm/spinlock_types.h
-+++ b/arch/alpha/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef _ALPHA_SPINLOCK_TYPES_H
- #define _ALPHA_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
---- a/arch/arm/include/asm/spinlock_types.h
-+++ b/arch/arm/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #define TICKET_SHIFT 16
-
- typedef struct {
---- a/arch/arm64/include/asm/spinlock_types.h
-+++ b/arch/arm64/include/asm/spinlock_types.h
-@@ -5,10 +5,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
--# error "please don't include this file directly"
--#endif
--
- #include <asm-generic/qspinlock_types.h>
- #include <asm-generic/qrwlock_types.h>
-
---- a/arch/hexagon/include/asm/spinlock_types.h
-+++ b/arch/hexagon/include/asm/spinlock_types.h
-@@ -8,10 +8,6 @@
- #ifndef _ASM_SPINLOCK_TYPES_H
- #define _ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
---- a/arch/ia64/include/asm/spinlock_types.h
-+++ b/arch/ia64/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef _ASM_IA64_SPINLOCK_TYPES_H
- #define _ASM_IA64_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
---- a/arch/powerpc/include/asm/spinlock_types.h
-+++ b/arch/powerpc/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
- #define _ASM_POWERPC_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- #ifdef CONFIG_PPC_QUEUED_SPINLOCKS
- #include <asm-generic/qspinlock_types.h>
- #include <asm-generic/qrwlock_types.h>
---- a/arch/s390/include/asm/spinlock_types.h
-+++ b/arch/s390/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- int lock;
- } arch_spinlock_t;
---- a/arch/sh/include/asm/spinlock_types.h
-+++ b/arch/sh/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SH_SPINLOCK_TYPES_H
- #define __ASM_SH_SPINLOCK_TYPES_H
-
--#ifndef __LINUX_SPINLOCK_TYPES_H
--# error "please don't include this file directly"
--#endif
--
- typedef struct {
- volatile unsigned int lock;
- } arch_spinlock_t;
---- a/arch/xtensa/include/asm/spinlock_types.h
-+++ b/arch/xtensa/include/asm/spinlock_types.h
-@@ -2,10 +2,6 @@
- #ifndef __ASM_SPINLOCK_TYPES_H
- #define __ASM_SPINLOCK_TYPES_H
-
--#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
--# error "please don't include this file directly"
--#endif
--
- #include <asm-generic/qspinlock_types.h>
- #include <asm-generic/qrwlock_types.h>
-
diff --git a/patches/locking_local_lock__Add_RT_support.patch b/patches/locking_local_lock__Add_RT_support.patch
deleted file mode 100644
index 1ee0071d957b..000000000000
--- a/patches/locking_local_lock__Add_RT_support.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-Subject: locking/local_lock: Add RT support
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Apr 13 23:34:56 2021 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-On PREEMPT_RT enabled kernels local_lock has a real spinlock
-inside. Provide the necessary macros to substitute the non-RT variants.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- include/linux/local_lock_internal.h | 55 ++++++++++++++++++++++++++++++++++++
- 1 file changed, 55 insertions(+)
----
---- a/include/linux/local_lock_internal.h
-+++ b/include/linux/local_lock_internal.h
-@@ -6,6 +6,8 @@
- #include <linux/percpu-defs.h>
- #include <linux/lockdep.h>
-
-+#ifndef CONFIG_PREEMPT_RT
-+
- typedef struct {
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-@@ -63,6 +65,59 @@ static inline void local_lock_release(lo
- #define ll_local_irq_save(flags) local_irq_save(flags)
- #define ll_local_irq_restore(flags) local_irq_restore(flags)
-
-+#else /* !CONFIG_PREEMPT_RT */
-+
-+/*
-+ * The preempt RT mapping of local locks: a spinlock.
-+ */
-+typedef struct {
-+ spinlock_t lock;
-+} local_lock_t;
-+
-+#define INIT_LOCAL_LOCK(lockname) { \
-+ __SPIN_LOCK_UNLOCKED((lockname).lock), \
-+ }
-+
-+#define __local_lock_init(l) \
-+do { \
-+ spin_lock_init(&(l)->lock); \
-+} while (0)
-+
-+static inline void local_lock_acquire(local_lock_t *l)
-+{
-+ spin_lock(&l->lock);
-+}
-+
-+static inline void local_lock_release(local_lock_t *l)
-+{
-+ spin_unlock(&l->lock);
-+}
-+
-+/*
-+ * On RT enabled kernels the serialization is guaranteed by the spinlock in
-+ * local_lock_t, so the only guarantee to make is to not leave the CPU.
-+ */
-+#define ll_preempt_disable() migrate_disable()
-+#define ll_preempt_enable() migrate_enable()
-+#define ll_local_irq_disable() migrate_disable()
-+#define ll_local_irq_enable() migrate_enable()
-+
-+#define ll_local_irq_save(flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ flags = 0; \
-+ migrate_disable(); \
-+ } while (0)
-+
-+#define ll_local_irq_restore(flags) \
-+ do { \
-+ typecheck(unsigned long, flags); \
-+ (void)flags; \
-+ migrate_enable(); \
-+ } while (0)
-+
-+#endif /* CONFIG_PREEMPT_RT */
-+
- #define __local_lock(lock) \
- do { \
- ll_preempt_disable(); \
diff --git a/patches/locking_local_lock__Prepare_for_RT_support.patch b/patches/locking_local_lock__Prepare_for_RT_support.patch
deleted file mode 100644
index ed5f43fbcfe3..000000000000
--- a/patches/locking_local_lock__Prepare_for_RT_support.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-Subject: locking/local_lock: Prepare for RT support
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Apr 13 23:26:09 2021 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-PREEMPT_RT enabled kernels will add a real lock to local_lock and have to
-replace the preemption/interrupt disable/enable pairs by
-migrate_disable/enable pairs.
-
-To avoid duplicating the inline helpers for RT provide defines
-which map the relevant invocations to the non-RT variants.
-
-No functional change.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
-
----
- include/linux/local_lock_internal.h | 19 +++++++++++++------
- 1 file changed, 13 insertions(+), 6 deletions(-)
----
---- a/include/linux/local_lock_internal.h
-+++ b/include/linux/local_lock_internal.h
-@@ -56,38 +56,45 @@ static inline void local_lock_acquire(lo
- static inline void local_lock_release(local_lock_t *l) { }
- #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
-
-+#define ll_preempt_disable() preempt_disable()
-+#define ll_preempt_enable() preempt_enable()
-+#define ll_local_irq_disable() local_irq_disable()
-+#define ll_local_irq_enable() local_irq_enable()
-+#define ll_local_irq_save(flags) local_irq_save(flags)
-+#define ll_local_irq_restore(flags) local_irq_restore(flags)
-+
- #define __local_lock(lock) \
- do { \
-- preempt_disable(); \
-+ ll_preempt_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
- #define __local_lock_irq(lock) \
- do { \
-- local_irq_disable(); \
-+ ll_local_irq_disable(); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
- #define __local_lock_irqsave(lock, flags) \
- do { \
-- local_irq_save(flags); \
-+ ll_local_irq_save(flags); \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
- #define __local_unlock(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
-- preempt_enable(); \
-+ ll_preempt_enable(); \
- } while (0)
-
- #define __local_unlock_irq(lock) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
-- local_irq_enable(); \
-+ ll_local_irq_enable(); \
- } while (0)
-
- #define __local_unlock_irqrestore(lock, flags) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
-- local_irq_restore(flags); \
-+ ll_local_irq_restore(flags); \
- } while (0)
diff --git a/patches/locking_mutex__Consolidate_core_headers.patch b/patches/locking_mutex__Consolidate_core_headers.patch
index 3995eb04cd1f..94f76073b117 100644
--- a/patches/locking_mutex__Consolidate_core_headers.patch
+++ b/patches/locking_mutex__Consolidate_core_headers.patch
@@ -9,16 +9,13 @@ is mostly waste of disc space and has no real value. Stick the debug
variants into the common mutex.h file as counterpart to the stubs for the
non-debug case.
-That allows to add helpers and defines to the common header for the
-upcoming handling of mutexes and ww_mutexes on PREEMPT_RT.
-
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/locking/mutex-debug.c | 4 +---
kernel/locking/mutex-debug.h | 29 -----------------------------
kernel/locking/mutex.c | 6 +-----
- kernel/locking/mutex.h | 20 +++++++++++++++++---
- 4 files changed, 19 insertions(+), 40 deletions(-)
+ kernel/locking/mutex.h | 37 +++++++++++++++++++++++--------------
+ 4 files changed, 25 insertions(+), 51 deletions(-)
delete mode 100644 kernel/locking/mutex-debug.h
---
--- a/kernel/locking/mutex-debug.c
@@ -88,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
--- a/kernel/locking/mutex.h
+++ b/kernel/locking/mutex.h
-@@ -5,11 +5,24 @@
+@@ -5,19 +5,28 @@
* started by Ingo Molnar:
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
@@ -97,6 +94,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * !CONFIG_DEBUG_MUTEXES case. Most of them are NOPs:
*/
+-#define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
+-#define debug_mutex_free_waiter(waiter) do { } while (0)
+-#define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
+-#define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
+-#define debug_mutex_unlock(lock) do { } while (0)
+-#define debug_mutex_init(lock, name, key) do { } while (0)
+-
+-static inline void
+-debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
+-{
+-}
+#ifdef CONFIG_DEBUG_MUTEXES
+extern void debug_mutex_lock_common(struct mutex *lock,
+ struct mutex_waiter *waiter);
@@ -112,12 +120,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+ struct lock_class_key *key);
+#else /* CONFIG_DEBUG_MUTEXES */
-+
- #define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
- #define debug_mutex_free_waiter(waiter) do { } while (0)
- #define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
-@@ -21,3 +34,4 @@ static inline void
- debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
- {
- }
++# define debug_mutex_lock_common(lock, waiter) do { } while (0)
++# define debug_mutex_wake_waiter(lock, waiter) do { } while (0)
++# define debug_mutex_free_waiter(waiter) do { } while (0)
++# define debug_mutex_add_waiter(lock, waiter, ti) do { } while (0)
++# define debug_mutex_remove_waiter(lock, waiter, ti) do { } while (0)
++# define debug_mutex_unlock(lock) do { } while (0)
++# define debug_mutex_init(lock, name, key) do { } while (0)
+#endif /* !CONFIG_DEBUG_MUTEXES */
diff --git a/patches/locking_rtmutex__Add_mutex_variant_for_RT.patch b/patches/locking_rtmutex__Add_mutex_variant_for_RT.patch
index ac86aa8c6fcd..0ae3711f8978 100644
--- a/patches/locking_rtmutex__Add_mutex_variant_for_RT.patch
+++ b/patches/locking_rtmutex__Add_mutex_variant_for_RT.patch
@@ -11,11 +11,11 @@ No functional change when CONFIG_PREEMPT_RT=n
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/mutex.h | 67 ++++++++++++++++++++---
+ include/linux/mutex.h | 66 +++++++++++++++++++----
kernel/locking/mutex.c | 4 +
kernel/locking/rtmutex_api.c | 122 +++++++++++++++++++++++++++++++++++++++++++
lib/Kconfig.debug | 11 ++-
- 4 files changed, 188 insertions(+), 16 deletions(-)
+ 4 files changed, 187 insertions(+), 16 deletions(-)
---
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -124,6 +126,51 @@ extern void __mutex_init(struct mutex *l
+@@ -124,6 +126,50 @@ extern void __mutex_init(struct mutex *l
*/
extern bool mutex_is_locked(struct mutex *lock);
@@ -89,18 +89,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+#define mutex_is_locked(l) rt_mutex_base_is_locked(&(l)->rtmutex)
+
-+#define mutex_init(mutex) \
++#define __mutex_init(mutex, name, key) \
+do { \
-+ static struct lock_class_key __key; \
-+ \
+ rt_mutex_base_init(&(mutex)->rtmutex); \
-+ __mutex_rt_init((mutex), #mutex, &__key); \
++ __mutex_rt_init((mutex), name, key); \
+} while (0)
+
-+#define __mutex_init(mutex, name, key) \
++#define mutex_init(mutex) \
+do { \
-+ rt_mutex_base_init(&(mutex)->rtmutex); \
-+ __mutex_rt_init((mutex), name, key); \
++ static struct lock_class_key __key; \
++ \
++ __mutex_init((mutex), #mutex, &__key); \
+} while (0)
+#endif /* CONFIG_PREEMPT_RT */
+
@@ -140,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct lock_class_key *key)
+{
+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
-+ lockdep_init_map(&mutex->dep_map, name, key, 0);
++ lockdep_init_map_wait(&mutex->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__mutex_rt_init);
+
diff --git a/patches/locking_rtmutex__Include_only_rbtree_types.patch b/patches/locking_rtmutex__Include_only_rbtree_types.patch
index 35b1398f1da5..a7dd4755c6c7 100644
--- a/patches/locking_rtmutex__Include_only_rbtree_types.patch
+++ b/patches/locking_rtmutex__Include_only_rbtree_types.patch
@@ -20,12 +20,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -14,7 +14,7 @@
- #define __LINUX_RT_MUTEX_H
+@@ -15,7 +15,7 @@
+ #include <linux/compiler.h>
#include <linux/linkage.h>
-#include <linux/rbtree.h>
+#include <linux/rbtree_types.h>
#include <linux/spinlock_types_raw.h>
- #include <linux/compiler.h>
+ extern int max_lock_depth; /* for sysctl */
diff --git a/patches/locking_rtmutex__Prevent_future_include_recursion_hell.patch b/patches/locking_rtmutex__Prevent_future_include_recursion_hell.patch
index be73ad38ff91..928987affc0b 100644
--- a/patches/locking_rtmutex__Prevent_future_include_recursion_hell.patch
+++ b/patches/locking_rtmutex__Prevent_future_include_recursion_hell.patch
@@ -20,12 +20,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -15,7 +15,7 @@
-
+@@ -16,7 +16,7 @@
+ #include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
- #include <linux/compiler.h>
extern int max_lock_depth; /* for sysctl */
+
diff --git a/patches/locking_rwlock__Provide_RT_variant.patch b/patches/locking_rwlock__Provide_RT_variant.patch
index 12cd3ce82273..66ce98bc62c9 100644
--- a/patches/locking_rwlock__Provide_RT_variant.patch
+++ b/patches/locking_rwlock__Provide_RT_variant.patch
@@ -12,14 +12,16 @@ across the slow lock operations (contended case).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
+V5: Add missing might_sleep() and fix lockdep init (Sebastian)
+---
include/linux/rwlock_rt.h | 140 ++++++++++++++++++++++++++++++++++++++++
- include/linux/rwlock_types.h | 35 +++++++++-
+ include/linux/rwlock_types.h | 49 ++++++++++----
include/linux/spinlock_rt.h | 2
kernel/Kconfig.locks | 2
kernel/locking/spinlock.c | 7 ++
kernel/locking/spinlock_debug.c | 5 +
- kernel/locking/spinlock_rt.c | 129 ++++++++++++++++++++++++++++++++++++
- 7 files changed, 317 insertions(+), 3 deletions(-)
+ kernel/locking/spinlock_rt.c | 131 +++++++++++++++++++++++++++++++++++++
+ 7 files changed, 323 insertions(+), 13 deletions(-)
create mode 100644 include/linux/rwlock_rt.h
---
--- /dev/null
@@ -167,10 +169,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
-@@ -5,9 +5,9 @@
+@@ -5,9 +5,19 @@
# error "Do not include directly, include spinlock_types.h"
#endif
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define RW_DEP_MAP_INIT(lockname) \
++ .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_CONFIG, \
++ }
++#else
++# define RW_DEP_MAP_INIT(lockname)
++#endif
++
+#ifndef CONFIG_PREEMPT_RT
/*
- * include/linux/rwlock_types.h - generic rwlock type definitions
@@ -179,7 +191,24 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* portions Copyright 2005, Red Hat, Inc., Ingo Molnar
* Released under the General Public License (GPL).
-@@ -50,4 +50,35 @@ typedef struct {
+@@ -25,16 +35,6 @@ typedef struct {
+
+ #define RWLOCK_MAGIC 0xdeaf1eed
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define RW_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_CONFIG, \
+- }
+-#else
+-# define RW_DEP_MAP_INIT(lockname)
+-#endif
+-
+ #ifdef CONFIG_DEBUG_SPINLOCK
+ #define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __ARCH_RW_LOCK_UNLOCKED, \
+@@ -50,4 +50,29 @@ typedef struct {
#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
@@ -195,23 +224,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+} rwlock_t;
+
-+#ifdef CONFIG_DEBUG_LOCK_ALLOC
-+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
-+#else
-+# define RW_DEP_MAP_INIT(lockname)
-+#endif
-+
-+#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
-+
-+#define DEFINE_RWLOCK(name) \
-+ rwlock_t name = __RW_LOCK_UNLOCKED(name)
-+
+#define __RWLOCK_RT_INITIALIZER(name) \
+{ \
+ .rwbase = __RWBASE_INITIALIZER(name), \
+ RW_DEP_MAP_INIT(name) \
+}
+
++#define __RW_LOCK_UNLOCKED(name) __RWLOCK_RT_INITIALIZER(name)
++
++#define DEFINE_RWLOCK(name) \
++ rwlock_t name = __RW_LOCK_UNLOCKED(name)
++
+#endif /* CONFIG_PREEMPT_RT */
+
#endif /* __LINUX_RWLOCK_TYPES_H */
@@ -301,7 +324,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif /* !CONFIG_PREEMPT_RT */
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
-@@ -126,3 +126,132 @@ void __rt_spin_lock_init(spinlock_t *loc
+@@ -127,3 +127,134 @@ void __rt_spin_lock_init(spinlock_t *loc
}
EXPORT_SYMBOL(__rt_spin_lock_init);
#endif
@@ -385,6 +408,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+void __sched rt_read_lock(rwlock_t *rwlock)
+{
++ ___might_sleep(__FILE__, __LINE__, 0);
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ rwbase_read_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+ rcu_read_lock();
@@ -394,6 +418,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+void __sched rt_write_lock(rwlock_t *rwlock)
+{
++ ___might_sleep(__FILE__, __LINE__, 0);
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ rwbase_write_lock(&rwlock->rwbase, TASK_RTLOCK_WAIT);
+ rcu_read_lock();
@@ -430,7 +455,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct lock_class_key *key)
+{
+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
-+ lockdep_init_map(&rwlock->dep_map, name, key, 0);
++ lockdep_init_map_wait(&rwlock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+#endif
diff --git a/patches/locking_rwsem__Add_rtmutex_based_R_W_semaphore_implementation.patch b/patches/locking_rwsem__Add_rtmutex_based_R_W_semaphore_implementation.patch
index 31629c7b0d42..2d9af514bb1b 100644
--- a/patches/locking_rwsem__Add_rtmutex_based_R_W_semaphore_implementation.patch
+++ b/patches/locking_rwsem__Add_rtmutex_based_R_W_semaphore_implementation.patch
@@ -44,22 +44,49 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: Fix indent fail (Peter Z)
---
- include/linux/rwsem.h | 58 ++++++++++++++++++++++++++
+ include/linux/rwsem.h | 78 ++++++++++++++++++++++++++++++-----
kernel/locking/rwsem.c | 108 +++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 166 insertions(+)
+ 2 files changed, 176 insertions(+), 10 deletions(-)
---
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
-@@ -16,6 +16,9 @@
+@@ -16,6 +16,19 @@
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/err.h>
+
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++# define __RWSEM_DEP_MAP_INIT(lockname) \
++ .dep_map = { \
++ .name = #lockname, \
++ .wait_type_inner = LD_WAIT_SLEEP, \
++ },
++#else
++# define __RWSEM_DEP_MAP_INIT(lockname)
++#endif
++
+#ifndef CONFIG_PREEMPT_RT
+
#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
#include <linux/osq_lock.h>
#endif
+@@ -64,16 +77,6 @@ static inline int rwsem_is_locked(struct
+
+ /* Common initializer macros and functions */
+
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define __RWSEM_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SLEEP, \
+- },
+-#else
+-# define __RWSEM_DEP_MAP_INIT(lockname)
+-#endif
+-
+ #ifdef CONFIG_DEBUG_RWSEMS
+ # define __RWSEM_DEBUG_INIT(lockname) .magic = &lockname,
+ #else
@@ -119,6 +122,61 @@ static inline int rwsem_is_contended(str
return !list_empty(&sem->wait_list);
}
@@ -78,7 +105,7 @@ V2: Fix indent fail (Peter Z)
+#define __RWSEM_INITIALIZER(name) \
+ { \
+ .rwbase = __RWBASE_INITIALIZER(name), \
-+ RW_DEP_MAP_INIT(name) \
++ __RWSEM_DEP_MAP_INIT(name) \
+ }
+
+#define DECLARE_RWSEM(lockname) \
@@ -171,7 +198,7 @@ V2: Fix indent fail (Peter Z)
+ struct lock_class_key *key)
+{
+ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-+ lockdep_init_map(&sem->dep_map, name, key, 0);
++ lockdep_init_map_wait(&sem->dep_map, name, key, 0, LD_WAIT_SLEEP);
+}
+EXPORT_SYMBOL(__rwsem_init);
+#endif
diff --git a/patches/locking_spinlock__Provide_RT_variant.patch b/patches/locking_spinlock__Provide_RT_variant.patch
index 2f93fe790d23..8e1569a0a0b0 100644
--- a/patches/locking_spinlock__Provide_RT_variant.patch
+++ b/patches/locking_spinlock__Provide_RT_variant.patch
@@ -9,9 +9,11 @@ spinlock specific rtmutex code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
+V5: Add missing might_sleep() and fix lockdep init (Sebastian)
+---
kernel/locking/Makefile | 1
- kernel/locking/spinlock_rt.c | 128 +++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 129 insertions(+)
+ kernel/locking/spinlock_rt.c | 129 +++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 130 insertions(+)
create mode 100644 kernel/locking/spinlock_rt.c
---
--- a/kernel/locking/Makefile
@@ -26,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
--- /dev/null
+++ b/kernel/locking/spinlock_rt.c
-@@ -0,0 +1,128 @@
+@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * PREEMPT_RT substitution for spin/rw_locks
@@ -61,6 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+static __always_inline void __rt_spin_lock(spinlock_t *lock)
+{
++ ___might_sleep(__FILE__, __LINE__, 0);
+ rtlock_lock(&lock->lock);
+ rcu_read_lock();
+ migrate_disable();
@@ -151,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct lock_class_key *key)
+{
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-+ lockdep_init_map(&lock->dep_map, name, key, 0);
++ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+#endif
diff --git a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
index ce7b1d67e695..5fb0bd61c5b5 100644
--- a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
+++ b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2988,7 +2988,7 @@ unsigned long wait_task_inactive(struct
+@@ -2990,7 +2990,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
cpu_relax();
}
-@@ -3003,7 +3003,7 @@ unsigned long wait_task_inactive(struct
+@@ -3005,7 +3005,7 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rtmutex--Provide-rt_mutex_base_is_locked--.patch b/patches/rtmutex--Provide-rt_mutex_base_is_locked--.patch
index a4499c25ea7f..813be494c14a 100644
--- a/patches/rtmutex--Provide-rt_mutex_base_is_locked--.patch
+++ b/patches/rtmutex--Provide-rt_mutex_base_is_locked--.patch
@@ -14,14 +14,14 @@ V4: Use READ_ONCE() - Davidlohr
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -16,6 +16,7 @@
+@@ -13,6 +13,7 @@
+ #ifndef __LINUX_RT_MUTEX_H
+ #define __LINUX_RT_MUTEX_H
+
++#include <linux/compiler.h>
#include <linux/linkage.h>
#include <linux/rbtree.h>
#include <linux/spinlock_types.h>
-+#include <linux/compiler.h>
-
- extern int max_lock_depth; /* for sysctl */
-
@@ -32,6 +33,17 @@ struct rt_mutex_base {
.owner = NULL \
}
diff --git a/patches/rtmutex--Remove-rt_mutex_is_locked--.patch b/patches/rtmutex--Remove-rt_mutex_is_locked--.patch
index d1f9239d3f46..69d3a1157c88 100644
--- a/patches/rtmutex--Remove-rt_mutex_is_locked--.patch
+++ b/patches/rtmutex--Remove-rt_mutex_is_locked--.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -67,17 +67,6 @@ do { \
+@@ -72,17 +72,6 @@ do { \
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
diff --git a/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch b/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
index 68449683b50b..10d3e179aeed 100644
--- a/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
+++ b/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
@@ -16,13 +16,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V2: New patch
---
- include/linux/rtmutex.h | 29 +++++++++++++-----
+ include/linux/rtmutex.h | 23 ++++++++++----
kernel/futex.c | 4 +-
kernel/locking/rtmutex.c | 64 ++++++++++++++++++++--------------------
- kernel/locking/rtmutex_api.c | 39 +++++++++++++-----------
+ kernel/locking/rtmutex_api.c | 41 ++++++++++++++-----------
kernel/locking/rtmutex_common.h | 38 +++++++++++------------
kernel/rcu/tree_plugin.h | 6 +--
- 6 files changed, 100 insertions(+), 80 deletions(-)
+ 6 files changed, 97 insertions(+), 79 deletions(-)
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -59,23 +59,17 @@ V2: New patch
#ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map;
#endif
-@@ -58,11 +71,11 @@ do { \
- #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
- #endif
-
--#define __RT_MUTEX_INITIALIZER(mutexname) \
-- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
-- , .waiters = RB_ROOT_CACHED \
-- , .owner = NULL \
-- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
-+#define __RT_MUTEX_INITIALIZER(mutexname) \
-+{ \
-+ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex) \
-+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
-+}
+@@ -63,9 +76,7 @@ do { \
+
+ #define __RT_MUTEX_INITIALIZER(mutexname) \
+ { \
+- .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock), \
+- .waiters = RB_ROOT_CACHED, \
+- .owner = NULL, \
++ .rtmutex = __RT_MUTEX_BASE_INITIALIZER(mutexname.rtmutex), \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ }
- #define DEFINE_RT_MUTEX(mutexname) \
- struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -179,7 +179,7 @@ struct futex_pi_state {
@@ -433,12 +427,16 @@ V2: New patch
{
DEFINE_WAKE_Q(wake_q);
unsigned long flags;
-@@ -185,9 +191,8 @@ void __sched __rt_mutex_init(struct rt_m
- struct lock_class_key *key)
+@@ -182,12 +188,11 @@ void __sched rt_mutex_futex_unlock(struc
+ * Initializing of a locked rt_mutex is not allowed
+ */
+ void __sched __rt_mutex_init(struct rt_mutex *lock, const char *name,
+- struct lock_class_key *key)
++ struct lock_class_key *key)
{
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+ __rt_mutex_base_init(&lock->rtmutex);
- lockdep_init_map(&lock->dep_map, name, key, 0);
+ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
-
- __rt_mutex_basic_init(lock);
}
diff --git a/patches/rtmutex__Split_API_and_implementation.patch b/patches/rtmutex__Split_API_and_implementation.patch
index fabeadcbd751..0011c5683845 100644
--- a/patches/rtmutex__Split_API_and_implementation.patch
+++ b/patches/rtmutex__Split_API_and_implementation.patch
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- struct lock_class_key *key)
-{
- debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-- lockdep_init_map(&lock->dep_map, name, key, 0);
+- lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
-
- __rt_mutex_basic_init(lock);
-}
@@ -752,7 +752,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct lock_class_key *key)
+{
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-+ lockdep_init_map(&lock->dep_map, name, key, 0);
++ lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_SLEEP);
+
+ __rt_mutex_basic_init(lock);
+}
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index 059dd41a1b5c..28e357d431eb 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -343,7 +343,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -2140,6 +2181,7 @@ void migrate_enable(void)
+@@ -2142,6 +2183,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4191,6 +4233,9 @@ int sched_fork(unsigned long clone_flags
+@@ -4193,6 +4235,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -5963,6 +6008,7 @@ static void __sched notrace __schedule(u
+@@ -5965,6 +6010,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6180,6 +6226,30 @@ static void __sched notrace preempt_sche
+@@ -6182,6 +6228,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6193,7 +6263,8 @@ asmlinkage __visible void __sched notrac
+@@ -6195,7 +6265,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -410,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6226,6 +6297,9 @@ asmlinkage __visible void __sched notrac
+@@ -6228,6 +6299,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -420,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8347,7 +8421,9 @@ void __init init_idle(struct task_struct
+@@ -8349,7 +8423,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch b/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
index 35b42a8a55ac..b073ddf37795 100644
--- a/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
+++ b/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -9141,7 +9141,7 @@ void __init sched_init(void)
+@@ -9143,7 +9143,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched__Move_mmdrop_to_RCU_on_RT.patch b/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
index e87ccb861956..ba8951a631c1 100644
--- a/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
+++ b/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4624,9 +4624,13 @@ static struct rq *finish_task_switch(str
+@@ -4626,9 +4626,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -8457,6 +8461,7 @@ void sched_setnuma(struct task_struct *p
+@@ -8459,6 +8463,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index d1a67a56e17c..cdc8aaa88579 100644
--- a/patches/series
+++ b/patches/series
@@ -126,6 +126,8 @@ debugobjects__Make_RT_aware.patch
###########################################################################
# Locking core
###########################################################################
+locking-local_lock--Add-missing-owner-initialization.patch
+locking-rtmutex--Set-proper-wait-context-for-lockdep.patch
sched__Split_out_the_wakeup_state_check.patch
sched__Introduce_TASK_RTLOCK_WAIT.patch
sched--Reorganize-current--state-helpers.patch
@@ -194,20 +196,22 @@ rtmutex__Prevent_lockdep_false_positive_with_PI_futexes.patch
preempt__Adjust_PREEMPT_LOCK_OFFSET_for_RT.patch
locking_rtmutex__Implement_equal_priority_lock_stealing.patch
locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
+locking-spinlock-rt--Prepare-for-RT-local_lock.patch
+locking-local_lock--Add-PREEMPT_RT-support.patch
###########################################################################
# Locking: RT bits. Need review
###########################################################################
-locking_local_lock__Prepare_for_RT_support.patch
-locking_local_lock__Add_RT_support.patch
-locking_RT__Add_might_sleeping_annotation..patch
-locking__dont_check_for___LINUX_SPINLOCK_TYPES_H_on_-RT_archs.patch
-
-lockdep__Make_it_RT_aware.patch
-lockdep__selftest__Only_do_hardirq_context_test_for_raw_spinlock.patch
-lockdep__selftest__fix_warnings_due_to_missing_PREEMPT_RT_conditionals.patch
-lockdep__disable_self-test.patch
-
+lockdep-selftests-Avoid-using-local_lock_-acquire-re.patch
+0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
+0003-rtmutex-Add-a-special-case-for-ww-mutex-handling.patch
+0004-rtmutex-Add-rt_mutex_lock_nest_lock-and-rt_mutex_loc.patch
+0005-lockdep-Make-it-RT-aware.patch
+0006-lockdep-selftests-Add-rtmutex-to-the-last-column.patch
+0007-lockdep-selftests-Unbalanced-migrate_disable-rcu_rea.patch
+0008-lockdep-selftests-Skip-the-softirq-related-tests-on-.patch
+0009-lockdep-selftests-Use-correct-depmap-for-local_lock-.patch
+0010-lockdep-selftests-Adapt-ww-tests-for-PREEMPT_RT.patch
###########################################################################
# preempt: Conditional variants
diff --git a/patches/softirq__Disable_softirq_stacks_for_RT.patch b/patches/softirq__Disable_softirq_stacks_for_RT.patch
index 049718cf9c74..a14ae5179069 100644
--- a/patches/softirq__Disable_softirq_stacks_for_RT.patch
+++ b/patches/softirq__Disable_softirq_stacks_for_RT.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
-@@ -809,10 +811,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
+@@ -814,10 +816,12 @@ void *mcheckirq_ctx[NR_CPUS] __read_most
void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;