author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2021-08-02 18:09:43 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2021-08-02 18:09:43 +0200
commit     3f2f51f445198d5347a5f00a3e777f021a0efe94
tree       f2ca176104b57a3f03f9f5a2bb4929cafb26fdf3
parent     eb14dee088c64cf70c04fe77a688b4ff8a31b167
download   linux-rt-3f2f51f445198d5347a5f00a3e777f021a0efe94.tar.gz

[ANNOUNCE] v5.14-rc4-rt4  (v5.14-rc4-rt4-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rc4-rt4 patch set.

Changes since v5.14-rc4-rt3:

  - Updating the locking bits:

    - Use proper task state in ww_mutex_lock_interruptible(), reported by
      Mike Galbraith.

    - Fix the wake_q handling for a task which blocks simultaneously as a
      regular task and additionally as a sleeper on a sleeping lock.
      Regression introduced in the v5.14 cycle, reported by Mike Galbraith,
      patched by Thomas Gleixner.

    - Address the futex related review comments by Peter Zijlstra.

Known issues

  - netconsole triggers WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
    still not clear if the RCU related change is correct.

The delta patch against v5.14-rc4-rt3 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc4-rt3-rt4.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc4-rt4

The RT patch against v5.14-rc4 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc4-rt4.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc4-rt4.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
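For reference, the wake_q change called out above reduces to keeping rtlock
("sleeping spinlock") wakeups out of the regular wake_q, since a task's single
wake_q node may already be in use while it also blocks as a regular sleeper.
The sketch below is condensed from the updated
locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch further down
(debug checks and the surrounding unlock path omitted); it is an illustration
of the approach, not the verbatim kernel code:

/* At most one pending rtlock waiter; regular waiters go through wake_q. */
struct rt_wake_q_head {
        struct wake_q_head      head;           /* regular sleeping-lock wakeups */
        struct task_struct      *rtlock_task;   /* rtlock (spin/rwlock) wakeup */
};

static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
                                                struct rt_mutex_waiter *w)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
                /* Park the rtlock waiter; its wake_q node may be busy. */
                get_task_struct(w->task);
                wqh->rtlock_task = w->task;
        } else {
                wake_q_add(&wqh->head, w->task);
        }
}

static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
        if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
                /* Wake only the rtlock wait state, not a regular sleep. */
                wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
                put_task_struct(wqh->rtlock_task);
                wqh->rtlock_task = NULL;
        }

        if (!wake_q_empty(&wqh->head))
                wake_up_q(&wqh->head);
}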
-rw-r--r--  patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch |  2
-rw-r--r--  patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch |  6
-rw-r--r--  patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch | 10
-rw-r--r--  patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch | 10
-rw-r--r--  patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch |  8
-rw-r--r--  patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch |  6
-rw-r--r--  patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch |  8
-rw-r--r--  patches/ARM__Allow_to_enable_RT.patch |  2
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch |  2
-rw-r--r--  patches/POWERPC__Allow_to_enable_RT.patch |  2
-rw-r--r--  patches/arm__Add_support_for_lazy_preemption.patch |  2
-rw-r--r--  patches/futex__Clarify_comment_in_futex_requeue.patch |  2
-rw-r--r--  patches/futex__Cleanup_stale_comments.patch | 10
-rw-r--r--  patches/futex__Correct_the_number_of_requeued_waiters_for_PI.patch |  4
-rw-r--r--  patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch | 87
-rw-r--r--  patches/futex__Restructure_futex_requeue.patch |  4
-rw-r--r--  patches/futex__Validate_waiter_correctly_in_futex_proxy_trylock_atomic.patch | 11
-rw-r--r--  patches/locking-rtmutex--Extend-the-rtmutex-core-to-support-ww_mutex.patch | 24
-rw-r--r--  patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch |  6
-rw-r--r--  patches/locking_rtmutex__Guard_regular_sleeping_locks_specific_functions.patch |  4
-rw-r--r--  patches/locking_rtmutex__Implement_equal_priority_lock_stealing.patch |  4
-rw-r--r--  patches/locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch | 71
-rw-r--r--  patches/locking_rtmutex__Provide_the_spin_rwlock_core_lock_function.patch |  2
-rw-r--r--  patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch | 12
-rw-r--r--  patches/locking_ww_mutex__Implement_ww_rt_mutex.patch |  4
-rw-r--r--  patches/locking_ww_mutex__Remove___sched_annotation.patch |  2
-rw-r--r--  patches/powerpc__Add_support_for_lazy_preemption.patch |  2
-rw-r--r--  patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch |  2
-rw-r--r--  patches/x86__Support_for_lazy_preemption.patch |  2
29 files changed, 148 insertions(+), 163 deletions(-)
diff --git a/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch b/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
index 47274bc95907..8ed00bc011f2 100644
--- a/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
+++ b/patches/0001-mm-slub-don-t-call-flush_all-from-slab_debug_trace_o.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -5820,9 +5820,6 @@ static int slab_debug_trace_open(struct
+@@ -5822,9 +5822,6 @@ static int slab_debug_trace_open(struct
if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location), GFP_KERNEL))
return -ENOMEM;
diff --git a/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch b/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
index de75c4e82f24..70023867233b 100644
--- a/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
+++ b/patches/0002-mm-slub-allocate-private-object-map-for-debugfs-list.patch
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return object_map;
}
-@@ -4874,17 +4880,17 @@ static int add_location(struct loc_track
+@@ -4876,17 +4882,17 @@ static int add_location(struct loc_track
}
static void process_slab(struct loc_track *t, struct kmem_cache *s,
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_SLUB_DEBUG */
-@@ -5811,14 +5817,21 @@ static int slab_debug_trace_open(struct
+@@ -5813,14 +5819,21 @@ static int slab_debug_trace_open(struct
struct loc_track *t = __seq_open_private(filep, &slab_debugfs_sops,
sizeof(struct loc_track));
struct kmem_cache *s = file_inode(filep)->i_private;
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_kmem_cache_node(s, node, n) {
unsigned long flags;
-@@ -5829,12 +5842,13 @@ static int slab_debug_trace_open(struct
+@@ -5831,12 +5844,13 @@ static int slab_debug_trace_open(struct
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list)
diff --git a/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch b/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
index 962386b978f7..afaa6338e103 100644
--- a/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
+++ b/patches/0003-mm-slub-allocate-private-object-map-for-validate_sla.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -4674,11 +4674,11 @@ static int count_total(struct page *page
+@@ -4676,11 +4676,11 @@ static int count_total(struct page *page
#endif
#ifdef CONFIG_SLUB_DEBUG
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_lock(page);
-@@ -4686,21 +4686,20 @@ static void validate_slab(struct kmem_ca
+@@ -4688,21 +4688,20 @@ static void validate_slab(struct kmem_ca
goto unlock;
/* Now we know that a valid freelist exists */
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long count = 0;
struct page *page;
-@@ -4709,7 +4708,7 @@ static int validate_slab_node(struct kme
+@@ -4711,7 +4710,7 @@ static int validate_slab_node(struct kme
spin_lock_irqsave(&n->list_lock, flags);
list_for_each_entry(page, &n->partial, slab_list) {
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
count++;
}
if (count != n->nr_partial) {
-@@ -4722,7 +4721,7 @@ static int validate_slab_node(struct kme
+@@ -4724,7 +4723,7 @@ static int validate_slab_node(struct kme
goto out;
list_for_each_entry(page, &n->full, slab_list) {
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
count++;
}
if (count != atomic_long_read(&n->nr_slabs)) {
-@@ -4741,10 +4740,17 @@ long validate_slab_cache(struct kmem_cac
+@@ -4743,10 +4742,17 @@ long validate_slab_cache(struct kmem_cac
int node;
unsigned long count = 0;
struct kmem_cache_node *n;
diff --git a/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch b/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
index 8c7ad8ac284a..c833fcf3b0e7 100644
--- a/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
+++ b/patches/0012-mm-slub-move-disabling-enabling-irqs-to-___slab_allo.patch
@@ -133,7 +133,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return p;
}
-@@ -3334,8 +3341,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3342,8 +3349,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* IRQs, which protects against PREEMPT and interrupts
* handlers invoking normal fastpath.
*/
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < size; i++) {
void *object = kfence_alloc(s, s->object_size, flags);
-@@ -3356,6 +3363,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3364,6 +3371,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
*/
c->tid = next_tid(c->tid);
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Invoking slow path likely have side-effect
* of re-populating per CPU c->freelist
-@@ -3368,6 +3377,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3376,6 +3385,8 @@ int kmem_cache_alloc_bulk(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
@@ -161,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue; /* goto for-loop */
}
c->freelist = get_freepointer(s, object);
-@@ -3376,6 +3387,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3384,6 +3395,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* memcg and kmem_cache debug support and memory initialization.
-@@ -3385,7 +3397,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3393,7 +3405,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
slab_want_init_on_alloc(flags, s));
return i;
error:
diff --git a/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch b/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
index acc95685ab01..df95cbf75f2f 100644
--- a/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
+++ b/patches/0031-mm-slub-optionally-save-restore-irqs-in-slab_-un-loc.patch
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irqrestore(&n->list_lock, flags);
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
-@@ -4040,9 +4051,10 @@ static void list_slab_objects(struct kme
+@@ -4048,9 +4059,10 @@ static void list_slab_objects(struct kme
void *addr = page_address(page);
unsigned long *map;
void *p;
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
map = get_map(s, page);
for_each_object(p, s, addr, page->objects) {
-@@ -4053,7 +4065,7 @@ static void list_slab_objects(struct kme
+@@ -4061,7 +4073,7 @@ static void list_slab_objects(struct kme
}
}
put_map(map);
@@ -124,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -4784,8 +4796,9 @@ static void validate_slab(struct kmem_ca
+@@ -4786,8 +4798,9 @@ static void validate_slab(struct kmem_ca
{
void *p;
void *addr = page_address(page);
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!check_slab(s, page) || !on_freelist(s, page, NULL))
goto unlock;
-@@ -4800,7 +4813,7 @@ static void validate_slab(struct kmem_ca
+@@ -4802,7 +4815,7 @@ static void validate_slab(struct kmem_ca
break;
}
unlock:
diff --git a/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch b/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
index 3cc96e2f2396..3be47d647101 100644
--- a/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
+++ b/patches/0034-mm-slub-use-migrate_disable-on-PREEMPT_RT.patch
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
return p;
}
-@@ -3491,7 +3512,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3499,7 +3520,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* IRQs, which protects against PREEMPT and interrupts
* handlers invoking normal fastpath.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_disable();
for (i = 0; i < size; i++) {
-@@ -3537,7 +3558,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3545,7 +3566,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* memcg and kmem_cache debug support and memory initialization.
-@@ -3547,7 +3568,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3555,7 +3576,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
slab_want_init_on_alloc(flags, s));
return i;
error:
diff --git a/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch b/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
index 669c5ba02dec..493ace93f65b 100644
--- a/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
+++ b/patches/0035-mm-slub-convert-kmem_cpu_slab-protection-to-local_lo.patch
@@ -358,7 +358,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stat(s, FREE_FASTPATH);
} else
__slab_free(s, page, head, tail_obj, cnt, addr);
-@@ -3513,7 +3593,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3521,7 +3601,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* handlers invoking normal fastpath.
*/
c = slub_get_cpu_ptr(s->cpu_slab);
@@ -367,7 +367,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < size; i++) {
void *object = kfence_alloc(s, s->object_size, flags);
-@@ -3534,7 +3614,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3542,7 +3622,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
*/
c->tid = next_tid(c->tid);
@@ -376,7 +376,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Invoking slow path likely have side-effect
-@@ -3548,7 +3628,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3556,7 +3636,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
maybe_wipe_obj_freeptr(s, p[i]);
@@ -385,7 +385,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue; /* goto for-loop */
}
-@@ -3557,7 +3637,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3565,7 +3645,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
maybe_wipe_obj_freeptr(s, p[i]);
}
c->tid = next_tid(c->tid);
diff --git a/patches/ARM__Allow_to_enable_RT.patch b/patches/ARM__Allow_to_enable_RT.patch
index 3ec744ce6d1f..73d77857cb0e 100644
--- a/patches/ARM__Allow_to_enable_RT.patch
+++ b/patches/ARM__Allow_to_enable_RT.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_USE_MEMTEST
-@@ -126,6 +127,7 @@ config ARM
+@@ -125,6 +126,7 @@ config ARM
select OLD_SIGSUSPEND3
select PCI_SYSCALL if PCI
select PERF_USE_VMALLOC
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 53b69a97ca19..41fc0b58e69e 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt3
++-rt4
diff --git a/patches/POWERPC__Allow_to_enable_RT.patch b/patches/POWERPC__Allow_to_enable_RT.patch
index e4e9061e88a1..8c8782e59e28 100644
--- a/patches/POWERPC__Allow_to_enable_RT.patch
+++ b/patches/POWERPC__Allow_to_enable_RT.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF if PPC64
select ARCH_USE_MEMTEST
-@@ -224,6 +225,7 @@ config PPC
+@@ -223,6 +224,7 @@ config PPC
select HAVE_IOREMAP_PROT
select HAVE_IRQ_EXIT_ON_IRQ_STACK
select HAVE_IRQ_TIME_ACCOUNTING
diff --git a/patches/arm__Add_support_for_lazy_preemption.patch b/patches/arm__Add_support_for_lazy_preemption.patch
index 60f6ca398ead..6835a02239fd 100644
--- a/patches/arm__Add_support_for_lazy_preemption.patch
+++ b/patches/arm__Add_support_for_lazy_preemption.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -110,6 +110,7 @@ config ARM
+@@ -109,6 +109,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/futex__Clarify_comment_in_futex_requeue.patch b/patches/futex__Clarify_comment_in_futex_requeue.patch
index af0628cb2cdd..88283feb6b5b 100644
--- a/patches/futex__Clarify_comment_in_futex_requeue.patch
+++ b/patches/futex__Clarify_comment_in_futex_requeue.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1957,15 +1957,27 @@ static int futex_requeue(u32 __user *uad
+@@ -1960,15 +1960,27 @@ static int futex_requeue(u32 __user *uad
*/
if (refill_pi_state_cache())
return -ENOMEM;
diff --git a/patches/futex__Cleanup_stale_comments.patch b/patches/futex__Cleanup_stale_comments.patch
index a82e32c0865f..208995ccd6e4 100644
--- a/patches/futex__Cleanup_stale_comments.patch
+++ b/patches/futex__Cleanup_stale_comments.patch
@@ -25,7 +25,7 @@ V2: Cleanup more key ref comments - Andre
*
* @exiting is only set when the return value is -EBUSY. If so, this holds
* a refcount on the exiting task on return and the caller needs to drop it
-@@ -2618,8 +2618,7 @@ static void futex_wait_queue_me(struct f
+@@ -2621,8 +2621,7 @@ static void futex_wait_queue_me(struct f
*
* Setup the futex_q and locate the hash_bucket. Get the futex value and
* compare it with the expected value. Handle atomic faults internally.
@@ -35,7 +35,7 @@ V2: Cleanup more key ref comments - Andre
*
* Return:
* - 0 - uaddr contains val and hb has been locked;
-@@ -2697,8 +2696,8 @@ static int futex_wait(u32 __user *uaddr,
+@@ -2700,8 +2699,8 @@ static int futex_wait(u32 __user *uaddr,
current->timer_slack_ns);
retry:
/*
@@ -46,7 +46,7 @@ V2: Cleanup more key ref comments - Andre
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
-@@ -2709,7 +2708,6 @@ static int futex_wait(u32 __user *uaddr,
+@@ -2712,7 +2711,6 @@ static int futex_wait(u32 __user *uaddr,
/* If we were woken (and unqueued), we succeeded, whatever. */
ret = 0;
@@ -54,7 +54,7 @@ V2: Cleanup more key ref comments - Andre
if (!unqueue_me(&q))
goto out;
ret = -ETIMEDOUT;
-@@ -3202,8 +3200,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3205,8 +3203,8 @@ static int futex_wait_requeue_pi(u32 __u
q.requeue_pi_key = &key2;
/*
@@ -65,7 +65,7 @@ V2: Cleanup more key ref comments - Andre
*/
ret = futex_wait_setup(uaddr, val, flags, &q, &hb);
if (ret)
-@@ -3232,9 +3230,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3235,9 +3233,7 @@ static int futex_wait_requeue_pi(u32 __u
* In order for us to be here, we know our q.key == key2, and since
* we took the hb->lock above, we also know that futex_requeue() has
* completed and we no longer have to concern ourselves with a wakeup
diff --git a/patches/futex__Correct_the_number_of_requeued_waiters_for_PI.patch b/patches/futex__Correct_the_number_of_requeued_waiters_for_PI.patch
index 93f95538c6cc..d2654fa3e4e3 100644
--- a/patches/futex__Correct_the_number_of_requeued_waiters_for_PI.patch
+++ b/patches/futex__Correct_the_number_of_requeued_waiters_for_PI.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2128,6 +2128,8 @@ static int futex_requeue(u32 __user *uad
+@@ -2131,6 +2131,8 @@ static int futex_requeue(u32 __user *uad
/* Ensure we requeue to the expected futex for requeue_pi. */
if (requeue_pi && !match_futex(this->requeue_pi_key, &key2)) {
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = -EINVAL;
break;
}
-@@ -2169,6 +2171,8 @@ static int futex_requeue(u32 __user *uad
+@@ -2172,6 +2174,8 @@ static int futex_requeue(u32 __user *uad
*/
this->pi_state = NULL;
put_pi_state(pi_state);
diff --git a/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch b/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
index ea7973f0459b..07aaffa1c55e 100644
--- a/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
+++ b/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
@@ -87,8 +87,8 @@ requeue side.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/futex.c | 349 +++++++++++++++++++++++++++++++++++++++++++++------------
- 1 file changed, 278 insertions(+), 71 deletions(-)
+ kernel/futex.c | 344 +++++++++++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 273 insertions(+), 71 deletions(-)
---
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} __randomize_layout;
static const struct futex_q futex_q_init = {
-@@ -1793,6 +1799,158 @@ void requeue_futex(struct futex_q *q, st
+@@ -1793,6 +1799,153 @@ void requeue_futex(struct futex_q *q, st
q->key = *key2;
}
@@ -170,15 +170,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static inline bool futex_requeue_pi_prepare(struct futex_q *q,
+ struct futex_pi_state *pi_state)
+{
-+ int cur, res, new;
++ int old, new;
+
+ /*
+ * Set state to Q_REQUEUE_PI_IN_PROGRESS unless an early wakeup has
+ * already set Q_REQUEUE_PI_IGNORE to signal that requeue should
+ * ignore the waiter.
+ */
-+ for (cur = atomic_read(&q->requeue_state);; cur = res) {
-+ if (cur == Q_REQUEUE_PI_IGNORE)
++ old = atomic_read_acquire(&q->requeue_state);
++ do {
++ if (old == Q_REQUEUE_PI_IGNORE)
+ return false;
+
+ /*
@@ -189,74 +190,68 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * trylock, but that would just add more conditionals
+ * all over the place for a dubious value.
+ */
-+ if (cur != Q_REQUEUE_PI_NONE)
++ if (old != Q_REQUEUE_PI_NONE)
+ break;
+
+ new = Q_REQUEUE_PI_IN_PROGRESS;
-+ res = atomic_cmpxchg(&q->requeue_state, cur, new);
-+ if (likely(cur == res))
-+ break;
-+ }
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
++
+ q->pi_state = pi_state;
+ return true;
+}
+
+static inline void futex_requeue_pi_complete(struct futex_q *q, int locked)
+{
-+ int cur, res, new;
++ int old, new;
+
-+ for (cur = atomic_read(&q->requeue_state);; cur = res) {
++ old = atomic_read_acquire(&q->requeue_state);
++ do {
+ if (locked >= 0) {
+ /* Requeue succeeded. Set DONE or LOCKED */
+ new = Q_REQUEUE_PI_DONE + locked;
-+ } else if (cur == Q_REQUEUE_PI_IN_PROGRESS) {
++ } else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+ /* Deadlock, no early wakeup interleave */
+ new = Q_REQUEUE_PI_NONE;
+ } else {
+ /* Deadlock, early wakeup interleave. */
+ new = Q_REQUEUE_PI_IGNORE;
+ }
-+
-+ res = atomic_cmpxchg(&q->requeue_state, cur, new);
-+ if (likely(cur == res))
-+ break;
-+ }
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+#ifdef CONFIG_PREEMPT_RT
+ /* If the waiter interleaved with the requeue let it know */
-+ if (unlikely(cur == Q_REQUEUE_PI_WAIT))
++ if (unlikely(old == Q_REQUEUE_PI_WAIT))
+ rcuwait_wake_up(&q->requeue_wait);
+#endif
+}
+
+static inline int futex_requeue_pi_wakeup_sync(struct futex_q *q)
+{
-+ int cur, new, res;
++ int old, new;
+
-+ for (cur = atomic_read(&q->requeue_state);; cur = res) {
++ old = atomic_read_acquire(&q->requeue_state);
++ do {
+ /* Is requeue done already? */
-+ if (cur >= Q_REQUEUE_PI_DONE)
-+ break;
++ if (old >= Q_REQUEUE_PI_DONE)
++ return old;
+
+ /*
+ * If not done, then tell the requeue code to either ignore
+ * the waiter or to wake it up once the requeue is done.
+ */
-+ new = !cur ? Q_REQUEUE_PI_IGNORE : Q_REQUEUE_PI_WAIT;
-+ res = atomic_cmpxchg(&q->requeue_state, cur, new);
-+ if (likely(cur == res))
-+ break;
-+ }
++ new = Q_REQUEUE_PI_WAIT;
++ if (old == Q_REQUEUE_PI_NONE)
++ new = Q_REQUEUE_PI_IGNORE;
++ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
+
+ /* If the requeue was in progress, wait for it to complete */
-+ if (cur == Q_REQUEUE_PI_IN_PROGRESS) {
++ if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+#ifdef CONFIG_PREEMPT_RT
+ rcuwait_wait_event(&q->requeue_wait,
+ atomic_read(&q->requeue_state) != Q_REQUEUE_PI_WAIT,
+ TASK_UNINTERRUPTIBLE);
+#else
-+ while (atomic_read(&q->requeue_state) == Q_REQUEUE_PI_WAIT)
-+ cpu_relax();
++ (void)atomic_cond_read_relaxed(&q->requeue_state, VAL != Q_REQUEUE_PI_WAIT);
+#endif
+ }
+
@@ -271,7 +266,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
* @q: the futex_q
-@@ -1820,6 +1978,8 @@ void requeue_pi_wake_futex(struct futex_
+@@ -1820,6 +1973,8 @@ void requeue_pi_wake_futex(struct futex_
q->lock_ptr = &hb->lock;
@@ -280,7 +275,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
wake_up_state(q->task, TASK_NORMAL);
}
-@@ -1887,6 +2047,10 @@ futex_proxy_trylock_atomic(u32 __user *p
+@@ -1890,6 +2045,10 @@ futex_proxy_trylock_atomic(u32 __user *p
if (!match_futex(top_waiter->requeue_pi_key, key2))
return -EINVAL;
@@ -291,7 +286,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Try to take the lock for top_waiter. Set the FUTEX_WAITERS bit in
* the contended case or if set_waiters is 1. The pi_state is returned
-@@ -1896,8 +2060,22 @@ futex_proxy_trylock_atomic(u32 __user *p
+@@ -1899,8 +2058,22 @@ futex_proxy_trylock_atomic(u32 __user *p
ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
exiting, set_waiters);
if (ret == 1) {
@@ -314,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
return ret;
}
-@@ -2038,6 +2216,8 @@ static int futex_requeue(u32 __user *uad
+@@ -2041,6 +2214,8 @@ static int futex_requeue(u32 __user *uad
* intend to requeue waiters, force setting the FUTEX_WAITERS
* bit. We force this here where we are able to easily handle
* faults rather in the requeue loop below.
@@ -323,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
ret = futex_proxy_trylock_atomic(uaddr2, hb1, hb2, &key1,
&key2, &pi_state,
-@@ -2051,6 +2231,27 @@ static int futex_requeue(u32 __user *uad
+@@ -2054,6 +2229,27 @@ static int futex_requeue(u32 __user *uad
* vpid of the top waiter task.
* If the lock was not taken, we have pi_state and an initial
* refcount on it. In case of an error we have nothing.
@@ -351,7 +346,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
if (ret > 0) {
WARN_ON(pi_state);
-@@ -2076,7 +2277,10 @@ static int futex_requeue(u32 __user *uad
+@@ -2079,7 +2275,10 @@ static int futex_requeue(u32 __user *uad
/* We hold a reference on the pi state. */
break;
@@ -363,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
case -EFAULT:
double_unlock_hb(hb1, hb2);
hb_waiters_dec(hb2);
-@@ -2152,21 +2356,39 @@ static int futex_requeue(u32 __user *uad
+@@ -2155,21 +2354,39 @@ static int futex_requeue(u32 __user *uad
* object of the waiter.
*/
get_pi_state(pi_state);
@@ -408,7 +403,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* rt_mutex_start_proxy_lock() detected a potential
* deadlock when we tried to queue that waiter.
-@@ -2176,15 +2398,13 @@ static int futex_requeue(u32 __user *uad
+@@ -2179,15 +2396,13 @@ static int futex_requeue(u32 __user *uad
*/
this->pi_state = NULL;
put_pi_state(pi_state);
@@ -425,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -3082,27 +3302,22 @@ static int futex_unlock_pi(u32 __user *u
+@@ -3085,27 +3300,22 @@ static int futex_unlock_pi(u32 __user *u
}
/**
@@ -458,7 +453,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* With the hb lock held, we avoid races while we process the wakeup.
-@@ -3111,22 +3326,21 @@ int handle_early_requeue_pi_wakeup(struc
+@@ -3114,22 +3324,21 @@ int handle_early_requeue_pi_wakeup(struc
* It can't be requeued from uaddr2 to something else since we don't
* support a PI aware source futex for requeue.
*/
@@ -495,7 +490,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -3179,6 +3393,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3182,6 +3391,7 @@ static int futex_wait_requeue_pi(u32 __u
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
@@ -503,7 +498,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int res, ret;
if (!IS_ENABLED(CONFIG_FUTEX_PI))
-@@ -3228,30 +3443,22 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3231,30 +3441,22 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -545,7 +540,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
put_pi_state(q.pi_state);
spin_unlock(q.lock_ptr);
-@@ -3261,18 +3468,14 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3264,18 +3466,14 @@ static int futex_wait_requeue_pi(u32 __u
*/
ret = ret < 0 ? ret : 0;
}
@@ -568,7 +563,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(q.lock_ptr);
if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
ret = 0;
-@@ -3292,17 +3495,21 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3295,17 +3493,21 @@ static int futex_wait_requeue_pi(u32 __u
unqueue_me_pi(&q);
spin_unlock(q.lock_ptr);
diff --git a/patches/futex__Restructure_futex_requeue.patch b/patches/futex__Restructure_futex_requeue.patch
index 15bc487e8b93..b67db6d0f59c 100644
--- a/patches/futex__Restructure_futex_requeue.patch
+++ b/patches/futex__Restructure_futex_requeue.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2116,20 +2116,17 @@ static int futex_requeue(u32 __user *uad
+@@ -2119,20 +2119,17 @@ static int futex_requeue(u32 __user *uad
break;
}
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = -EINVAL;
break;
}
-@@ -2137,50 +2134,45 @@ static int futex_requeue(u32 __user *uad
+@@ -2140,50 +2137,45 @@ static int futex_requeue(u32 __user *uad
/*
* Requeue nr_requeue waiters and possibly one more in the case
* of requeue_pi if we couldn't acquire the lock atomically.
diff --git a/patches/futex__Validate_waiter_correctly_in_futex_proxy_trylock_atomic.patch b/patches/futex__Validate_waiter_correctly_in_futex_proxy_trylock_atomic.patch
index 23e71930f71d..62a24e25769c 100644
--- a/patches/futex__Validate_waiter_correctly_in_futex_proxy_trylock_atomic.patch
+++ b/patches/futex__Validate_waiter_correctly_in_futex_proxy_trylock_atomic.patch
@@ -11,16 +11,19 @@ sake.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/futex.c | 4 ++++
- 1 file changed, 4 insertions(+)
+ kernel/futex.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
---
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1879,6 +1879,10 @@ futex_proxy_trylock_atomic(u32 __user *p
+@@ -1879,6 +1879,13 @@ futex_proxy_trylock_atomic(u32 __user *p
if (!top_waiter)
return 0;
-+ /* Ensure that this is a waiter sitting in futex_wait_requeue_pi() */
++ /*
++ * Ensure that this is a waiter sitting in futex_wait_requeue_pi()
++ * and waiting on the 'waitqueue' futex which is always !PI.
++ */
+ if (!top_waiter->rt_waiter || top_waiter->pi_state)
+ ret = -EINVAL;
+
diff --git a/patches/locking-rtmutex--Extend-the-rtmutex-core-to-support-ww_mutex.patch b/patches/locking-rtmutex--Extend-the-rtmutex-core-to-support-ww_mutex.patch
index e6fe17bb0268..84353f81ef19 100644
--- a/patches/locking-rtmutex--Extend-the-rtmutex-core-to-support-ww_mutex.patch
+++ b/patches/locking-rtmutex--Extend-the-rtmutex-core-to-support-ww_mutex.patch
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline void
-@@ -939,6 +991,7 @@ try_to_take_rt_mutex(struct rt_mutex_bas
+@@ -945,6 +997,7 @@ try_to_take_rt_mutex(struct rt_mutex_bas
static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task,
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
enum rtmutex_chainwalk chwalk)
{
struct task_struct *owner = rt_mutex_owner(lock);
-@@ -975,6 +1028,16 @@ static int __sched task_blocks_on_rt_mut
+@@ -981,6 +1034,16 @@ static int __sched task_blocks_on_rt_mut
raw_spin_unlock(&task->pi_lock);
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!owner)
return 0;
-@@ -1260,6 +1323,7 @@ static void __sched remove_waiter(struct
+@@ -1266,6 +1329,7 @@ static void __sched remove_waiter(struct
/**
* rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* @state: the state the task should block in (TASK_INTERRUPTIBLE
* or TASK_UNINTERRUPTIBLE)
* @timeout: the pre-initialized and started timer, or NULL for none
-@@ -1268,10 +1332,12 @@ static void __sched remove_waiter(struct
+@@ -1274,10 +1338,12 @@ static void __sched remove_waiter(struct
* Must be called with lock->wait_lock held and interrupts disabled
*/
static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int ret = 0;
for (;;) {
-@@ -1288,6 +1354,12 @@ static int __sched rt_mutex_slowlock_blo
+@@ -1294,6 +1360,12 @@ static int __sched rt_mutex_slowlock_blo
break;
}
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
schedule();
-@@ -1310,6 +1382,9 @@ static void __sched rt_mutex_handle_dead
+@@ -1316,6 +1388,9 @@ static void __sched rt_mutex_handle_dead
if (res != -EDEADLOCK || detect_deadlock)
return;
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Yell loudly and stop the task right here.
*/
-@@ -1323,31 +1398,46 @@ static void __sched rt_mutex_handle_dead
+@@ -1329,31 +1404,46 @@ static void __sched rt_mutex_handle_dead
/**
* __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
* @lock: The rtmutex to block lock
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__set_current_state(TASK_RUNNING);
remove_waiter(lock, waiter);
rt_mutex_handle_deadlock(ret, chwalk, waiter);
-@@ -1362,14 +1452,17 @@ static int __sched __rt_mutex_slowlock(s
+@@ -1368,14 +1458,17 @@ static int __sched __rt_mutex_slowlock(s
}
static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
@@ -230,7 +230,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_free_waiter(&waiter);
return ret;
-@@ -1378,9 +1471,11 @@ static inline int __rt_mutex_slowlock_lo
+@@ -1384,9 +1477,11 @@ static inline int __rt_mutex_slowlock_lo
/*
* rt_mutex_slowlock - Locking slowpath invoked when fast path fails
* @lock: The rtmutex to block lock
@@ -242,7 +242,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned int state)
{
unsigned long flags;
-@@ -1395,7 +1490,7 @@ static int __sched rt_mutex_slowlock(str
+@@ -1401,7 +1496,7 @@ static int __sched rt_mutex_slowlock(str
* irqsave/restore variants.
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return ret;
-@@ -1407,7 +1502,7 @@ static __always_inline int __rt_mutex_lo
+@@ -1413,7 +1508,7 @@ static __always_inline int __rt_mutex_lo
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
@@ -260,7 +260,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif /* RT_MUTEX_BUILD_MUTEX */
-@@ -1434,7 +1529,7 @@ static void __sched rtlock_slowlock_lock
+@@ -1440,7 +1535,7 @@ static void __sched rtlock_slowlock_lock
/* Save current state and set state to TASK_RTLOCK_WAIT */
current_save_and_set_rtlock_wait_state();
diff --git a/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch b/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
index 23d5fc97b043..bed4aad350e3 100644
--- a/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
+++ b/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.rst for details.
*/
-@@ -1529,6 +1534,43 @@ static __always_inline int __rt_mutex_lo
+@@ -1535,6 +1540,43 @@ static __always_inline int __rt_mutex_lo
* Functions required for spin/rw_lock substitution on RT kernels
*/
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rtlock_slowlock_locked - Slow path lock acquisition for RT locks
* @lock: The underlying rt mutex
-@@ -1536,6 +1578,7 @@ static __always_inline int __rt_mutex_lo
+@@ -1542,6 +1584,7 @@ static __always_inline int __rt_mutex_lo
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
struct rt_mutex_waiter waiter;
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_assert_held(&lock->wait_lock);
-@@ -1554,9 +1597,14 @@ static void __sched rtlock_slowlock_lock
+@@ -1560,9 +1603,14 @@ static void __sched rtlock_slowlock_lock
if (try_to_take_rt_mutex(lock, current, &waiter))
break;
diff --git a/patches/locking_rtmutex__Guard_regular_sleeping_locks_specific_functions.patch b/patches/locking_rtmutex__Guard_regular_sleeping_locks_specific_functions.patch
index 18759f54c970..ba8bfba3544b 100644
--- a/patches/locking_rtmutex__Guard_regular_sleeping_locks_specific_functions.patch
+++ b/patches/locking_rtmutex__Guard_regular_sleeping_locks_specific_functions.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1069,10 +1069,139 @@ static void __sched mark_wakeup_next_wai
+@@ -1075,10 +1075,139 @@ static void __sched mark_wakeup_next_wai
raw_spin_unlock(&current->pi_lock);
}
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* have just failed to try_to_take_rt_mutex().
*/
static void __sched remove_waiter(struct rt_mutex_base *lock,
-@@ -1280,125 +1409,4 @@ static __always_inline int __rt_mutex_lo
+@@ -1286,125 +1415,4 @@ static __always_inline int __rt_mutex_lo
return rt_mutex_slowlock(lock, state);
}
diff --git a/patches/locking_rtmutex__Implement_equal_priority_lock_stealing.patch b/patches/locking_rtmutex__Implement_equal_priority_lock_stealing.patch
index da94c24c17fe..84f0ad0955ec 100644
--- a/patches/locking_rtmutex__Implement_equal_priority_lock_stealing.patch
+++ b/patches/locking_rtmutex__Implement_equal_priority_lock_stealing.patch
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define __node_2_waiter(node) \
rb_entry((node), struct rt_mutex_waiter, tree_entry)
-@@ -906,19 +926,21 @@ try_to_take_rt_mutex(struct rt_mutex_bas
+@@ -912,19 +932,21 @@ try_to_take_rt_mutex(struct rt_mutex_bas
* trylock attempt.
*/
if (waiter) {
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* If the lock has waiters already we check whether @task is
-@@ -929,13 +951,9 @@ try_to_take_rt_mutex(struct rt_mutex_bas
+@@ -935,13 +957,9 @@ try_to_take_rt_mutex(struct rt_mutex_bas
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
diff --git a/patches/locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch b/patches/locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch
index 35402c5b8ced..d89675965cd0 100644
--- a/patches/locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch
+++ b/patches/locking_rtmutex__Prepare_RT_rt_mutex_wake_q_for_RT_locks.patch
@@ -13,36 +13,34 @@ No functional change for non-RT enabled kernels.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/sched/wake_q.h | 7 ++++++-
- kernel/locking/rtmutex.c | 12 ++++++++++--
- kernel/locking/rtmutex_common.h | 3 +++
- kernel/sched/core.c | 4 ++--
- 4 files changed, 21 insertions(+), 5 deletions(-)
+V3: Switch back to the working version (Mike)
+V2: Make it symmetric (PeterZ)
+---
+ include/linux/sched/wake_q.h | 1 -
+ kernel/locking/rtmutex.c | 18 ++++++++++++++++--
+ kernel/locking/rtmutex_common.h | 5 ++++-
+ 3 files changed, 20 insertions(+), 4 deletions(-)
---
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
-@@ -61,6 +61,11 @@ static inline bool wake_q_empty(struct w
-
+@@ -62,5 +62,4 @@ static inline bool wake_q_empty(struct w
extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
--extern void wake_up_q(struct wake_q_head *head);
-+extern void __wake_up_q(struct wake_q_head *head, unsigned int state);
-+
-+static inline void wake_up_q(struct wake_q_head *head)
-+{
-+ __wake_up_q(head, TASK_NORMAL);
-+}
-
+ extern void wake_up_q(struct wake_q_head *head);
+-
#endif /* _LINUX_SCHED_WAKE_Q_H */
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -351,12 +351,20 @@ static __always_inline void rt_mutex_adj
+@@ -351,12 +351,26 @@ static __always_inline void rt_mutex_adj
static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
struct rt_mutex_waiter *w)
{
- wake_q_add(&wqh->head, w->task);
+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
-+ wake_q_add(&wqh->rt_head, w->task);
++ if (IS_ENABLED(CONFIG_PROVE_LOCKING))
++ WARN_ON_ONCE(wqh->rtlock_task);
++ get_task_struct(w->task);
++ wqh->rtlock_task = w->task;
+ } else {
+ wake_q_add(&wqh->head, w->task);
+ }
@@ -51,8 +49,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
{
- wake_up_q(&wqh->head);
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !wake_q_empty(&wqh->rt_head))
-+ __wake_up_q(&wqh->rt_head, TASK_RTLOCK_WAIT);
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
++ wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
++ put_task_struct(wqh->rtlock_task);
++ wqh->rtlock_task = NULL;
++ }
+
+ if (!wake_q_empty(&wqh->head))
+ wake_up_q(&wqh->head);
@@ -61,41 +62,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -43,14 +43,17 @@ struct rt_mutex_waiter {
+@@ -42,15 +42,18 @@ struct rt_mutex_waiter {
+ /**
* rt_wake_q_head - Wrapper around regular wake_q_head to support
* "sleeping" spinlocks on RT
- * @head: The regular wake_q_head for sleeping lock variants
-+ * @rt_head: The wake_q_head for RT lock (spin/rwlock) variants
+- * @head: The regular wake_q_head for sleeping lock variants
++ * @head: The regular wake_q_head for sleeping lock variants
++ * @rtlock_task: Task pointer for RT lock (spin/rwlock) wakeups
*/
struct rt_wake_q_head {
struct wake_q_head head;
-+ struct wake_q_head rt_head;
++ struct task_struct *rtlock_task;
};
#define DEFINE_RT_WAKE_Q(name) \
struct rt_wake_q_head name = { \
.head = WAKE_Q_HEAD_INITIALIZER(name.head), \
-+ .rt_head = WAKE_Q_HEAD_INITIALIZER(name.rt_head),\
++ .rtlock_task = NULL, \
}
/*
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -916,7 +916,7 @@ void wake_q_add_safe(struct wake_q_head
- put_task_struct(task);
- }
-
--void wake_up_q(struct wake_q_head *head)
-+void __wake_up_q(struct wake_q_head *head, unsigned int state)
- {
- struct wake_q_node *node = head->first;
-
-@@ -932,7 +932,7 @@ void wake_up_q(struct wake_q_head *head)
- * wake_up_process() executes a full barrier, which pairs with
- * the queueing in wake_q_add() so as not to miss wakeups.
- */
-- wake_up_process(task);
-+ wake_up_state(task, state);
- put_task_struct(task);
- }
- }
diff --git a/patches/locking_rtmutex__Provide_the_spin_rwlock_core_lock_function.patch b/patches/locking_rtmutex__Provide_the_spin_rwlock_core_lock_function.patch
index 57f661554349..e45c69710a8b 100644
--- a/patches/locking_rtmutex__Provide_the_spin_rwlock_core_lock_function.patch
+++ b/patches/locking_rtmutex__Provide_the_spin_rwlock_core_lock_function.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1410,3 +1410,63 @@ static __always_inline int __rt_mutex_lo
+@@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lo
return rt_mutex_slowlock(lock, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
diff --git a/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch b/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
index 9eaaacb4d463..b192d183887b 100644
--- a/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
+++ b/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
@@ -3,7 +3,7 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 30 Jul 2021 11:58:42 +0200
There is no reason to restrict adaptive spinwait to the rt mutex based
-'spinlocks'. Testing shows a 4x impromevemt for hackbench.
+'spinlocks'. Testing on RT shows a 4x impromevemt for hackbench.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1277,6 +1277,43 @@ static __always_inline void __rt_mutex_u
+@@ -1283,6 +1283,43 @@ static __always_inline void __rt_mutex_u
rt_mutex_slowunlock(lock);
}
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef RT_MUTEX_BUILD_MUTEX
/*
* Functions required for:
-@@ -1361,6 +1398,7 @@ static int __sched rt_mutex_slowlock_blo
+@@ -1367,6 +1404,7 @@ static int __sched rt_mutex_slowlock_blo
struct rt_mutex_waiter *waiter)
{
struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int ret = 0;
for (;;) {
-@@ -1383,9 +1421,14 @@ static int __sched rt_mutex_slowlock_blo
+@@ -1389,9 +1427,14 @@ static int __sched rt_mutex_slowlock_blo
break;
}
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock_irq(&lock->wait_lock);
set_current_state(state);
-@@ -1534,43 +1577,6 @@ static __always_inline int __rt_mutex_lo
+@@ -1540,43 +1583,6 @@ static __always_inline int __rt_mutex_lo
* Functions required for spin/rw_lock substitution on RT kernels
*/
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rtlock_slowlock_locked - Slow path lock acquisition for RT locks
* @lock: The underlying rt mutex
-@@ -1603,7 +1609,7 @@ static void __sched rtlock_slowlock_lock
+@@ -1609,7 +1615,7 @@ static void __sched rtlock_slowlock_lock
owner = NULL;
raw_spin_unlock_irq(&lock->wait_lock);
diff --git a/patches/locking_ww_mutex__Implement_ww_rt_mutex.patch b/patches/locking_ww_mutex__Implement_ww_rt_mutex.patch
index a69b1005df76..8d69d9f0b218 100644
--- a/patches/locking_ww_mutex__Implement_ww_rt_mutex.patch
+++ b/patches/locking_ww_mutex__Implement_ww_rt_mutex.patch
@@ -10,6 +10,8 @@ on RT enabled kernels.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
+V3: Make lock_interruptible interruptible for real (Mike)
+---
kernel/locking/Makefile | 2 -
kernel/locking/ww_rt_mutex.c | 76 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+), 1 deletion(-)
@@ -90,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+int __sched
+ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
-+ return __ww_rt_mutex_lock(lock, ctx, TASK_UNINTERRUPTIBLE, _RET_IP_);
++ return __ww_rt_mutex_lock(lock, ctx, TASK_INTERRUPTIBLE, _RET_IP_);
+}
+EXPORT_SYMBOL(ww_mutex_lock_interruptible);
+
diff --git a/patches/locking_ww_mutex__Remove___sched_annotation.patch b/patches/locking_ww_mutex__Remove___sched_annotation.patch
index 71fae1e3a8d9..3e5f684adb71 100644
--- a/patches/locking_ww_mutex__Remove___sched_annotation.patch
+++ b/patches/locking_ww_mutex__Remove___sched_annotation.patch
@@ -4,7 +4,7 @@ Date: Fri Jul 16 18:07:49 2021 +0200
From: Peter Zijlstra <peterz@infradead.org>
-None of these functions will be on the stack when we block in
+None of these functions will be on the stack when blocking in
schedule(), hence __sched is not needed.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
diff --git a/patches/powerpc__Add_support_for_lazy_preemption.patch b/patches/powerpc__Add_support_for_lazy_preemption.patch
index 733e4bd51845..b458ef15a382 100644
--- a/patches/powerpc__Add_support_for_lazy_preemption.patch
+++ b/patches/powerpc__Add_support_for_lazy_preemption.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -240,6 +240,7 @@ config PPC
+@@ -239,6 +239,7 @@ config PPC
select HAVE_PERF_EVENTS_NMI if PPC64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch b/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
index d217930df20a..863dcd246b48 100644
--- a/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
+++ b/patches/rtmutex--Split-out-the-inner-parts-of-struct-rtmutex.patch
@@ -2,6 +2,8 @@ Subject: rtmutex: Split out the inner parts of struct rtmutex
From: Peter Zijlstra <peterz@infradead.org>
Date: Wed, 14 Jul 2021 15:30:47 +0200
+From: Peter Zijlstra <peterz@infradead.org>
+
RT builds substitutions for rwsem, mutex, spinlock and rwlock around
rtmutexes. Split the inner working out so each lock substitution can use
them with the appropiate lockdep annotations. This avoid having an extra
diff --git a/patches/x86__Support_for_lazy_preemption.patch b/patches/x86__Support_for_lazy_preemption.patch
index d30599dcde91..f9d95b040bb0 100644
--- a/patches/x86__Support_for_lazy_preemption.patch
+++ b/patches/x86__Support_for_lazy_preemption.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -233,6 +233,7 @@ config X86
+@@ -232,6 +232,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP