summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2021-07-30 15:05:26 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2021-07-30 15:05:26 +0200
commit694d2b3c7022892144c8ae3488fa007113c5cfa4 (patch)
tree1c8b5b50de3df46bc97c8fa1b76ef30a7f06b04f
parent8d35e84483e7e691a920d59bf110de3114ea207a (diff)
downloadlinux-rt-5.14-rc3-rt2-patches.tar.gz
[ANNOUNCE] v5.14-rc3-rt2v5.14-rc3-rt2-patches
Dear RT folks! I'm pleased to announce the v5.14-rc3-rt2 patch set. Changes since v5.14-rc3-rt1: - To cope with SLUB based hackbench regression adaptive spinning has been introduced for all rtmutex based locks. This improves the hackbench time significantly. Known issues - netconsole triggers WARN. - The "Memory controller" (CONFIG_MEMCG) has been disabled. The delta patch against v5.14-rc3-rt1 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc3-rt1-rt2.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc3-rt2 The RT patch against v5.14-rc3 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc3-rt2.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc3-rt2.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/Add_localversion_for_-RT_release.patch2
-rw-r--r--patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch136
-rw-r--r--patches/series1
3 files changed, 138 insertions, 1 deletions
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 0e6dbb0e3c10..d960d516454d 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt1
++-rt2
diff --git a/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch b/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
new file mode 100644
index 000000000000..9eaaacb4d463
--- /dev/null
+++ b/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
@@ -0,0 +1,136 @@
+Subject: locking/rtmutex: Use adaptive spinwait for all rtmutex based locks
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 30 Jul 2021 11:58:42 +0200
+
+There is no reason to restrict adaptive spinwait to the rt mutex based
+'spinlocks'. Testing shows a 4x improvement for hackbench.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+---
+ kernel/locking/rtmutex.c | 84 +++++++++++++++++++++++++----------------------
+ 1 file changed, 45 insertions(+), 39 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1277,6 +1277,43 @@ static __always_inline void __rt_mutex_u
+ rt_mutex_slowunlock(lock);
+ }
+
++#ifdef CONFIG_SMP
++/*
++ * Note that owner is a speculative pointer and dereferencing relies
++ * on rcu_read_lock() and the check against the lock owner.
++ */
++static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
++ struct task_struct *owner)
++{
++ bool res = true;
++
++ rcu_read_lock();
++ for (;;) {
++ /* Owner changed. Trylock again */
++ if (owner != rt_mutex_owner(lock))
++ break;
++ /*
++ * Ensure that owner->on_cpu is dereferenced _after_
++ * checking the above to be valid.
++ */
++ barrier();
++ if (!owner->on_cpu) {
++ res = false;
++ break;
++ }
++ cpu_relax();
++ }
++ rcu_read_unlock();
++ return res;
++}
++#else
++static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
++ struct task_struct *owner)
++{
++ return false;
++}
++#endif
++
+ #ifdef RT_MUTEX_BUILD_MUTEX
+ /*
+ * Functions required for:
+@@ -1361,6 +1398,7 @@ static int __sched rt_mutex_slowlock_blo
+ struct rt_mutex_waiter *waiter)
+ {
+ struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
++ struct task_struct *owner;
+ int ret = 0;
+
+ for (;;) {
+@@ -1383,9 +1421,14 @@ static int __sched rt_mutex_slowlock_blo
+ break;
+ }
+
++ if (waiter == rt_mutex_top_waiter(lock))
++ owner = rt_mutex_owner(lock);
++ else
++ owner = NULL;
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- schedule();
++ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
++ schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(state);
+@@ -1534,43 +1577,6 @@ static __always_inline int __rt_mutex_lo
+ * Functions required for spin/rw_lock substitution on RT kernels
+ */
+
+-#ifdef CONFIG_SMP
+-/*
+- * Note that owner is a speculative pointer and dereferencing relies
+- * on rcu_read_lock() and the check against the lock owner.
+- */
+-static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
+- struct task_struct *owner)
+-{
+- bool res = true;
+-
+- rcu_read_lock();
+- for (;;) {
+- /* Owner changed. Trylock again */
+- if (owner != rt_mutex_owner(lock))
+- break;
+- /*
+- * Ensure that owner->on_cpu is dereferenced _after_
+- * checking the above to be valid.
+- */
+- barrier();
+- if (!owner->on_cpu) {
+- res = false;
+- break;
+- }
+- cpu_relax();
+- }
+- rcu_read_unlock();
+- return res;
+-}
+-#else
+-static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
+- struct task_struct *owner)
+-{
+- return false;
+-}
+-#endif
+-
+ /**
+ * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
+ * @lock: The underlying rt mutex
+@@ -1603,7 +1609,7 @@ static void __sched rtlock_slowlock_lock
+ owner = NULL;
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- if (!owner || !rtlock_adaptive_spinwait(lock, owner))
++ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
+ schedule_rtlock();
+
+ raw_spin_lock_irq(&lock->wait_lock);
diff --git a/patches/series b/patches/series
index 01044844a584..a1f95cf35d55 100644
--- a/patches/series
+++ b/patches/series
@@ -185,6 +185,7 @@ rtmutex__Prevent_lockdep_false_positive_with_PI_futexes.patch
preempt__Adjust_PREEMPT_LOCK_OFFSET_for_RT.patch
locking_rtmutex__Implement_equal_priority_lock_stealing.patch
locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
+locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
###########################################################################
# Locking: RT bits. Need review