Diffstat (limited to 'patches/0070-locking-rtmutex-Add-adaptive-spinwait-mechanism.patch')
 patches/0070-locking-rtmutex-Add-adaptive-spinwait-mechanism.patch | 141 ----------
 1 file changed, 141 deletions(-)
diff --git a/patches/0070-locking-rtmutex-Add-adaptive-spinwait-mechanism.patch b/patches/0070-locking-rtmutex-Add-adaptive-spinwait-mechanism.patch
deleted file mode 100644
index 9abfaa66bc97..000000000000
--- a/patches/0070-locking-rtmutex-Add-adaptive-spinwait-mechanism.patch
+++ /dev/null
@@ -1,141 +0,0 @@
-From: Steven Rostedt <rostedt@goodmis.org>
-Date: Sun, 15 Aug 2021 23:29:25 +0200
-Subject: [PATCH 70/72] locking/rtmutex: Add adaptive spinwait mechanism
-
-Going to sleep when locks are contended can be quite inefficient when the
-contention time is short and the lock owner is running on a different CPU.
-
-The MCS mechanism cannot be used because MCS is strictly FIFO ordered,
-whereas rtmutex based locks order their waiters by priority.
-
-Provide a simple adaptive spinwait mechanism which currently restricts the
-spinning to the top priority waiter.
-
-[ tglx: Provided a contemporary changelog, extended it to all rtmutex based
- locks and updated it to match the other spin on owner implementations ]
-
-Originally-by: Gregory Haskins <ghaskins@novell.com>
-Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Ingo Molnar <mingo@kernel.org>
-Link: https://lore.kernel.org/r/20210815211305.912050691@linutronix.de
----
- kernel/locking/rtmutex.c | 67 +++++++++++++++++++++++++++++++++++++++++++++--
- 1 file changed, 65 insertions(+), 2 deletions(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -8,6 +8,11 @@
-  * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
-  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
-  * Copyright (C) 2006 Esben Nielsen
-+ * Adaptive Spinlocks:
-+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
-+ *				     and Peter Morreale,
-+ * Adaptive Spinlocks simplification:
-+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
-  *
-  * See Documentation/locking/rt-mutex-design.rst for details.
-  */
-@@ -1297,6 +1302,52 @@ static __always_inline void __rt_mutex_u
- 	rt_mutex_slowunlock(lock);
- }
-
-+#ifdef CONFIG_SMP
-+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
-+				  struct rt_mutex_waiter *waiter,
-+				  struct task_struct *owner)
-+{
-+	bool res = true;
-+
-+	rcu_read_lock();
-+	for (;;) {
-+		/* If owner changed, trylock again. */
-+		if (owner != rt_mutex_owner(lock))
-+			break;
-+		/*
-+		 * Ensure that @owner is dereferenced after checking that
-+		 * the lock owner still matches @owner. If that fails,
-+		 * @owner might point to freed memory. If it still matches,
-+		 * the rcu_read_lock() ensures the memory stays valid.
-+		 */
-+		barrier();
-+		/*
-+		 * Stop spinning when:
-+		 *  - the lock owner has been scheduled out
-+		 *  - current is no longer the top waiter
-+		 *  - current is requested to reschedule (redundant
-+		 *    for CONFIG_PREEMPT_RCU=y)
-+		 *  - the VCPU on which owner runs is preempted
-+		 */
-+		if (!owner->on_cpu || waiter != rt_mutex_top_waiter(lock) ||
-+		    need_resched() || vcpu_is_preempted(task_cpu(owner))) {
-+			res = false;
-+			break;
-+		}
-+		cpu_relax();
-+	}
-+	rcu_read_unlock();
-+	return res;
-+}
-+#else
-+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
-+				  struct rt_mutex_waiter *waiter,
-+				  struct task_struct *owner)
-+{
-+	return false;
-+}
-+#endif
-+
- #ifdef RT_MUTEX_BUILD_MUTEX
- /*
-  * Functions required for:
-@@ -1381,6 +1432,7 @@ static int __sched rt_mutex_slowlock_blo
- 					   struct rt_mutex_waiter *waiter)
- {
- 	struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
-+	struct task_struct *owner;
- 	int ret = 0;
-
- 	for (;;) {
-@@ -1403,9 +1455,14 @@ static int __sched rt_mutex_slowlock_blo
- 			break;
- 		}
-
-+		if (waiter == rt_mutex_top_waiter(lock))
-+			owner = rt_mutex_owner(lock);
-+		else
-+			owner = NULL;
- 		raw_spin_unlock_irq(&lock->wait_lock);
-
--		schedule();
-+		if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
-+			schedule();
-
- 		raw_spin_lock_irq(&lock->wait_lock);
- 		set_current_state(state);
-@@ -1561,6 +1618,7 @@ static __always_inline int __rt_mutex_lo
- static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
- {
- 	struct rt_mutex_waiter waiter;
-+	struct task_struct *owner;
-
- 	lockdep_assert_held(&lock->wait_lock);
-
-@@ -1579,9 +1637,14 @@ static void __sched rtlock_slowlock_lock
- 		if (try_to_take_rt_mutex(lock, current, &waiter))
- 			break;
-
-+		if (&waiter == rt_mutex_top_waiter(lock))
-+			owner = rt_mutex_owner(lock);
-+		else
-+			owner = NULL;
- 		raw_spin_unlock_irq(&lock->wait_lock);
-
--		schedule_rtlock();
-+		if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
-+			schedule_rtlock();
-
- 		raw_spin_lock_irq(&lock->wait_lock);
- 		set_current_state(TASK_RTLOCK_WAIT);
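
Note: the removed hunks above implement the idea in three steps. Only the waiter at the head of the priority-ordered wait tree records the current lock owner, the wait_lock is dropped, and that waiter then spins in rtmutex_spin_on_owner() instead of calling schedule() for as long as the same owner stays on a CPU. For readers who want the decision logic in isolation, here is a minimal user-space sketch in portable C11. Every toy_* identifier is invented for this illustration; RCU, the waiter tree (and with it the top-waiter restriction), need_resched() and vcpu_is_preempted() have no user-space equivalent and are omitted, and nothing in user space keeps on_cpu accurate, so this is a teaching aid rather than a usable lock.

/*
 * Illustrative sketch only, not the kernel code above. The toy_* names
 * are invented for this example. A real adaptive spinner needs the
 * scheduler to maintain on_cpu, which only the kernel can do.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct toy_owner {
	atomic_bool on_cpu;			/* stand-in for task_struct::on_cpu */
};

struct toy_lock {
	_Atomic(struct toy_owner *) owner;	/* NULL when unlocked */
};

/* Succeeds only if the lock is currently free. */
static bool toy_trylock(struct toy_lock *lock, struct toy_owner *self)
{
	struct toy_owner *expected = NULL;

	return atomic_compare_exchange_strong(&lock->owner, &expected, self);
}

/*
 * Same return-value convention as rtmutex_spin_on_owner(): true means
 * the owner changed and an immediate trylock retry is worthwhile;
 * false means spinning stopped for another reason and the caller
 * should block instead of burning cycles.
 */
static bool toy_spin_on_owner(struct toy_lock *lock, struct toy_owner *owner)
{
	for (;;) {
		if (atomic_load(&lock->owner) != owner)
			return true;		/* owner changed: retry trylock */
		if (!atomic_load(&owner->on_cpu))
			return false;		/* owner not running: block */
#if defined(__x86_64__) || defined(__i386__)
		__builtin_ia32_pause();		/* cpu_relax() equivalent */
#endif
	}
}

static void toy_lock_acquire(struct toy_lock *lock, struct toy_owner *self)
{
	while (!toy_trylock(lock, self)) {
		/*
		 * In the kernel, rcu_read_lock() keeps *owner valid while
		 * spinning; this toy assumes owner structs are never freed.
		 */
		struct toy_owner *owner = atomic_load(&lock->owner);

		/* Spin only while a running owner holds the lock. */
		if (!owner || !toy_spin_on_owner(lock, owner))
			sched_yield();		/* stand-in for schedule() */
	}
}

static void toy_lock_release(struct toy_lock *lock)
{
	atomic_store(&lock->owner, NULL);
}

The design point worth keeping in mind is the asymmetric return value of the spin helper, which the sketch copies from the patch: an owner change is the one exit where an immediate trylock retry is likely to succeed, so the caller loops straight back to the acquisition attempt, while every other exit condition means the CPU time is better spent blocking.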