author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-18 10:40:00 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-18 10:40:00 +0200
commit    32ffa5bc13e5bb878b58b8b8f437e5caddc45fe0 (patch)
tree      32ea079b91b8c46e6fe5ffd32df56c2eb4db8c62 /patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch
parent    7909bbc59ec2a39c42b7b71d0ba1bbfb837c79e5 (diff)
download  linux-rt-32ffa5bc13e5bb878b58b8b8f437e5caddc45fe0.tar.gz
[ANNOUNCE] v5.14-rc6-rt11 (tag: v5.14-rc6-rt11-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rc6-rt11 patch set.

Changes since v5.14-rc6-rt10:

- The RCU & ARM64 patches by Valentin Schneider have been updated to v3. The
  logic in migratable() for UP has been changed and the function itself was
  renamed (which differs from what was posted to the list).

- printk.h now includes a locking header directly. This unbreaks the POWER and
  POWER64 build and makes another patch (an earlier attempt to unbreak
  recursive includes) obsolete.

- Update the SLUB series by Vlastimil Babka to slub-local-lock-v4r4:

  - Clark Williams reported a crash in the SLUB memory allocator. It had been
    there since the SLUB rework in v5.13-rt1. Patch by Vlastimil Babka.

  - Sven Eckelmann reported a crash on non-RT with LOCKSTAT enabled. Patch by
    Vlastimil Babka.

- rcutorture works again. Patch by Valentin Schneider.

- Update RT's locking patches to what has been merged into tip/locking/core.
  A visible change is the definition of local_lock_t on PREEMPT_RT. As a
  result, access to local_lock_t's dep_map is the same on RT & !RT.

Known issues:

- netconsole triggers WARN.

- The "Memory controller" (CONFIG_MEMCG) has been disabled.

- An RCU and ARM64 warning has been fixed by Valentin Schneider. It is still
  not clear if the RCU related change is correct.

- Clark Williams reported issues in i915 (execlists_dequeue_irq()).

- Clark Williams reported issues with kcov enabled.

- Valentin Schneider reported a few splats on ARM64, see
  https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/

The delta patch against v5.14-rc6-rt10 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc6-rt10-rt11.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc6-rt11

The RT patch against v5.14-rc6 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc6-rt11.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc6-rt11.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
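The local_lock_t remark above can be illustrated with a small, self-contained
sketch. This is not the kernel's definition; the type, member, and macro names
below (local_lock_demo_t, the dep_map stand-in struct, DEMO_PREEMPT_RT) are
assumptions made purely for illustration. The point is only that once both
layouts expose a dep_map member under the same name, lockdep-style annotation
code needs no RT/!RT special casing:

    #include <stdio.h>

    /* Stand-in for the kernel's struct lockdep_map; illustrative only. */
    struct dep_map {
            const char *name;
    };

    #ifdef DEMO_PREEMPT_RT                  /* stand-in for CONFIG_PREEMPT_RT */
    typedef struct {
            struct dep_map dep_map;         /* same top-level member as on !RT */
            int rt_backing_lock;            /* placeholder for the RT substitution */
    } local_lock_demo_t;
    #else
    typedef struct {
            struct dep_map dep_map;
    } local_lock_demo_t;
    #endif

    /* One annotation path works for both configurations: ll->dep_map. */
    static void demo_annotate(local_lock_demo_t *ll)
    {
            printf("acquiring %s\n", ll->dep_map.name);
    }

    int main(void)
    {
            local_lock_demo_t ll = { .dep_map = { .name = "demo_local_lock" } };

            demo_annotate(&ll);
            return 0;
    }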
Diffstat (limited to 'patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch')
-rw-r--r--  patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch  184
 1 file changed, 0 insertions(+), 184 deletions(-)
diff --git a/patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch b/patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch
deleted file mode 100644
index 2a419a3b26d5..000000000000
--- a/patches/locking_rtmutex__Provide_rt_mutex_slowlock_locked.patch
+++ /dev/null
@@ -1,184 +0,0 @@
-Subject: locking/rtmutex: Provide rt_mutex_slowlock_locked()
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue Jul 6 16:36:46 2021 +0200
-
-From: Thomas Gleixner <tglx@linutronix.de>
-
-Split the inner workings of rt_mutex_slowlock() out into a separate
-function which can be reused by the upcoming RT lock substitutions,
-e.g. for rw_semaphores.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-V2: Add the dropped debug_rt_mutex_free_waiter() - Valentin
----
- kernel/locking/rtmutex.c | 100 ++++++++++++++++++++++++-------------------
- kernel/locking/rtmutex_api.c | 2
- 2 files changed, 59 insertions(+), 43 deletions(-)
----
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1106,7 +1106,7 @@ static void __sched remove_waiter(struct
- }
-
- /**
-- * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
-+ * rt_mutex_slowlock_block() - Perform the wait-wake-try-to-take loop
- * @lock: the rt_mutex to take
- * @state: the state the task should block in (TASK_INTERRUPTIBLE
- * or TASK_UNINTERRUPTIBLE)
-@@ -1115,10 +1115,10 @@ static void __sched remove_waiter(struct
- *
- * Must be called with lock->wait_lock held and interrupts disabled
- */
--static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-- unsigned int state,
-- struct hrtimer_sleeper *timeout,
-- struct rt_mutex_waiter *waiter)
-+static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
-+ unsigned int state,
-+ struct hrtimer_sleeper *timeout,
-+ struct rt_mutex_waiter *waiter)
- {
- int ret = 0;
-
-@@ -1168,52 +1168,37 @@ static void __sched rt_mutex_handle_dead
- }
- }
-
--/*
-- * Slow path lock function:
-+/**
-+ * __rt_mutex_slowlock - Locking slowpath invoked with lock::wait_lock held
-+ * @lock: The rtmutex to block lock
-+ * @state: The task state for sleeping
-+ * @chwalk: Indicator whether full or partial chainwalk is requested
-+ * @waiter: Initializer waiter for blocking
- */
--static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-- unsigned int state,
-- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk)
-+static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
-+ unsigned int state,
-+ enum rtmutex_chainwalk chwalk,
-+ struct rt_mutex_waiter *waiter)
- {
-- struct rt_mutex_waiter waiter;
-- unsigned long flags;
-- int ret = 0;
--
-- rt_mutex_init_waiter(&waiter);
-+ int ret;
-
-- /*
-- * Technically we could use raw_spin_[un]lock_irq() here, but this can
-- * be called in early boot if the cmpxchg() fast path is disabled
-- * (debug, no architecture support). In this case we will acquire the
-- * rtmutex with lock->wait_lock held. But we cannot unconditionally
-- * enable interrupts in that early boot case. So we need to use the
-- * irqsave/restore variants.
-- */
-- raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+ lockdep_assert_held(&lock->wait_lock);
-
- /* Try to acquire the lock again: */
-- if (try_to_take_rt_mutex(lock, current, NULL)) {
-- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+ if (try_to_take_rt_mutex(lock, current, NULL))
- return 0;
-- }
-
- set_current_state(state);
-
-- /* Setup the timer, when timeout != NULL */
-- if (unlikely(timeout))
-- hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
--
-- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
-+ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
-
- if (likely(!ret))
-- /* sleep on the mutex */
-- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
-+ ret = rt_mutex_slowlock_block(lock, state, NULL, waiter);
-
- if (unlikely(ret)) {
- __set_current_state(TASK_RUNNING);
-- remove_waiter(lock, &waiter);
-- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
-+ remove_waiter(lock, waiter);
-+ rt_mutex_handle_deadlock(ret, chwalk, waiter);
- }
-
- /*
-@@ -1221,14 +1206,45 @@ static int __sched rt_mutex_slowlock(str
- * unconditionally. We might have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
-+ return ret;
-+}
-
-- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-+static inline int __rt_mutex_slowlock_locked(struct rt_mutex_base *lock,
-+ unsigned int state)
-+{
-+ struct rt_mutex_waiter waiter;
-+ int ret;
-
-- /* Remove pending timer: */
-- if (unlikely(timeout))
-- hrtimer_cancel(&timeout->timer);
-+ rt_mutex_init_waiter(&waiter);
-+
-+ ret = __rt_mutex_slowlock(lock, state, RT_MUTEX_MIN_CHAINWALK, &waiter);
-
- debug_rt_mutex_free_waiter(&waiter);
-+ return ret;
-+}
-+
-+/*
-+ * rt_mutex_slowlock - Locking slowpath invoked when fast path fails
-+ * @lock: The rtmutex to block lock
-+ * @state: The task state for sleeping
-+ */
-+static int __sched rt_mutex_slowlock(struct rt_mutex_base *lock,
-+ unsigned int state)
-+{
-+ unsigned long flags;
-+ int ret;
-+
-+ /*
-+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
-+ * be called in early boot if the cmpxchg() fast path is disabled
-+ * (debug, no architecture support). In this case we will acquire the
-+ * rtmutex with lock->wait_lock held. But we cannot unconditionally
-+ * enable interrupts in that early boot case. So we need to use the
-+ * irqsave/restore variants.
-+ */
-+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
-+ ret = __rt_mutex_slowlock_locked(lock, state);
-+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-
- return ret;
- }
-@@ -1239,7 +1255,7 @@ static __always_inline int __rt_mutex_lo
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
-
-- return rt_mutex_slowlock(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
-+ return rt_mutex_slowlock(lock, state);
- }
-
- static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
---- a/kernel/locking/rtmutex_api.c
-+++ b/kernel/locking/rtmutex_api.c
-@@ -342,7 +342,7 @@ int __sched rt_mutex_wait_proxy_lock(str
- raw_spin_lock_irq(&lock->wait_lock);
- /* sleep on the mutex */
- set_current_state(TASK_INTERRUPTIBLE);
-- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-+ ret = rt_mutex_slowlock_block(lock, TASK_INTERRUPTIBLE, to, waiter);
- /*
- * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
- * have to fix that up.
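As a rough, self-contained illustration of the split the deleted patch
describes (an inner slowpath that runs with the wait_lock already held,
wrapped by an outer function that only does the lock/unlock bracketing), here
is a minimal userspace analogy. It is not kernel code; all names (demo_lock,
demo_slowlock_locked, demo_slowlock) are hypothetical and the waiter/blocking
machinery is reduced to a trylock-style check:

    /* build with: cc -pthread demo.c */
    #include <pthread.h>
    #include <stdio.h>

    struct demo_lock {
            pthread_mutex_t wait_lock;      /* plays the role of lock->wait_lock */
            int owner;                      /* 0 == unowned */
    };

    /* Inner slowpath: caller must already hold wait_lock
     * (cf. __rt_mutex_slowlock_locked() above). */
    static int demo_slowlock_locked(struct demo_lock *lock, int me)
    {
            if (lock->owner == 0) {         /* "try to take the lock" step */
                    lock->owner = me;
                    return 0;
            }
            return -1;                      /* real code would enqueue a waiter and block */
    }

    /* Outer slowpath: only responsible for the wait_lock bracketing
     * (cf. rt_mutex_slowlock() above). */
    static int demo_slowlock(struct demo_lock *lock, int me)
    {
            int ret;

            pthread_mutex_lock(&lock->wait_lock);
            ret = demo_slowlock_locked(lock, me);
            pthread_mutex_unlock(&lock->wait_lock);

            return ret;
    }

    int main(void)
    {
            struct demo_lock l = { .wait_lock = PTHREAD_MUTEX_INITIALIZER, .owner = 0 };

            printf("first take:  %d\n", demo_slowlock(&l, 1));  /* 0: acquired */
            printf("second take: %d\n", demo_slowlock(&l, 2));  /* -1: contended */

            return 0;
    }

Other locked-section users (for instance an rwsem substitution built on the
same rtmutex core, as the patch description anticipates) could call the inner
function directly while already holding the wait lock, which is exactly the
reuse the split enables.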