Diffstat (limited to 'patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch')
-rw-r--r-- | patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch | 121 |
1 file changed, 0 insertions, 121 deletions
diff --git a/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch b/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
deleted file mode 100644
index 204bdbfb9db9..000000000000
--- a/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+++ /dev/null
@@ -1,121 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 12 Oct 2017 16:36:39 +0200
-Subject: [PATCH 13/22] locking/rtmutex: export lockdep-less version of
- rt_mutex's lock, trylock and unlock
-
-Required for lock implementation ontop of rtmutex.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c        |   54 ++++++++++++++++++++++++++++------------
- kernel/locking/rtmutex_common.h |    3 ++
- 2 files changed, 41 insertions(+), 16 deletions(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1451,12 +1451,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
- 	rt_mutex_postunlock(&wake_q);
- }
- 
--static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
-+int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
- {
- 	might_sleep();
-+	return rt_mutex_fastlock(lock, state, rt_mutex_slowlock);
-+}
-+
-+/**
-+ * rt_mutex_lock_state - lock a rt_mutex with a given state
-+ *
-+ * @lock:	The rt_mutex to be locked
-+ * @state:	The state to set when blocking on the rt_mutex
-+ */
-+static inline int __sched rt_mutex_lock_state(struct rt_mutex *lock,
-+					      unsigned int subclass, int state)
-+{
-+	int ret;
- 
- 	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
--	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
-+	ret = __rt_mutex_lock_state(lock, state);
-+	if (ret)
-+		mutex_release(&lock->dep_map, _RET_IP_);
-+	return ret;
-+}
-+
-+static inline void __rt_mutex_lock(struct rt_mutex *lock, unsigned int subclass)
-+{
-+	rt_mutex_lock_state(lock, subclass, TASK_UNINTERRUPTIBLE);
- }
- 
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
-@@ -1497,16 +1518,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
-  */
- int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
- {
--	int ret;
--
--	might_sleep();
--
--	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
--	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
--	if (ret)
--		mutex_release(&lock->dep_map, _RET_IP_);
--
--	return ret;
-+	return rt_mutex_lock_state(lock, 0, TASK_INTERRUPTIBLE);
- }
- EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
- 
-@@ -1523,6 +1535,14 @@ int __sched __rt_mutex_futex_trylock(str
- 	return __rt_mutex_slowtrylock(lock);
- }
- 
-+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
-+{
-+	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
-+		return 0;
-+
-+	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
-+}
-+
- /**
-  * rt_mutex_trylock - try to lock a rt_mutex
-  *
-@@ -1538,10 +1558,7 @@ int __sched rt_mutex_trylock(struct rt_m
- {
- 	int ret;
- 
--	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
--		return 0;
--
--	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
-+	ret = __rt_mutex_trylock(lock);
- 	if (ret)
- 		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- 
-@@ -1549,6 +1566,11 @@ int __sched rt_mutex_trylock(struct rt_m
- }
- EXPORT_SYMBOL_GPL(rt_mutex_trylock);
- 
-+void __sched __rt_mutex_unlock(struct rt_mutex *lock)
-+{
-+	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
-+}
-+
- /**
-  * rt_mutex_unlock - unlock a rt_mutex
-  *
---- a/kernel/locking/rtmutex_common.h
-+++ b/kernel/locking/rtmutex_common.h
-@@ -156,6 +156,9 @@ extern bool __rt_mutex_futex_unlock(stru
- extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
- /* RW semaphore special interface */
- 
-+extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
-+extern int __rt_mutex_trylock(struct rt_mutex *lock);
-+extern void __rt_mutex_unlock(struct rt_mutex *lock);
- int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
-				     struct hrtimer_sleeper *timeout,
-				     enum rtmutex_chainwalk chwalk,
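
The changelog of the deleted patch says the lockdep-less entry points are required for lock implementations built on top of rtmutex; the lockdep annotation (mutex_acquire/mutex_release on lock->dep_map) then lives in the outer wrapper rather than in the inner rt_mutex. As a rough, purely illustrative sketch (not part of the patch; the rt_wrap_lock type and function names are hypothetical), code living under kernel/locking/ could layer a simple sleeping lock over rt_mutex using the exported helpers like this:

/*
 * Illustrative sketch only -- not part of the deleted patch above.
 * A hypothetical wrapper lock layered on rt_mutex. It is assumed to
 * live under kernel/locking/ so it can include the internal header
 * that declares the lockdep-less entry points exported by the patch.
 */
#include <linux/rtmutex.h>
#include <linux/sched.h>
#include "rtmutex_common.h"

struct rt_wrap_lock {
	struct rt_mutex	rtmutex;
};

static inline void rt_wrap_lock(struct rt_wrap_lock *wl)
{
	/* Block uninterruptibly; the wrapper would add its own lockdep hooks. */
	__rt_mutex_lock_state(&wl->rtmutex, TASK_UNINTERRUPTIBLE);
}

static inline int rt_wrap_trylock(struct rt_wrap_lock *wl)
{
	/* Returns 1 on success, 0 if the underlying rtmutex is contended. */
	return __rt_mutex_trylock(&wl->rtmutex);
}

static inline void rt_wrap_unlock(struct rt_wrap_lock *wl)
{
	__rt_mutex_unlock(&wl->rtmutex);
}

The point of the split visible in the diff is that the wrapper, not the rt_mutex core, decides how to annotate the compound lock for lockdep, which is why rt_mutex_lock_state() keeps the dep_map handling while __rt_mutex_lock_state(), __rt_mutex_trylock() and __rt_mutex_unlock() skip it.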