 patches/block-mq-drop-preempt-disable.patch           |   2 +-
 patches/localversion.patch                            |   2 +-
 patches/net-prevent-abba-deadlock.patch               |   2 +-
 patches/rt-add-rt-locks.patch                         |  18 +-
 patches/rtmutex-Make-lock_killable-work.patch         |  44 +
 patches/rtmutex-Provide-locked-slowpath.patch         | 149 +
 patches/rtmutex-Provide-rt_mutex_lock_state.patch     | 111 +
 patches/rtmutex-add-a-first-shot-of-ww_mutex.patch    |  24 +-
 patches/rwsem-rt-Lift-single-reader-restriction.patch | 740 +
 patches/series                                        |   4 +
 10 files changed, 1072 insertions(+), 24 deletions(-)
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 91180ea78218..fd3a94019794 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void __blk_mq_complete_request(struct request *rq)
-@@ -915,14 +915,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+@@ -906,14 +906,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 340816c8febc..0cccc7790a5d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt15
++-rt16
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 279a0c8314f1..87ae1b25f18f 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2488,12 +2488,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2499,12 +2499,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index c5cd8758c714..b9bc3b25fd2b 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -2005,7 +2005,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1266,7 +1649,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1261,7 +1644,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -2014,7 +2014,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1360,7 +1743,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1355,7 +1738,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to undo a potential priority boosting.
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -2024,7 +2024,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long flags;
-@@ -1414,7 +1798,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1409,7 +1793,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -2033,7 +2033,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1468,17 +1852,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1463,17 +1847,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -2056,7 +2056,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Undo pi boosting if necessary: */
if (deboost)
-@@ -1606,7 +1993,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1601,7 +1988,8 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
* simple and will not need to retry.
*/
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -2066,7 +2066,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1617,21 +2005,23 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1612,21 +2000,23 @@ bool __sched __rt_mutex_futex_unlock(str
return false; /* done */
}
@@ -2092,7 +2092,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_adjust_prio(current);
}
}
-@@ -1666,13 +2056,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1661,13 +2051,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -2107,7 +2107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1687,7 +2076,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1682,7 +2071,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -2116,7 +2116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1893,3 +2282,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1888,3 +2277,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
diff --git a/patches/rtmutex-Make-lock_killable-work.patch b/patches/rtmutex-Make-lock_killable-work.patch
new file mode 100644
index 000000000000..695b8409b82e
--- /dev/null
+++ b/patches/rtmutex-Make-lock_killable-work.patch
@@ -0,0 +1,44 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 Apr 2017 12:50:59 +0200
+Subject: [PATCH] rtmutex: Make lock_killable work
+
+Locking an rt mutex killable does not work because signal handling is
+restricted to TASK_INTERRUPTIBLE.
+
+Use signal_pending_state() unconditionally.
+
+Cc: rt-stable@vger.kernel.org
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 19 +++++++------------
+ 1 file changed, 7 insertions(+), 12 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1206,18 +1206,13 @@ static int __sched
+ if (try_to_take_rt_mutex(lock, current, waiter))
+ break;
+
+- /*
+- * TASK_INTERRUPTIBLE checks for signals and
+- * timeout. Ignored otherwise.
+- */
+- if (unlikely(state == TASK_INTERRUPTIBLE)) {
+- /* Signal pending? */
+- if (signal_pending(current))
+- ret = -EINTR;
+- if (timeout && !timeout->task)
+- ret = -ETIMEDOUT;
+- if (ret)
+- break;
++ if (timeout && !timeout->task) {
++ ret = -ETIMEDOUT;
++ break;
++ }
++ if (signal_pending_state(state, current)) {
++ ret = -EINTR;
++ break;
+ }
+
+ raw_spin_unlock_irq(&lock->wait_lock);
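
For reference, signal_pending_state() already encodes the state-dependent
signal policy that the removed open-coded checks duplicated, which is why
calling it unconditionally is sufficient. A simplified sketch of that mainline
helper (shown for context only, not part of this patch):

static inline int signal_pending_state(long state, struct task_struct *p)
{
	/* Only interruptible or killable sleeps react to signals at all. */
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	/* Interruptible sleeps take any signal, killable only a fatal one. */
	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}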
diff --git a/patches/rtmutex-Provide-locked-slowpath.patch b/patches/rtmutex-Provide-locked-slowpath.patch
new file mode 100644
index 000000000000..d6eba1c43321
--- /dev/null
+++ b/patches/rtmutex-Provide-locked-slowpath.patch
@@ -0,0 +1,149 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 Apr 2017 12:51:01 +0200
+Subject: [PATCH] rtmutex: Provide locked slowpath
+
+The new rt rwsem implementation needs rtmutex::wait_lock to protect struct
+rw_semaphore. Dropping the lock and reacquiring it for locking the rtmutex
+would open a race window.
+
+Split out the inner workings of the locked slowpath so it can be called with
+wait_lock held.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 72 +++++++++++++++++++++++-----------------
+ kernel/locking/rtmutex_common.h | 9 +++++
+ 2 files changed, 51 insertions(+), 30 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1750,36 +1750,18 @@ static void ww_mutex_account_lock(struct
+ }
+ #endif
+
+-/*
+- * Slow path lock function:
+- */
+-static int __sched
+-rt_mutex_slowlock(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk,
+- struct ww_acquire_ctx *ww_ctx)
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
++ struct rt_mutex_waiter *waiter)
+ {
+- struct rt_mutex_waiter waiter;
+- unsigned long flags;
+- int ret = 0;
+-
+- rt_mutex_init_waiter(&waiter, false);
+-
+- /*
+- * Technically we could use raw_spin_[un]lock_irq() here, but this can
+- * be called in early boot if the cmpxchg() fast path is disabled
+- * (debug, no architecture support). In this case we will acquire the
+- * rtmutex with lock->wait_lock held. But we cannot unconditionally
+- * enable interrupts in that early boot case. So we need to use the
+- * irqsave/restore variants.
+- */
+- raw_spin_lock_irqsave(&lock->wait_lock, flags);
++ int ret;
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+ if (ww_ctx)
+ ww_mutex_account_lock(lock, ww_ctx);
+- raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return 0;
+ }
+
+@@ -1789,13 +1771,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ if (unlikely(timeout))
+ hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
+
+- ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
++ ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
+
+- if (likely(!ret))
++ if (likely(!ret)) {
+ /* sleep on the mutex */
+- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter,
++ ret = __rt_mutex_slowlock(lock, state, timeout, waiter,
+ ww_ctx);
+- else if (ww_ctx) {
++ } else if (ww_ctx) {
+ /* ww_mutex received EDEADLK, let it become EALREADY */
+ ret = __mutex_lock_check_stamp(lock, ww_ctx);
+ BUG_ON(!ret);
+@@ -1804,10 +1786,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ if (unlikely(ret)) {
+ __set_current_state(TASK_RUNNING);
+ if (rt_mutex_has_waiters(lock))
+- remove_waiter(lock, &waiter);
++ remove_waiter(lock, waiter);
+ /* ww_mutex want to report EDEADLK/EALREADY, let them */
+ if (!ww_ctx)
+- rt_mutex_handle_deadlock(ret, chwalk, &waiter);
++ rt_mutex_handle_deadlock(ret, chwalk, waiter);
+ } else if (ww_ctx) {
+ ww_mutex_account_lock(lock, ww_ctx);
+ }
+@@ -1817,6 +1799,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ * unconditionally. We might have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
++ return ret;
++}
++
++/*
++ * Slow path lock function:
++ */
++static int __sched
++rt_mutex_slowlock(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx)
++{
++ struct rt_mutex_waiter waiter;
++ unsigned long flags;
++ int ret = 0;
++
++ rt_mutex_init_waiter(&waiter, false);
++
++ /*
++ * Technically we could use raw_spin_[un]lock_irq() here, but this can
++ * be called in early boot if the cmpxchg() fast path is disabled
++ * (debug, no architecture support). In this case we will acquire the
++ * rtmutex with lock->wait_lock held. But we cannot unconditionally
++ * enable interrupts in that early boot case. So we need to use the
++ * irqsave/restore variants.
++ */
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++
++ ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx,
++ &waiter);
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -129,6 +129,15 @@ extern bool __rt_mutex_futex_unlock(stru
+
+ extern void rt_mutex_adjust_prio(struct task_struct *task);
+
++/* RW semaphore special interface */
++struct ww_acquire_ctx;
++
++int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
++ struct hrtimer_sleeper *timeout,
++ enum rtmutex_chainwalk chwalk,
++ struct ww_acquire_ctx *ww_ctx,
++ struct rt_mutex_waiter *waiter);
++
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # include "rtmutex-debug.h"
+ #else
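
A hedged usage sketch of the new entry point, mirroring the calling convention
the patch establishes (wait_lock held by the caller, waiter pre-initialized);
the wrapper function and its locking policy below are illustrative only:

static int example_lock_under_wait_lock(struct rt_mutex *lock)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret;

	rt_mutex_init_waiter(&waiter, false);

	raw_spin_lock_irqsave(&lock->wait_lock, flags);
	/* ... inspect or update state protected by wait_lock ... */
	ret = rt_mutex_slowlock_locked(lock, TASK_UNINTERRUPTIBLE, NULL,
				       RT_MUTEX_MIN_CHAINWALK, NULL, &waiter);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	debug_rt_mutex_free_waiter(&waiter);
	return ret;
}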
diff --git a/patches/rtmutex-Provide-rt_mutex_lock_state.patch b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
new file mode 100644
index 000000000000..d638e11ec9da
--- /dev/null
+++ b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -0,0 +1,111 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 Apr 2017 12:51:00 +0200
+Subject: [PATCH] rtmutex: Provide rt_mutex_lock_state()
+
+Allow rtmutex to be locked with arbitrary states. Preparatory patch for the
+rt rwsem rework.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 1 +
+ kernel/locking/rtmutex.c | 44 +++++++++++++++++++++++++-------------------
+ 2 files changed, 26 insertions(+), 19 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -105,6 +105,7 @@ extern void __rt_mutex_init(struct rt_mu
+ extern void rt_mutex_destroy(struct rt_mutex *lock);
+
+ extern void rt_mutex_lock(struct rt_mutex *lock);
++extern int rt_mutex_lock_state(struct rt_mutex *lock, int state);
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+ extern int rt_mutex_lock_killable(struct rt_mutex *lock);
+ extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -2003,21 +2003,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+ }
+
+ /**
++ * rt_mutex_lock_state - lock a rt_mutex with a given state
++ *
++ * @lock: The rt_mutex to be locked
++ * @state: The state to set when blocking on the rt_mutex
++ */
++int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state)
++{
++ might_sleep();
++
++ return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock);
++}
++
++/**
+ * rt_mutex_lock - lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
+ */
+ void __sched rt_mutex_lock(struct rt_mutex *lock)
+ {
+- might_sleep();
+-
+- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
++ rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+
+ /**
+ * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
+- *
++ **
+ * @lock: the rt_mutex to be locked
+ *
+ * Returns:
+@@ -2026,20 +2037,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+ */
+ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
+ {
+- might_sleep();
+-
+- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
++ return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
+
+-/*
+- * Futex variant, must not use fastpath.
+- */
+-int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
+-{
+- return rt_mutex_slowtrylock(lock);
+-}
+-
+ /**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+@@ -2049,16 +2050,21 @@ int __sched rt_mutex_futex_trylock(struc
+ * Returns:
+ * 0 on success
+ * -EINTR when interrupted by a signal
+- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+ */
+ int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+ {
+- might_sleep();
+-
+- return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
++ return rt_mutex_lock_state(lock, TASK_KILLABLE);
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
++/*
++ * Futex variant, must not use fastpath.
++ */
++int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
++{
++ return rt_mutex_slowtrylock(lock);
++}
++
+ /**
+ * rt_mutex_timed_lock - lock a rt_mutex interruptible
+ * the timeout structure is provided
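
A hedged usage sketch of the new state interface; the calling function and the
lock pointer are illustrative, while the return convention (0 or -EINTR)
follows the killable/interruptible wrappers above:

static int example_do_work(struct rt_mutex *lock)
{
	/* Block killable: only a fatal signal interrupts the wait. */
	if (rt_mutex_lock_state(lock, TASK_KILLABLE) == -EINTR)
		return -EINTR;

	/* ... critical section ... */

	rt_mutex_unlock(lock);
	return 0;
}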
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 68142ad38c64..a49d4432a136 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -86,8 +86,8 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1603,6 +1639,12 @@ static int __sched
- break;
+@@ -1598,6 +1634,12 @@ static int __sched
+ break;
}
+ if (ww_ctx && ww_ctx->acquired > 0) {
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1637,13 +1679,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1632,13 +1674,90 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1663,6 +1782,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1658,6 +1777,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return 0;
}
-@@ -1677,13 +1798,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1672,13 +1793,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -226,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1814,29 +1945,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1809,29 +1940,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -264,7 +264,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1881,7 +2016,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1876,7 +2011,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -273,7 +273,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1898,7 +2033,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1893,7 +2028,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -282,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1925,7 +2060,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1920,7 +2055,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1949,6 +2084,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1944,6 +2079,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -299,7 +299,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2230,7 +2366,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2225,7 +2361,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -308,7 +308,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irq(&lock->wait_lock);
-@@ -2283,24 +2419,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2278,24 +2414,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
diff --git a/patches/rwsem-rt-Lift-single-reader-restriction.patch b/patches/rwsem-rt-Lift-single-reader-restriction.patch
new file mode 100644
index 000000000000..3f02568c3b26
--- /dev/null
+++ b/patches/rwsem-rt-Lift-single-reader-restriction.patch
@@ -0,0 +1,740 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sat, 1 Apr 2017 12:51:02 +0200
+Subject: [PATCH] rwsem/rt: Lift single reader restriction
+
+The RT specific R/W semaphore implementation restricts the number of readers
+to one because a writer cannot block on multiple readers and inherit its
+priority or budget.
+
+The single reader restriction is painful in various ways:
+
+ - Performance bottleneck for multi-threaded applications in the page fault
+ path (mmap sem)
+
+ - Progress blocker for drivers which are carefully crafted to avoid the
+ potential reader/writer deadlock in mainline.
+
+The analysis of the writer code paths shows that properly written RT tasks
+should not take them. Syscalls like mmap() and file accesses which take the
+mmap sem write locked have unbounded latencies which are completely unrelated
+to the mmap sem. Other R/W sem users like graphics drivers are not suitable
+for RT tasks either.
+
+So there is little risk to hurt RT tasks when the RT rwsem implementation is
+changed in the following way:
+
+ - Allow concurrent readers
+
+ - Make writers block until the last reader has left the critical section.
+   This blocking is not subject to priority/budget inheritance.
+
+ - Readers blocked on a writer inherit their priority/budget in the normal
+ way.
+
+There is a drawback with this scheme. R/W semaphores become writer unfair,
+though the applications which have triggered writer starvation (mostly on
+mmap_sem) in the past are not really the typical workloads running on an RT
+system. So while it's unlikely to hit writer starvation, it's possible. If
+there are unexpected workloads on RT systems triggering it, we need to rethink
+the approach.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rwsem.h | 9 +
+ include/linux/rwsem_rt.h | 166 +++++-----------------------
+ kernel/locking/Makefile | 4
+ kernel/locking/rt.c | 167 ----------------------------
+ kernel/locking/rwsem-rt.c | 268 ++++++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 310 insertions(+), 304 deletions(-)
+ create mode 100644 kernel/locking/rwsem-rt.c
+
+--- a/include/linux/rwsem.h
++++ b/include/linux/rwsem.h
+@@ -110,6 +110,13 @@ static inline int rwsem_is_contended(str
+ return !list_empty(&sem->wait_list);
+ }
+
++#endif /* !PREEMPT_RT_FULL */
++
++/*
++ * The functions below are the same for all rwsem implementations including
++ * the RT specific variant.
++ */
++
+ /*
+ * lock for reading
+ */
+@@ -188,6 +195,4 @@ extern void up_read_non_owner(struct rw_
+ # define up_read_non_owner(sem) up_read(sem)
+ #endif
+
+-#endif /* !PREEMPT_RT_FULL */
+-
+ #endif /* _LINUX_RWSEM_H */
+--- a/include/linux/rwsem_rt.h
++++ b/include/linux/rwsem_rt.h
+@@ -5,163 +5,63 @@
+ #error "Include rwsem.h"
+ #endif
+
+-/*
+- * RW-semaphores are a spinlock plus a reader-depth count.
+- *
+- * Note that the semantics are different from the usual
+- * Linux rw-sems, in PREEMPT_RT mode we do not allow
+- * multiple readers to hold the lock at once, we only allow
+- * a read-lock owner to read-lock recursively. This is
+- * better for latency, makes the implementation inherently
+- * fair and makes it simpler as well.
+- */
+-
+ #include <linux/rtmutex.h>
++#include <linux/swait.h>
++
++#define READER_BIAS (1U << 31)
++#define WRITER_BIAS (1U << 30)
+
+ struct rw_semaphore {
+- struct rt_mutex lock;
+- int read_depth;
++ atomic_t readers;
++ struct rt_mutex rtmutex;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+ };
+
+-#define __RWSEM_INITIALIZER(name) \
+- { .lock = __RT_MUTEX_INITIALIZER(name.lock), \
+- RW_DEP_MAP_INIT(name) }
++#define __RWSEM_INITIALIZER(name) \
++{ \
++ .readers = ATOMIC_INIT(READER_BIAS), \
++ .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \
++ RW_DEP_MAP_INIT(name) \
++}
+
+ #define DECLARE_RWSEM(lockname) \
+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+-extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
+- struct lock_class_key *key);
+-
+-#define __rt_init_rwsem(sem, name, key) \
+- do { \
+- rt_mutex_init(&(sem)->lock); \
+- __rt_rwsem_init((sem), (name), (key));\
+- } while (0)
++extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name,
++ struct lock_class_key *key);
+
+-#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
++#define __init_rwsem(sem, name, key) \
++do { \
++ rt_mutex_init(&(sem)->rtmutex); \
++ __rwsem_init((sem), (name), (key)); \
++} while (0)
+
+-# define rt_init_rwsem(sem) \
++#define init_rwsem(sem) \
+ do { \
+ static struct lock_class_key __key; \
+ \
+- __rt_init_rwsem((sem), #sem, &__key); \
++ __init_rwsem((sem), #sem, &__key); \
+ } while (0)
+
+-extern void rt_down_write(struct rw_semaphore *rwsem);
+-extern int rt_down_write_killable(struct rw_semaphore *rwsem);
+-extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+-extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+-extern int rt_down_write_killable_nested(struct rw_semaphore *rwsem,
+- int subclass);
+-extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+- struct lockdep_map *nest);
+-extern void rt__down_read(struct rw_semaphore *rwsem);
+-extern void rt_down_read(struct rw_semaphore *rwsem);
+-extern int rt_down_write_trylock(struct rw_semaphore *rwsem);
+-extern int rt__down_read_trylock(struct rw_semaphore *rwsem);
+-extern int rt_down_read_trylock(struct rw_semaphore *rwsem);
+-extern void __rt_up_read(struct rw_semaphore *rwsem);
+-extern void rt_up_read(struct rw_semaphore *rwsem);
+-extern void rt_up_write(struct rw_semaphore *rwsem);
+-extern void rt_downgrade_write(struct rw_semaphore *rwsem);
+-
+-#define init_rwsem(sem) rt_init_rwsem(sem)
+-#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock)
+-
+-static inline int rwsem_is_contended(struct rw_semaphore *sem)
+-{
+- /* rt_mutex_has_waiters() */
+- return !RB_EMPTY_ROOT(&sem->lock.waiters);
+-}
+-
+-static inline void __down_read(struct rw_semaphore *sem)
+-{
+- rt__down_read(sem);
+-}
+-
+-static inline void down_read(struct rw_semaphore *sem)
+-{
+- rt_down_read(sem);
+-}
+-
+-static inline int __down_read_trylock(struct rw_semaphore *sem)
+-{
+- return rt__down_read_trylock(sem);
+-}
+-
+-static inline int down_read_trylock(struct rw_semaphore *sem)
+-{
+- return rt_down_read_trylock(sem);
+-}
+-
+-static inline void down_write(struct rw_semaphore *sem)
+-{
+- rt_down_write(sem);
+-}
+-
+-static inline int down_write_killable(struct rw_semaphore *sem)
+-{
+- return rt_down_write_killable(sem);
+-}
+-
+-static inline int down_write_trylock(struct rw_semaphore *sem)
+-{
+- return rt_down_write_trylock(sem);
+-}
+-
+-static inline void __up_read(struct rw_semaphore *sem)
+-{
+- __rt_up_read(sem);
+-}
+-
+-static inline void up_read(struct rw_semaphore *sem)
+-{
+- rt_up_read(sem);
+-}
+-
+-static inline void up_write(struct rw_semaphore *sem)
+-{
+- rt_up_write(sem);
+-}
+-
+-static inline void downgrade_write(struct rw_semaphore *sem)
++static inline int rwsem_is_locked(struct rw_semaphore *sem)
+ {
+- rt_downgrade_write(sem);
++ return atomic_read(&sem->readers) != READER_BIAS;
+ }
+
+-static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+-{
+- return rt_down_read_nested(sem, subclass);
+-}
+-
+-static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+-{
+- rt_down_write_nested(sem, subclass);
+-}
+-
+-static inline int down_write_killable_nested(struct rw_semaphore *sem,
+- int subclass)
+-{
+- return rt_down_write_killable_nested(sem, subclass);
+-}
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-static inline void down_write_nest_lock(struct rw_semaphore *sem,
+- struct rw_semaphore *nest_lock)
++static inline int rwsem_is_contended(struct rw_semaphore *sem)
+ {
+- rt_down_write_nested_lock(sem, &nest_lock->dep_map);
++ return atomic_read(&sem->readers) > 0;
+ }
+
+-#else
++extern void __down_read(struct rw_semaphore *sem);
++extern int __down_read_trylock(struct rw_semaphore *sem);
++extern void __down_write(struct rw_semaphore *sem);
++extern int __must_check __down_write_killable(struct rw_semaphore *sem);
++extern int __down_write_trylock(struct rw_semaphore *sem);
++extern void __up_read(struct rw_semaphore *sem);
++extern void __up_write(struct rw_semaphore *sem);
++extern void __downgrade_write(struct rw_semaphore *sem);
+
+-static inline void down_write_nest_lock(struct rw_semaphore *sem,
+- struct rw_semaphore *nest_lock)
+-{
+- rt_down_write_nested_lock(sem, NULL);
+-}
+-#endif
+ #endif
+--- a/kernel/locking/Makefile
++++ b/kernel/locking/Makefile
+@@ -14,8 +14,8 @@ endif
+ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-y += mutex.o
+ obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+-obj-y += rwsem.o
+ endif
++obj-y += rwsem.o
+ obj-$(CONFIG_LOCKDEP) += lockdep.o
+ ifeq ($(CONFIG_PROC_FS),y)
+ obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
+@@ -32,6 +32,6 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+ obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
+ obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+ endif
+-obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
+ obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
+ obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -306,173 +306,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
+ }
+ EXPORT_SYMBOL(__rt_rwlock_init);
+
+-/*
+- * rw_semaphores
+- */
+-
+-void rt_up_write(struct rw_semaphore *rwsem)
+-{
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- rt_mutex_unlock(&rwsem->lock);
+-}
+-EXPORT_SYMBOL(rt_up_write);
+-
+-void __rt_up_read(struct rw_semaphore *rwsem)
+-{
+- if (--rwsem->read_depth == 0)
+- rt_mutex_unlock(&rwsem->lock);
+-}
+-
+-void rt_up_read(struct rw_semaphore *rwsem)
+-{
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- __rt_up_read(rwsem);
+-}
+-EXPORT_SYMBOL(rt_up_read);
+-
+-/*
+- * downgrade a write lock into a read lock
+- * - just wake up any readers at the front of the queue
+- */
+-void rt_downgrade_write(struct rw_semaphore *rwsem)
+-{
+- BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+- rwsem->read_depth = 1;
+-}
+-EXPORT_SYMBOL(rt_downgrade_write);
+-
+-int rt_down_write_trylock(struct rw_semaphore *rwsem)
+-{
+- int ret = rt_mutex_trylock(&rwsem->lock);
+-
+- if (ret)
+- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_down_write_trylock);
+-
+-void rt_down_write(struct rw_semaphore *rwsem)
+-{
+- rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+- rt_mutex_lock(&rwsem->lock);
+-}
+-EXPORT_SYMBOL(rt_down_write);
+-
+-int rt_down_write_killable(struct rw_semaphore *rwsem)
+-{
+- int ret;
+-
+- rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+- ret = rt_mutex_lock_killable(&rwsem->lock);
+- if (ret)
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_down_write_killable);
+-
+-int rt_down_write_killable_nested(struct rw_semaphore *rwsem, int subclass)
+-{
+- int ret;
+-
+- rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+- ret = rt_mutex_lock_killable(&rwsem->lock);
+- if (ret)
+- rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_down_write_killable_nested);
+-
+-void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+-{
+- rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+- rt_mutex_lock(&rwsem->lock);
+-}
+-EXPORT_SYMBOL(rt_down_write_nested);
+-
+-void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+- struct lockdep_map *nest)
+-{
+- rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
+- rt_mutex_lock(&rwsem->lock);
+-}
+-EXPORT_SYMBOL(rt_down_write_nested_lock);
+-
+-int rt__down_read_trylock(struct rw_semaphore *rwsem)
+-{
+- struct rt_mutex *lock = &rwsem->lock;
+- int ret = 1;
+-
+- /*
+- * recursive read locks succeed when current owns the rwsem,
+- * but not when read_depth == 0 which means that the rwsem is
+- * write locked.
+- */
+- if (rt_mutex_owner(lock) != current)
+- ret = rt_mutex_trylock(&rwsem->lock);
+- else if (!rwsem->read_depth)
+- ret = 0;
+-
+- if (ret)
+- rwsem->read_depth++;
+- return ret;
+-
+-}
+-
+-int rt_down_read_trylock(struct rw_semaphore *rwsem)
+-{
+- int ret;
+-
+- ret = rt__down_read_trylock(rwsem);
+- if (ret)
+- rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+-
+- return ret;
+-}
+-EXPORT_SYMBOL(rt_down_read_trylock);
+-
+-void rt__down_read(struct rw_semaphore *rwsem)
+-{
+- struct rt_mutex *lock = &rwsem->lock;
+-
+- if (rt_mutex_owner(lock) != current)
+- rt_mutex_lock(&rwsem->lock);
+- rwsem->read_depth++;
+-}
+-EXPORT_SYMBOL(rt__down_read);
+-
+-static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+-{
+- rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+- rt__down_read(rwsem);
+-}
+-
+-void rt_down_read(struct rw_semaphore *rwsem)
+-{
+- __rt_down_read(rwsem, 0);
+-}
+-EXPORT_SYMBOL(rt_down_read);
+-
+-void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+-{
+- __rt_down_read(rwsem, subclass);
+-}
+-EXPORT_SYMBOL(rt_down_read_nested);
+-
+-void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
+- struct lock_class_key *key)
+-{
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- /*
+- * Make sure we are not reinitializing a held lock:
+- */
+- debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+- lockdep_init_map(&rwsem->dep_map, name, key, 0);
+-#endif
+- rwsem->read_depth = 0;
+- rwsem->lock.save_state = 0;
+-}
+-EXPORT_SYMBOL(__rt_rwsem_init);
+-
+ /**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+--- /dev/null
++++ b/kernel/locking/rwsem-rt.c
+@@ -0,0 +1,268 @@
++/*
++ */
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/export.h>
++
++#include "rtmutex_common.h"
++
++/*
++ * RT-specific reader/writer semaphores
++ *
++ * down_write()
++ * 1) Lock sem->rtmutex
++ * 2) Remove the reader BIAS to force readers into the slow path
++ * 3) Wait until all readers have left the critical region
++ * 4) Mark it write locked
++ *
++ * up_write()
++ * 1) Remove the write locked marker
++ * 2) Set the reader BIAS so readers can use the fast path again
++ * 3) Unlock sem->rtmutex to release blocked readers
++ *
++ * down_read()
++ * 1) Try fast path acquisition (reader BIAS is set)
++ * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag
++ * 3) If !writelocked, acquire it for read
++ * 4) If writelocked, block on sem->rtmutex
++ * 5) unlock sem->rtmutex, goto 1)
++ *
++ * up_read()
++ * 1) Try fast path release (reader count != 1)
++ * 2) Wake the writer waiting in down_write()#3
++ *
++ * down_read()#3 has the consequence that rw semaphores on RT are not writer
++ * fair, but writers, which should be avoided in RT tasks (think mmap_sem),
++ * are subject to the rtmutex priority/DL inheritance mechanism.
++ *
++ * It's possible to make the rw semaphores writer fair by keeping a list of
++ * active readers. A blocked writer would force all newly incoming readers to
++ * block on the rtmutex, but the rtmutex would have to be proxy locked for one
++ * reader after the other. We can't use multi-reader inheritance because there
++ * is no way to support that with SCHED_DEADLINE. Implementing the one by one
++ * reader boosting/handover mechanism is a major surgery for a very dubious
++ * value.
++ *
++ * The risk of writer starvation is there, but the pathological use cases
++ * which trigger it are not necessarily the typical RT workloads.
++ */
++
++void __rwsem_init(struct rw_semaphore *sem, const char *name,
++ struct lock_class_key *key)
++{
++#ifdef CONFIG_DEBUG_LOCK_ALLOC
++ /*
++ * Make sure we are not reinitializing a held semaphore:
++ */
++ debug_check_no_locks_freed((void *)sem, sizeof(*sem));
++ lockdep_init_map(&sem->dep_map, name, key, 0);
++#endif
++ atomic_set(&sem->readers, READER_BIAS);
++}
++EXPORT_SYMBOL(__rwsem_init);
++
++int __down_read_trylock(struct rw_semaphore *sem)
++{
++ int r, old;
++
++ /*
++ * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is
++ * set.
++ */
++ for (r = atomic_read(&sem->readers); r < 0;) {
++ old = atomic_cmpxchg(&sem->readers, r, r + 1);
++ if (likely(old == r))
++ return 1;
++ r = old;
++ }
++ return 0;
++}
++
++void __sched __down_read(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct rt_mutex_waiter waiter;
++
++ if (__down_read_trylock(sem))
++ return;
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Allow readers as long as the writer has not completely
++ * acquired the semaphore for write.
++ */
++ if (atomic_read(&sem->readers) != WRITER_BIAS) {
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ return;
++ }
++
++ /*
++ * Call into the slow lock path with the rtmutex->wait_lock
++ * held, so this can't result in the following race:
++ *
++ * Reader1 Reader2 Writer
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * down_read()
++ * unlock(m->wait_lock)
++ * up_read()
++ * swake()
++ * lock(m->wait_lock)
++ * sem->writelocked=true
++ * unlock(m->wait_lock)
++ *
++ * up_write()
++ * sem->writelocked=false
++ * rtmutex_unlock(m)
++ * down_read()
++ * down_write()
++ * rtmutex_lock(m)
++ * swait()
++ * rtmutex_lock(m)
++ *
++ * That would put Reader1 behind the writer waiting on
++ * Reader2 to call up_read() which might be unbound.
++ */
++ rt_mutex_init_waiter(&waiter, false);
++ rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
++ RT_MUTEX_MIN_CHAINWALK, NULL,
++ &waiter);
++ /*
++	 * The slowlock() above is guaranteed to return with the rtmutex now
++	 * held, so there can't be a writer active. Increment the reader
++ * count and immediately drop the rtmutex again.
++ */
++ atomic_inc(&sem->readers);
++ raw_spin_unlock_irq(&m->wait_lock);
++ rt_mutex_unlock(m);
++
++ debug_rt_mutex_free_waiter(&waiter);
++}
++
++void __up_read(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ struct task_struct *tsk;
++
++ /*
++ * sem->readers can only hit 0 when a writer is waiting for the
++ * active readers to leave the critical region.
++ */
++ if (!atomic_dec_and_test(&sem->readers))
++ return;
++
++ might_sleep();
++ raw_spin_lock_irq(&m->wait_lock);
++ /*
++ * Wake the writer, i.e. the rtmutex owner. It might release the
++ * rtmutex concurrently in the fast path (due to a signal), but to
++ * clean up the rwsem it needs to acquire m->wait_lock. The worst
++ * case which can happen is a spurious wakeup.
++ */
++ tsk = rt_mutex_owner(m);
++ if (tsk)
++ wake_up_process(tsk);
++
++ raw_spin_unlock_irq(&m->wait_lock);
++}
++
++static void __up_write_unlock(struct rw_semaphore *sem, int bias,
++ unsigned long flags)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++
++ atomic_add(READER_BIAS - bias, &sem->readers);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ rt_mutex_unlock(m);
++}
++
++static int __sched __down_write_common(struct rw_semaphore *sem, int state)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ /* Take the rtmutex as a first step */
++ if (rt_mutex_lock_state(m, state))
++ return -EINTR;
++
++ /* Force readers into slow path */
++ atomic_sub(READER_BIAS, &sem->readers);
++ might_sleep();
++
++ set_current_state(state);
++ for (;;) {
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /* Have all readers left the critical region? */
++ if (!atomic_read(&sem->readers)) {
++ atomic_set(&sem->readers, WRITER_BIAS);
++ __set_current_state(TASK_RUNNING);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 0;
++ }
++
++ if (signal_pending_state(state, current)) {
++ __set_current_state(TASK_RUNNING);
++ __up_write_unlock(sem, 0, flags);
++ return -EINTR;
++ }
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++
++ if (atomic_read(&sem->readers) != 0) {
++ schedule();
++ set_current_state(state);
++ }
++ }
++}
++
++void __sched __down_write(struct rw_semaphore *sem)
++{
++ __down_write_common(sem, TASK_UNINTERRUPTIBLE);
++}
++
++int __sched __down_write_killable(struct rw_semaphore *sem)
++{
++ return __down_write_common(sem, TASK_KILLABLE);
++}
++
++int __down_write_trylock(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ if (!rt_mutex_trylock(m))
++ return 0;
++
++ atomic_sub(READER_BIAS, &sem->readers);
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ if (!atomic_read(&sem->readers)) {
++ atomic_set(&sem->readers, WRITER_BIAS);
++ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
++ return 1;
++ }
++ __up_write_unlock(sem, 0, flags);
++ return 0;
++}
++
++void __up_write(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ __up_write_unlock(sem, WRITER_BIAS, flags);
++}
++
++void __downgrade_write(struct rw_semaphore *sem)
++{
++ struct rt_mutex *m = &sem->rtmutex;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&m->wait_lock, flags);
++ /* Release it and account current as reader */
++ __up_write_unlock(sem, WRITER_BIAS - 1, flags);
++}
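
A hedged usage sketch: the rwsem API itself is unchanged by this patch, only
the RT semantics broaden to allow concurrent readers. The semaphore and the
two functions below are illustrative, not taken from the patch:

static DECLARE_RWSEM(example_sem);

static void example_reader(void)
{
	/* Multiple readers may now hold the lock concurrently on RT. */
	down_read(&example_sem);
	/* ... read-side critical section ... */
	up_read(&example_sem);
}

static void example_writer(void)
{
	/*
	 * Blocks until the last reader has left. This wait is not priority
	 * inherited; readers blocked on the writer are boosted normally.
	 */
	down_write(&example_sem);
	/* ... write-side critical section ... */
	up_write(&example_sem);
}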
diff --git a/patches/series b/patches/series
index 421febb3a03e..b9e4172bfd95 100644
--- a/patches/series
+++ b/patches/series
@@ -343,12 +343,16 @@ pid.h-include-atomic.h.patch
arm-include-definition-for-cpumask_t.patch
locking-locktorture-Do-NOT-include-rwlock.h-directly.patch
rtmutex-lock-killable.patch
+rtmutex-Make-lock_killable-work.patch
spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
rt-drop_mutex_disable_on_not_debug.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
+rtmutex-Provide-rt_mutex_lock_state.patch
+rtmutex-Provide-locked-slowpath.patch
+rwsem-rt-Lift-single-reader-restriction.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
# RCU