author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-05 13:38:22 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-08-05 13:38:22 +0200
commit    13e82cf3ac1654d935270c4c1c8eb33a0ca53bcb (patch)
tree      48c17ec6e85845ac5e8a721076a2596c9f972ce5 /patches
parent    223af5c63a98be58aecfa5b498717f02d0dc0085 (diff)
download  linux-rt-13e82cf3ac1654d935270c4c1c8eb33a0ca53bcb.tar.gz
[ANNOUNCE] v5.14-rc4-rt6 (tag: v5.14-rc4-rt6-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rc4-rt6 patch set.

Changes since v5.14-rc4-rt5:

  - The locking bits have been updated:

    - Compile fixes. Regressions were reported by Mike Galbraith,
      Daniel Wagner and Daniel Bristot de Oliveira.

    - Thomas Gleixner addressed Peter Zijlstra's review comments.

Known issues

  - netconsole triggers WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
    still not clear if the RCU related change is correct.

The delta patch against v5.14-rc4-rt5 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc4-rt5-rt6.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc4-rt6

The RT patch against v5.14-rc4 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc4-rt6.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc4-rt6.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
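For example, the tagged release can be checked out from the development tree, or the
combined patch can be applied on top of a vanilla v5.14-rc4 source tree (commands are
illustrative; adjust paths to your setup):

    git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
    cd linux-rt-devel
    git checkout v5.14-rc4-rt6

    # or, inside an unpatched v5.14-rc4 tree:
    xzcat patch-5.14-rc4-rt6.patch.xz | patch -p1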
Diffstat (limited to 'patches')
-rw-r--r--  patches/Add_localversion_for_-RT_release.patch                                 |   2
-rw-r--r--  patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch               |   4
-rw-r--r--  patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch                 |  66
-rw-r--r--  patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch | 136
-rw-r--r--  patches/locking_spinlock__Provide_RT_specific_spinlock_type.patch              |   6
-rw-r--r--  patches/locking_spinlock__Provide_RT_variant.patch                             |   2
-rw-r--r--  patches/locking_spinlock__Split_the_lock_types_header.patch                    |  52
-rw-r--r--  patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch                         |   2
-rw-r--r--  patches/sched--Reorganize-current--state-helpers.patch                         |   8
-rw-r--r--  patches/sched__Add_support_for_lazy_preemption.patch                           |   2
-rw-r--r--  patches/sched__Prepare_for_RT_sleeping_spin_rwlocks.patch                      |  25
-rw-r--r--  patches/sched__Provide_schedule_point_for_RT_locks.patch                       |   2
-rw-r--r--  patches/series                                                                 |   2
-rw-r--r--  patches/signal_x86__Delay_calling_signals_in_atomic.patch                      |   2
14 files changed, 122 insertions(+), 189 deletions(-)
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index f2d35e0c0528..7b3d2414e699 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch b/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
index 7f049038b5b6..9a54c33f0a6d 100644
--- a/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
+++ b/patches/futex__Prevent_requeue_pi_lock_nesting_issue_on_RT.patch
@@ -223,14 +223,14 @@ V3: Folded Peter's improvements
+ if (locked >= 0) {
+ /* Requeue succeeded. Set DONE or LOCKED */
+ WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_PROGRESS &&
-+ old != Q_REQUEUE_PI_WAIT)
++ old != Q_REQUEUE_PI_WAIT);
+ new = Q_REQUEUE_PI_DONE + locked;
+ } else if (old == Q_REQUEUE_PI_IN_PROGRESS) {
+ /* Deadlock, no early wakeup interleave */
+ new = Q_REQUEUE_PI_NONE;
+ } else {
+ /* Deadlock, early wakeup interleave. */
-+ WARN_ON_ONCE(old != Q_REQUEUE_PI_IN_WAIT);
++ WARN_ON_ONCE(old != Q_REQUEUE_PI_WAIT);
+ new = Q_REQUEUE_PI_IGNORE;
+ }
+ } while (!atomic_try_cmpxchg(&q->requeue_state, &old, new));
diff --git a/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch b/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
index bed4aad350e3..53031925f6ef 100644
--- a/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
+++ b/patches/locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
@@ -4,11 +4,14 @@ Date: Tue Jul 6 16:36:57 2021 +0200
From: Steven Rostedt <rostedt@goodmis.org>
-Going to sleep when a spinlock or rwlock is contended can be quite
-inefficient when the contention time is short and the lock owner is running
-on a different CPU. The MCS mechanism is not applicable to rtmutex based
-locks, so provide a simple adaptive spinwait mechanism for the RT specific
-spin/rwlock implementations.
+Going to sleep when locks are contended can be quite inefficient when the
+contention time is short and the lock owner is running on a different CPU.
+
+The MCS mechanism cannot be used because MCS is strictly FIFO ordered while
+for rtmutex based locks the waiter ordering is priority based.
+
+Provide a simple adaptive spinwait mechanism which currently restricts the
+spinning to the top priority waiter.
[ tglx: Provide a contemporary changelog ]
@@ -16,8 +19,8 @@ Originally-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/locking/rtmutex.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++-
- 1 file changed, 49 insertions(+), 1 deletion(-)
+ kernel/locking/rtmutex.c | 59 +++++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 57 insertions(+), 2 deletions(-)
---
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -33,16 +36,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.rst for details.
*/
-@@ -1535,6 +1540,43 @@ static __always_inline int __rt_mutex_lo
- * Functions required for spin/rw_lock substitution on RT kernels
- */
+@@ -1278,6 +1283,44 @@ static __always_inline void __rt_mutex_u
+ rt_mutex_slowunlock(lock);
+ }
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
-+static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
++static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
+ struct task_struct *owner)
+{
+ bool res = true;
@@ -57,7 +60,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * checking the above to be valid.
+ */
+ barrier();
-+ if (!owner->on_cpu) {
++ if (!owner->on_cpu || need_resched() ||
++ vcpu_is_preempted(task_cpu(owner))) {
+ res = false;
+ break;
+ }
@@ -67,17 +71,41 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return res;
+}
+#else
-+static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
++static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
+ struct task_struct *owner)
+{
+ return false;
+}
+#endif
+
- /**
- * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
- * @lock: The underlying rt mutex
-@@ -1542,6 +1584,7 @@ static __always_inline int __rt_mutex_lo
+ #ifdef RT_MUTEX_BUILD_MUTEX
+ /*
+ * Functions required for:
+@@ -1362,6 +1405,7 @@ static int __sched rt_mutex_slowlock_blo
+ struct rt_mutex_waiter *waiter)
+ {
+ struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
++ struct task_struct *owner;
+ int ret = 0;
+
+ for (;;) {
+@@ -1384,9 +1428,14 @@ static int __sched rt_mutex_slowlock_blo
+ break;
+ }
+
++ if (waiter == rt_mutex_top_waiter(lock))
++ owner = rt_mutex_owner(lock);
++ else
++ owner = NULL;
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- schedule();
++ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
++ schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(state);
+@@ -1542,6 +1591,7 @@ static __always_inline int __rt_mutex_lo
static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
{
struct rt_mutex_waiter waiter;
@@ -85,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_assert_held(&lock->wait_lock);
-@@ -1560,9 +1603,14 @@ static void __sched rtlock_slowlock_lock
+@@ -1560,9 +1610,14 @@ static void __sched rtlock_slowlock_lock
if (try_to_take_rt_mutex(lock, current, &waiter))
break;
@@ -96,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
- schedule_rtlock();
-+ if (!owner || !rtlock_adaptive_spinwait(lock, owner))
++ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
+ schedule_rtlock();
raw_spin_lock_irq(&lock->wait_lock);
diff --git a/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch b/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
deleted file mode 100644
index b192d183887b..000000000000
--- a/patches/locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
+++ /dev/null
@@ -1,136 +0,0 @@
-Subject: locking/rtmutex: Use adaptive spinwait for all rtmutex based locks
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 30 Jul 2021 11:58:42 +0200
-
-There is no reason to restrict adaptive spinwait to the rt mutex based
-'spinlocks'. Testing on RT shows a 4x impromevemt for hackbench.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
----
- kernel/locking/rtmutex.c | 84 +++++++++++++++++++++++++----------------------
- 1 file changed, 45 insertions(+), 39 deletions(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1283,6 +1283,43 @@ static __always_inline void __rt_mutex_u
- rt_mutex_slowunlock(lock);
- }
-
-+#ifdef CONFIG_SMP
-+/*
-+ * Note that owner is a speculative pointer and dereferencing relies
-+ * on rcu_read_lock() and the check against the lock owner.
-+ */
-+static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
-+ struct task_struct *owner)
-+{
-+ bool res = true;
-+
-+ rcu_read_lock();
-+ for (;;) {
-+ /* Owner changed. Trylock again */
-+ if (owner != rt_mutex_owner(lock))
-+ break;
-+ /*
-+ * Ensure that owner->on_cpu is dereferenced _after_
-+ * checking the above to be valid.
-+ */
-+ barrier();
-+ if (!owner->on_cpu) {
-+ res = false;
-+ break;
-+ }
-+ cpu_relax();
-+ }
-+ rcu_read_unlock();
-+ return res;
-+}
-+#else
-+static bool rtmutex_adaptive_spinwait(struct rt_mutex_base *lock,
-+ struct task_struct *owner)
-+{
-+ return false;
-+}
-+#endif
-+
- #ifdef RT_MUTEX_BUILD_MUTEX
- /*
- * Functions required for:
-@@ -1367,6 +1404,7 @@ static int __sched rt_mutex_slowlock_blo
- struct rt_mutex_waiter *waiter)
- {
- struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
-+ struct task_struct *owner;
- int ret = 0;
-
- for (;;) {
-@@ -1389,9 +1427,14 @@ static int __sched rt_mutex_slowlock_blo
- break;
- }
-
-+ if (waiter == rt_mutex_top_waiter(lock))
-+ owner = rt_mutex_owner(lock);
-+ else
-+ owner = NULL;
- raw_spin_unlock_irq(&lock->wait_lock);
-
-- schedule();
-+ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
-+ schedule();
-
- raw_spin_lock_irq(&lock->wait_lock);
- set_current_state(state);
-@@ -1540,43 +1583,6 @@ static __always_inline int __rt_mutex_lo
- * Functions required for spin/rw_lock substitution on RT kernels
- */
-
--#ifdef CONFIG_SMP
--/*
-- * Note that owner is a speculative pointer and dereferencing relies
-- * on rcu_read_lock() and the check against the lock owner.
-- */
--static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
-- struct task_struct *owner)
--{
-- bool res = true;
--
-- rcu_read_lock();
-- for (;;) {
-- /* Owner changed. Trylock again */
-- if (owner != rt_mutex_owner(lock))
-- break;
-- /*
-- * Ensure that owner->on_cpu is dereferenced _after_
-- * checking the above to be valid.
-- */
-- barrier();
-- if (!owner->on_cpu) {
-- res = false;
-- break;
-- }
-- cpu_relax();
-- }
-- rcu_read_unlock();
-- return res;
--}
--#else
--static bool rtlock_adaptive_spinwait(struct rt_mutex_base *lock,
-- struct task_struct *owner)
--{
-- return false;
--}
--#endif
--
- /**
- * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
- * @lock: The underlying rt mutex
-@@ -1609,7 +1615,7 @@ static void __sched rtlock_slowlock_lock
- owner = NULL;
- raw_spin_unlock_irq(&lock->wait_lock);
-
-- if (!owner || !rtlock_adaptive_spinwait(lock, owner))
-+ if (!owner || !rtmutex_adaptive_spinwait(lock, owner))
- schedule_rtlock();
-
- raw_spin_lock_irq(&lock->wait_lock);
diff --git a/patches/locking_spinlock__Provide_RT_specific_spinlock_type.patch b/patches/locking_spinlock__Provide_RT_specific_spinlock_type.patch
index 6add16b60809..f1ad420500f5 100644
--- a/patches/locking_spinlock__Provide_RT_specific_spinlock_type.patch
+++ b/patches/locking_spinlock__Provide_RT_specific_spinlock_type.patch
@@ -16,9 +16,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
-@@ -51,6 +51,9 @@
+@@ -11,6 +11,9 @@
- #define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+ #include <linux/spinlock_types_raw.h>
+#ifndef CONFIG_PREEMPT_RT
+
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
typedef struct spinlock {
union {
struct raw_spinlock rlock;
-@@ -79,6 +82,29 @@ typedef struct spinlock {
+@@ -39,6 +42,29 @@ typedef struct spinlock {
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
diff --git a/patches/locking_spinlock__Provide_RT_variant.patch b/patches/locking_spinlock__Provide_RT_variant.patch
index c10f798f1af5..2f93fe790d23 100644
--- a/patches/locking_spinlock__Provide_RT_variant.patch
+++ b/patches/locking_spinlock__Provide_RT_variant.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * during that time are redirected to the saved state so no wake up is
+ * missed.
+ *
-+ * - Non RT spin/rw_locks disable preemption and evtl. interrupts.
++ * - Non RT spin/rw_locks disable preemption and eventually interrupts.
+ * Disabling preemption has the side effect of disabling migration and
+ * preventing RCU grace periods.
+ *
diff --git a/patches/locking_spinlock__Split_the_lock_types_header.patch b/patches/locking_spinlock__Split_the_lock_types_header.patch
index 5fa48e3f8cc9..41b90fe4bf6b 100644
--- a/patches/locking_spinlock__Split_the_lock_types_header.patch
+++ b/patches/locking_spinlock__Split_the_lock_types_header.patch
@@ -12,11 +12,13 @@ No functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
+V3: Remove the duplicate defines
+---
include/linux/rwlock_types.h | 4 ++
include/linux/spinlock.h | 4 ++
- include/linux/spinlock_types.h | 19 ----------
+ include/linux/spinlock_types.h | 59 ---------------------------------
include/linux/spinlock_types_raw.h | 65 +++++++++++++++++++++++++++++++++++++
- 4 files changed, 74 insertions(+), 18 deletions(-)
+ 4 files changed, 74 insertions(+), 58 deletions(-)
create mode 100644 include/linux/spinlock_types_raw.h
---
--- a/include/linux/rwlock_types.h
@@ -54,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
-@@ -9,24 +9,7 @@
+@@ -9,64 +9,7 @@
* Released under the General Public License (GPL).
*/
@@ -76,10 +78,50 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
+-
+-#define SPINLOCK_MAGIC 0xdead4ead
+-
+-#define SPINLOCK_OWNER_INIT ((void *)-1L)
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+-# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_SPIN, \
+- }
+-# define SPIN_DEP_MAP_INIT(lockname) \
+- .dep_map = { \
+- .name = #lockname, \
+- .wait_type_inner = LD_WAIT_CONFIG, \
+- }
+-#else
+-# define RAW_SPIN_DEP_MAP_INIT(lockname)
+-# define SPIN_DEP_MAP_INIT(lockname)
+-#endif
+-
+-#ifdef CONFIG_DEBUG_SPINLOCK
+-# define SPIN_DEBUG_INIT(lockname) \
+- .magic = SPINLOCK_MAGIC, \
+- .owner_cpu = -1, \
+- .owner = SPINLOCK_OWNER_INIT,
+-#else
+-# define SPIN_DEBUG_INIT(lockname)
+-#endif
+-
+-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \
+- { \
+- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+- SPIN_DEBUG_INIT(lockname) \
+- RAW_SPIN_DEP_MAP_INIT(lockname) }
+-
+-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
+- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+-
+-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+#include <linux/spinlock_types_raw.h>
- #define SPINLOCK_MAGIC 0xdead4ead
-
+ typedef struct spinlock {
+ union {
--- /dev/null
+++ b/include/linux/spinlock_types_raw.h
@@ -0,0 +1,65 @@
diff --git a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
index cd8e19e992ae..5fb0bd61c5b5 100644
--- a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
+++ b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Special states are those that do not use the normal wait-loop pattern. See
* the comment with set_special_state().
-@@ -1979,6 +1975,81 @@ static inline int test_tsk_need_resched(
+@@ -1980,6 +1976,81 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/sched--Reorganize-current--state-helpers.patch b/patches/sched--Reorganize-current--state-helpers.patch
index 9ef1ba37f9e8..6ec247c58a21 100644
--- a/patches/sched--Reorganize-current--state-helpers.patch
+++ b/patches/sched--Reorganize-current--state-helpers.patch
@@ -42,14 +42,14 @@ V3: New patch.
- current->task_state_change = _THIS_IP_; \
- smp_store_mb(current->__state, (state_value)); \
+#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
-+# debug_normal_state_change(state_value) \
++# define debug_normal_state_change(state_value) \
+ do { \
+ WARN_ON_ONCE(is_special_task_state(state_value)); \
+ current->task_state_change = _THIS_IP_; \
} while (0)
-#define set_special_state(state_value) \
-+# debug_special_state_change(state_value) \
++# define debug_special_state_change(state_value) \
do { \
- unsigned long flags; /* may shadow */ \
WARN_ON_ONCE(!is_special_task_state(state_value)); \
@@ -60,8 +60,8 @@ V3: New patch.
} while (0)
+
#else
-+# debug_normal_state_change(cond) do { } while (0)
-+# debug_special_state_change(cond) do { } while (0)
++# define debug_normal_state_change(cond) do { } while (0)
++# define debug_special_state_change(cond) do { } while (0)
+#endif
+
/*
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index be6d3cd51a30..f3a887bdaecd 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1979,6 +1979,43 @@ static inline int test_tsk_need_resched(
+@@ -1980,6 +1980,43 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/sched__Prepare_for_RT_sleeping_spin_rwlocks.patch b/patches/sched__Prepare_for_RT_sleeping_spin_rwlocks.patch
index ea3b18f9a979..88d5b7849844 100644
--- a/patches/sched__Prepare_for_RT_sleeping_spin_rwlocks.patch
+++ b/patches/sched__Prepare_for_RT_sleeping_spin_rwlocks.patch
@@ -40,35 +40,36 @@ For non-RT kernels there is no functional change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/sched.h | 65 ++++++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/sched.h | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++
kernel/sched/core.c | 33 +++++++++++++++++++++++++
- 2 files changed, 98 insertions(+)
+ 2 files changed, 99 insertions(+)
---
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -143,9 +143,21 @@ struct task_group;
+@@ -143,9 +143,22 @@ struct task_group;
current->task_state_change = _THIS_IP_; \
} while (0)
-+# debug_rtlock_wait_set_state() \
++# define debug_rtlock_wait_set_state() \
+ do { \
+ current->saved_state_change = current->task_state_change;\
+ current->task_state_change = _THIS_IP_; \
+ } while (0)
+
-+# debug_rtlock_wait_restore_state() \
++# define debug_rtlock_wait_restore_state() \
+ do { \
+ current->task_state_change = current->saved_state_change;\
+ } while (0)
++
#else
- # debug_normal_state_change(cond) do { } while (0)
- # debug_special_state_change(cond) do { } while (0)
-+# debug_rtlock_wait_set_state() do { } while (0)
-+# debug_rtlock_wait_restore_state() do { } while (0)
+ # define debug_normal_state_change(cond) do { } while (0)
+ # define debug_special_state_change(cond) do { } while (0)
++# define debug_rtlock_wait_set_state() do { } while (0)
++# define debug_rtlock_wait_restore_state() do { } while (0)
#endif
/*
-@@ -213,6 +225,51 @@ struct task_group;
+@@ -213,6 +226,51 @@ struct task_group;
raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
} while (0)
@@ -120,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define get_current_state() READ_ONCE(current->__state)
/* Task command name length: */
-@@ -668,6 +725,11 @@ struct task_struct {
+@@ -668,6 +726,11 @@ struct task_struct {
#endif
unsigned int __state;
@@ -132,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
* scheduling-critical items should be added above here.
-@@ -1361,6 +1423,9 @@ struct task_struct {
+@@ -1361,6 +1424,9 @@ struct task_struct {
struct kmap_ctrl kmap_ctrl;
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
diff --git a/patches/sched__Provide_schedule_point_for_RT_locks.patch b/patches/sched__Provide_schedule_point_for_RT_locks.patch
index dc777f3a3acc..a2b828a68956 100644
--- a/patches/sched__Provide_schedule_point_for_RT_locks.patch
+++ b/patches/sched__Provide_schedule_point_for_RT_locks.patch
@@ -29,7 +29,7 @@ V2: Adopt to the simplified mask logic
---
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -287,6 +287,9 @@ extern long schedule_timeout_idle(long t
+@@ -288,6 +288,9 @@ extern long schedule_timeout_idle(long t
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
asmlinkage void preempt_schedule_irq(void);
diff --git a/patches/series b/patches/series
index 88da5aa7b135..b0ca0ed108d2 100644
--- a/patches/series
+++ b/patches/series
@@ -191,8 +191,6 @@ rtmutex__Prevent_lockdep_false_positive_with_PI_futexes.patch
preempt__Adjust_PREEMPT_LOCK_OFFSET_for_RT.patch
locking_rtmutex__Implement_equal_priority_lock_stealing.patch
locking_rtmutex__Add_adaptive_spinwait_mechanism.patch
-locking_rtmutex__Use_adaptive_spinwait_for_all_rtmutex_based_locks.patch
-
###########################################################################
# Locking: RT bits. Need review
###########################################################################
diff --git a/patches/signal_x86__Delay_calling_signals_in_atomic.patch b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
index 587f836ad9c6..e5e5b7eb61bb 100644
--- a/patches/signal_x86__Delay_calling_signals_in_atomic.patch
+++ b/patches/signal_x86__Delay_calling_signals_in_atomic.patch
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1077,6 +1077,10 @@ struct task_struct {
+@@ -1078,6 +1078,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;