author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2020-10-09 21:50:21 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2020-10-09 21:50:21 +0200
commit    7d3b587475ce75c6f9f9874339fcf53c5cf2e099 (patch)
tree      735b73e956be456ee259c9ca23e8c008511e68e8
parent    8f85d58376d0e1cbbf1110caafd5a7106d0f5841 (diff)
download  linux-rt-7d3b587475ce75c6f9f9874339fcf53c5cf2e099.tar.gz
[ANNOUNCE] v5.9-rc8-rt13 (v5.9-rc8-rt13-patches)
Dear RT folks!

I'm pleased to announce the v5.9-rc8-rt13 patch set.

Changes since v5.9-rc8-rt12:

  - Refurbish the rtmutex related patches:

    - Remove unused code and struct members.

    - Add a scheduling function to the scheduler which is invoked instead
      of schedule() while blocking on a sleeping lock (an illustrative
      sketch follows after this announcement). This allows removing
        - the extra flush-block-io calls.
        - the sleeping lock counter.
      As a result, blocking on a sleeping lock from within a
      (workqueue/IO) worker thread won't fire up another worker. On
      !PREEMPT_RT this does not happen if the execution blocks on a
      spinlock_t. This change aligns the behaviour. Further testing is
      required to see if this behaviour is desired for PREEMPT_RT.

    - Disable migration after the sleeping lock has been acquired.
      Previously this was done before the lock was acquired, due to a CPU
      hotplug restriction. These restrictions have been lifted and there
      should be nothing requiring the old behaviour. The advantage is
      that the scheduler is able to freely place the task once the lock
      is available. Otherwise the task would have to wait until the CPU
      is no longer occupied by a task with a higher priority. This of
      course doesn't work for tested locks.

Known issues
  - It has been pointed out that due to changes to the printk code the
    internal buffer representation changed. This is only an issue if tools
    like `crash' are used to extract the printk buffer from a kernel
    memory image.

The delta patch against v5.9-rc8-rt12 is appended below and can be found
here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/incr/patch-5.9-rc8-rt12-rt13.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.9-rc8-rt13

The RT patch against v5.9-rc8 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patch-5.9-rc8-rt13.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.9/older/patches-5.9-rc8-rt13.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
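To illustrate the new scheduling function mentioned above, here is a
minimal sketch. The function name schedule_rtlock() and the exact
bookkeeping are assumptions for illustration only; the real change is in
0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch:

    /*
     * Sketch: a schedule() variant used only while blocking on a
     * sleeping lock. Unlike schedule(), it does not go through
     * sched_submit_work(), so no block plug is flushed and no extra
     * (workqueue/IO) worker is woken up -- matching the !PREEMPT_RT
     * behaviour when blocking on a spinlock_t.
     */
    void __sched notrace schedule_rtlock(void)
    {
            do {
                    preempt_disable();
                    __schedule(false);                 /* pick the next task */
                    sched_preempt_enable_no_resched();
            } while (need_resched());
    }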
-rw-r--r--  patches/0001-locking-rtmutex-Remove-cruft.patch  86
-rw-r--r--  patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch  294
-rw-r--r--  patches/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch  53
-rw-r--r--  patches/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch  89
-rw-r--r--  patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch (renamed from patches/rtmutex-futex-prepare-rt.patch)  15
-rw-r--r--  patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch (renamed from patches/futex-requeue-pi-fix.patch)  11
-rw-r--r--  patches/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch (renamed from patches/rtmutex-lock-killable.patch)  30
-rw-r--r--  patches/0008-locking-rtmutex-Make-lock_killable-work.patch (renamed from patches/rtmutex-Make-lock_killable-work.patch)  7
-rw-r--r--  patches/0009-locking-spinlock-Split-the-lock-types-header.patch (renamed from patches/spinlock-types-separate-raw.patch)  4
-rw-r--r--  patches/0010-locking-rtmutex-Avoid-include-hell.patch (renamed from patches/rtmutex-avoid-include-hell.patch)  2
-rw-r--r--  patches/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch (renamed from patches/lockdep-Reduce-header-files-in-debug_locks.h.patch)  2
-rw-r--r--  patches/0012-locking-split-out-the-rbtree-definition.patch (renamed from patches/locking-split-out-the-rbtree-definition.patch)  2
-rw-r--r--  patches/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch (renamed from patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch)  10
-rw-r--r--  patches/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch (renamed from patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch)  28
-rw-r--r--  patches/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch (renamed from patches/sched-rt-mutex-wakeup.patch)  11
-rw-r--r--  patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch (renamed from patches/rtmutex-add-sleeping-lock-implementation.patch)  224
-rw-r--r--  patches/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch (renamed from patches/rtmutex-trylock-is-okay-on-RT.patch)  14
-rw-r--r--  patches/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch (renamed from patches/rtmutex-add-mutex-implementation-based-on-rtmutex.patch)  15
-rw-r--r--  patches/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch (renamed from patches/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch)  17
-rw-r--r--  patches/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch (renamed from patches/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch)  44
-rw-r--r--  patches/0021-locking-rtmutex-wire-up-RT-s-locking.patch (renamed from patches/rtmutex-wire-up-RT-s-locking.patch)  40
-rw-r--r--  patches/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch (renamed from patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch)  101
-rw-r--r--  patches/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch  224
-rw-r--r--  patches/add_migrate_disable.patch  209
-rw-r--r--  patches/cond-resched-lock-rt-tweak.patch  26
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch  2
-rw-r--r--  patches/localversion.patch  2
-rw-r--r--  patches/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch  94
-rw-r--r--  patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch  39
-rw-r--r--  patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch  73
-rw-r--r--  patches/locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch  91
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch  2
-rw-r--r--  patches/mutex-Move-the-ww_mutext-definition-back-to-ww_mutex.patch  50
-rw-r--r--  patches/oleg-signal-rt-fix.patch  2
-rw-r--r--  patches/preempt-lazy-support.patch  14
-rw-r--r--  patches/preempt-nort-rt-variants.patch  4
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  2
-rw-r--r--  patches/rt-introduce-cpu-chill.patch  8
-rw-r--r--  patches/rwsem-Provide-down_read_non_owner-and-up_read_non_ow.patch  51
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch  2
-rw-r--r--  patches/sched-migrate_enable-Remove-__schedule-call.patch  2
-rw-r--r--  patches/sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch  4
-rw-r--r--  patches/sched-migrate_enable-Use-stop_one_cpu_nowait.patch  10
-rw-r--r--  patches/sched-mmdrop-delayed.patch  4
-rw-r--r--  patches/series  52
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch  2
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch  4
47 files changed, 1146 insertions, 926 deletions
diff --git a/patches/0001-locking-rtmutex-Remove-cruft.patch b/patches/0001-locking-rtmutex-Remove-cruft.patch
new file mode 100644
index 000000000000..d353ef8aca37
--- /dev/null
+++ b/patches/0001-locking-rtmutex-Remove-cruft.patch
@@ -0,0 +1,86 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 15:21:17 +0200
+Subject: [PATCH 01/23] locking/rtmutex: Remove cruft
+
+Most of this has been around since the very beginning. I'm not sure if it
+was used while the rtmutex-deadlock-tester was around, but today it seems
+to only waste memory:
+- save_state: No users
+- name: Assigned and printed if a deadlock was detected. I'm keeping it
+  but want to point out that lockdep has the same information.
+- file + line: Printed if ::name was NULL. This is only used for
+  in-kernel locks, so ::name shouldn't be NULL and then ::file and
+  ::line aren't used.
+- magic: Assigned to NULL by rt_mutex_destroy().
+
+Remove members of rt_mutex which are not used.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 7 ++-----
+ kernel/locking/rtmutex-debug.c | 7 +------
+ kernel/locking/rtmutex.c | 3 ---
+ kernel/locking/rtmutex_common.h | 1 -
+ 4 files changed, 3 insertions(+), 15 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -32,10 +32,7 @@ struct rt_mutex {
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+- int save_state;
+- const char *name, *file;
+- int line;
+- void *magic;
++ const char *name;
+ #endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+@@ -60,7 +57,7 @@ struct hrtimer_sleeper;
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+ # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+- , .name = #mutexname, .file = __FILE__, .line = __LINE__
++ , .name = #mutexname
+
+ # define rt_mutex_init(mutex) \
+ do { \
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -42,12 +42,7 @@ static void printk_task(struct task_stru
+
+ static void printk_lock(struct rt_mutex *lock, int print_owner)
+ {
+- if (lock->name)
+- printk(" [%p] {%s}\n",
+- lock, lock->name);
+- else
+- printk(" [%p] {%s:%d}\n",
+- lock, lock->file, lock->line);
++ printk(" [%p] {%s}\n", lock, lock->name);
+
+ if (print_owner && rt_mutex_owner(lock)) {
+ printk(".. ->owner: %p\n", lock->owner);
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1655,9 +1655,6 @@ void __sched rt_mutex_futex_unlock(struc
+ void rt_mutex_destroy(struct rt_mutex *lock)
+ {
+ WARN_ON(rt_mutex_is_locked(lock));
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- lock->magic = NULL;
+-#endif
+ }
+ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -30,7 +30,6 @@ struct rt_mutex_waiter {
+ struct task_struct *task;
+ struct rt_mutex *lock;
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+- unsigned long ip;
+ struct pid *deadlock_task_pid;
+ struct rt_mutex *deadlock_lock;
+ #endif
diff --git a/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch b/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
new file mode 100644
index 000000000000..5ba0b7240ea9
--- /dev/null
+++ b/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
@@ -0,0 +1,294 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 16:05:11 +0200
+Subject: [PATCH 02/23] locking/rtmutex: Remove output from deadlock detector.
+
+In commit
+ f5694788ad8da ("rt_mutex: Add lockdep annotations")
+
+rtmutex gained lockdep annotation for rt_mutex_lock() and related
+functions.
+lockdep will see the locking order and may complain about a deadlock
+before rtmutex' own mechanism gets a chance to detect it.
+The rtmutex deadlock detector will only complain about locks with
+RT_MUTEX_MIN_CHAINWALK and a pending waiter. That means it
+works only for in-kernel locks because the futex interface always uses
+RT_MUTEX_FULL_CHAINWALK.
+The requirement for an active waiter limits the detector to actual
+deadlocks and makes it impossible to report potential deadlocks like
+lockdep does.
+It looks like lockdep is better suited for reporting deadlocks.
+
+Remove rtmutex' debug print on deadlock detection.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 7 --
+ kernel/locking/rtmutex-debug.c | 97 ----------------------------------------
+ kernel/locking/rtmutex-debug.h | 11 ----
+ kernel/locking/rtmutex.c | 9 ---
+ kernel/locking/rtmutex.h | 7 --
+ kernel/locking/rtmutex_common.h | 4 -
+ 6 files changed, 135 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -31,9 +31,6 @@ struct rt_mutex {
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
+ struct task_struct *owner;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- const char *name;
+-#endif
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+@@ -56,8 +53,6 @@ struct hrtimer_sleeper;
+ #endif
+
+ #ifdef CONFIG_DEBUG_RT_MUTEXES
+-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+- , .name = #mutexname
+
+ # define rt_mutex_init(mutex) \
+ do { \
+@@ -67,7 +62,6 @@ do { \
+
+ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+-# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+ # define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
+ # define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+@@ -83,7 +77,6 @@ do { \
+ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+ , .waiters = RB_ROOT_CACHED \
+ , .owner = NULL \
+- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+
+ #define DEFINE_RT_MUTEX(mutexname) \
+--- a/kernel/locking/rtmutex-debug.c
++++ b/kernel/locking/rtmutex-debug.c
+@@ -32,105 +32,12 @@
+
+ #include "rtmutex_common.h"
+
+-static void printk_task(struct task_struct *p)
+-{
+- if (p)
+- printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
+- else
+- printk("<none>");
+-}
+-
+-static void printk_lock(struct rt_mutex *lock, int print_owner)
+-{
+- printk(" [%p] {%s}\n", lock, lock->name);
+-
+- if (print_owner && rt_mutex_owner(lock)) {
+- printk(".. ->owner: %p\n", lock->owner);
+- printk(".. held by: ");
+- printk_task(rt_mutex_owner(lock));
+- printk("\n");
+- }
+-}
+-
+ void rt_mutex_debug_task_free(struct task_struct *task)
+ {
+ DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
+ DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
+ }
+
+-/*
+- * We fill out the fields in the waiter to store the information about
+- * the deadlock. We print when we return. act_waiter can be NULL in
+- * case of a remove waiter operation.
+- */
+-void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+- struct rt_mutex_waiter *act_waiter,
+- struct rt_mutex *lock)
+-{
+- struct task_struct *task;
+-
+- if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
+- return;
+-
+- task = rt_mutex_owner(act_waiter->lock);
+- if (task && task != current) {
+- act_waiter->deadlock_task_pid = get_pid(task_pid(task));
+- act_waiter->deadlock_lock = lock;
+- }
+-}
+-
+-void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
+-{
+- struct task_struct *task;
+-
+- if (!waiter->deadlock_lock || !debug_locks)
+- return;
+-
+- rcu_read_lock();
+- task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
+- if (!task) {
+- rcu_read_unlock();
+- return;
+- }
+-
+- if (!debug_locks_off()) {
+- rcu_read_unlock();
+- return;
+- }
+-
+- pr_warn("\n");
+- pr_warn("============================================\n");
+- pr_warn("WARNING: circular locking deadlock detected!\n");
+- pr_warn("%s\n", print_tainted());
+- pr_warn("--------------------------------------------\n");
+- printk("%s/%d is deadlocking current task %s/%d\n\n",
+- task->comm, task_pid_nr(task),
+- current->comm, task_pid_nr(current));
+-
+- printk("\n1) %s/%d is trying to acquire this lock:\n",
+- current->comm, task_pid_nr(current));
+- printk_lock(waiter->lock, 1);
+-
+- printk("\n2) %s/%d is blocked on this lock:\n",
+- task->comm, task_pid_nr(task));
+- printk_lock(waiter->deadlock_lock, 1);
+-
+- debug_show_held_locks(current);
+- debug_show_held_locks(task);
+-
+- printk("\n%s/%d's [blocked] stackdump:\n\n",
+- task->comm, task_pid_nr(task));
+- show_stack(task, NULL, KERN_DEFAULT);
+- printk("\n%s/%d's [current] stackdump:\n\n",
+- current->comm, task_pid_nr(current));
+- dump_stack();
+- debug_show_all_locks();
+- rcu_read_unlock();
+-
+- printk("[ turning off deadlock detection."
+- "Please report this trace. ]\n\n");
+-}
+-
+ void debug_rt_mutex_lock(struct rt_mutex *lock)
+ {
+ }
+@@ -153,12 +60,10 @@ void debug_rt_mutex_proxy_unlock(struct
+ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+ {
+ memset(waiter, 0x11, sizeof(*waiter));
+- waiter->deadlock_task_pid = NULL;
+ }
+
+ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+ {
+- put_pid(waiter->deadlock_task_pid);
+ memset(waiter, 0x22, sizeof(*waiter));
+ }
+
+@@ -168,10 +73,8 @@ void debug_rt_mutex_init(struct rt_mutex
+ * Make sure we are not reinitializing a held lock:
+ */
+ debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+- lock->name = name;
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ lockdep_init_map(&lock->dep_map, name, key, 0);
+ #endif
+ }
+-
+--- a/kernel/locking/rtmutex-debug.h
++++ b/kernel/locking/rtmutex-debug.h
+@@ -18,20 +18,9 @@ extern void debug_rt_mutex_unlock(struct
+ extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+ struct task_struct *powner);
+ extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+-extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
+- struct rt_mutex_waiter *waiter,
+- struct rt_mutex *lock);
+-extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+-# define debug_rt_mutex_reset_waiter(w) \
+- do { (w)->deadlock_lock = NULL; } while (0)
+
+ static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+ enum rtmutex_chainwalk walk)
+ {
+ return (waiter != NULL);
+ }
+-
+-static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+-{
+- debug_rt_mutex_print_deadlock(w);
+-}
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -597,7 +597,6 @@ static int rt_mutex_adjust_prio_chain(st
+ * walk, we detected a deadlock.
+ */
+ if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
+- debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
+ raw_spin_unlock(&lock->wait_lock);
+ ret = -EDEADLK;
+ goto out_unlock_pi;
+@@ -1189,8 +1188,6 @@ static int __sched
+
+ raw_spin_unlock_irq(&lock->wait_lock);
+
+- debug_rt_mutex_print_deadlock(waiter);
+-
+ schedule();
+
+ raw_spin_lock_irq(&lock->wait_lock);
+@@ -1211,10 +1208,6 @@ static void rt_mutex_handle_deadlock(int
+ if (res != -EDEADLOCK || detect_deadlock)
+ return;
+
+- /*
+- * Yell lowdly and stop the task right here.
+- */
+- rt_mutex_print_deadlock(w);
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+@@ -1764,8 +1757,6 @@ int __rt_mutex_start_proxy_lock(struct r
+ ret = 0;
+ }
+
+- debug_rt_mutex_print_deadlock(waiter);
+-
+ return ret;
+ }
+
+--- a/kernel/locking/rtmutex.h
++++ b/kernel/locking/rtmutex.h
+@@ -19,15 +19,8 @@
+ #define debug_rt_mutex_proxy_unlock(l) do { } while (0)
+ #define debug_rt_mutex_unlock(l) do { } while (0)
+ #define debug_rt_mutex_init(m, n, k) do { } while (0)
+-#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
+-#define debug_rt_mutex_print_deadlock(w) do { } while (0)
+ #define debug_rt_mutex_reset_waiter(w) do { } while (0)
+
+-static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+-{
+- WARN(1, "rtmutex deadlock detected\n");
+-}
+-
+ static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
+ enum rtmutex_chainwalk walk)
+ {
+--- a/kernel/locking/rtmutex_common.h
++++ b/kernel/locking/rtmutex_common.h
+@@ -29,10 +29,6 @@ struct rt_mutex_waiter {
+ struct rb_node pi_tree_entry;
+ struct task_struct *task;
+ struct rt_mutex *lock;
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+- struct pid *deadlock_task_pid;
+- struct rt_mutex *deadlock_lock;
+-#endif
+ int prio;
+ u64 deadline;
+ };
diff --git a/patches/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch b/patches/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
new file mode 100644
index 000000000000..6a89e32343ba
--- /dev/null
+++ b/patches/0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
@@ -0,0 +1,53 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 29 Sep 2020 16:32:49 +0200
+Subject: [PATCH 03/23] locking/rtmutex: Move rt_mutex_init() outside of
+ CONFIG_DEBUG_RT_MUTEXES
+
+rt_mutex_init() only initializes lockdep if CONFIG_DEBUG_RT_MUTEXES is
+enabled. The static initializer (DEFINE_RT_MUTEX) does not have such a
+restriction.
+
+Move rt_mutex_init() outside of CONFIG_DEBUG_RT_MUTEXES.
+Move the remaining functions in this CONFIG_DEBUG_RT_MUTEXES block to
+the upper block.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 12 +++---------
+ 1 file changed, 3 insertions(+), 9 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -43,6 +43,7 @@ struct hrtimer_sleeper;
+ extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len);
+ extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
++ extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+ #else
+ static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+ unsigned long len)
+@@ -50,22 +51,15 @@ struct hrtimer_sleeper;
+ return 0;
+ }
+ # define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
++# define rt_mutex_debug_task_free(t) do { } while (0)
+ #endif
+
+-#ifdef CONFIG_DEBUG_RT_MUTEXES
+-
+-# define rt_mutex_init(mutex) \
++#define rt_mutex_init(mutex) \
+ do { \
+ static struct lock_class_key __key; \
+ __rt_mutex_init(mutex, __func__, &__key); \
+ } while (0)
+
+- extern void rt_mutex_debug_task_free(struct task_struct *tsk);
+-#else
+-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
+-# define rt_mutex_debug_task_free(t) do { } while (0)
+-#endif
+-
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
+ , .dep_map = { .name = #mutexname }
diff --git a/patches/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch b/patches/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
new file mode 100644
index 000000000000..f58a7401300c
--- /dev/null
+++ b/patches/0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
@@ -0,0 +1,89 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 7 Oct 2020 12:11:33 +0200
+Subject: [PATCH 04/23] locking/rtmutex: Remove rt_mutex_timed_lock()
+
+rt_mutex_timed_lock() has no callers since commit
+ c051b21f71d1f ("rtmutex: Confine deadlock logic to futex")
+
+Remove rt_mutex_timed_lock().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/rtmutex.h | 3 ---
+ kernel/locking/rtmutex.c | 46 ----------------------------------------------
+ 2 files changed, 49 deletions(-)
+
+--- a/include/linux/rtmutex.h
++++ b/include/linux/rtmutex.h
+@@ -99,9 +99,6 @@ extern void rt_mutex_lock(struct rt_mute
+ #endif
+
+ extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+-extern int rt_mutex_timed_lock(struct rt_mutex *lock,
+- struct hrtimer_sleeper *timeout);
+-
+ extern int rt_mutex_trylock(struct rt_mutex *lock);
+
+ extern void rt_mutex_unlock(struct rt_mutex *lock);
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1406,21 +1406,6 @@ rt_mutex_fastlock(struct rt_mutex *lock,
+ }
+
+ static inline int
+-rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk,
+- int (*slowfn)(struct rt_mutex *lock, int state,
+- struct hrtimer_sleeper *timeout,
+- enum rtmutex_chainwalk chwalk))
+-{
+- if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
+- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+- return 0;
+-
+- return slowfn(lock, state, timeout, chwalk);
+-}
+-
+-static inline int
+ rt_mutex_fasttrylock(struct rt_mutex *lock,
+ int (*slowfn)(struct rt_mutex *lock))
+ {
+@@ -1528,37 +1513,6 @@ int __sched __rt_mutex_futex_trylock(str
+ }
+
+ /**
+- * rt_mutex_timed_lock - lock a rt_mutex interruptible
+- * the timeout structure is provided
+- * by the caller
+- *
+- * @lock: the rt_mutex to be locked
+- * @timeout: timeout structure or NULL (no timeout)
+- *
+- * Returns:
+- * 0 on success
+- * -EINTR when interrupted by a signal
+- * -ETIMEDOUT when the timeout expired
+- */
+-int
+-rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
+-{
+- int ret;
+-
+- might_sleep();
+-
+- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+- ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+- RT_MUTEX_MIN_CHAINWALK,
+- rt_mutex_slowlock);
+- if (ret)
+- mutex_release(&lock->dep_map, _RET_IP_);
+-
+- return ret;
+-}
+-EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+-
+-/**
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
index 585193616e61..0853ac5edb8e 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
@@ -1,6 +1,7 @@
-Subject: rtmutex: Handle the various new futex race conditions
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 10 Jun 2011 11:04:15 +0200
+Subject: [PATCH 05/23] locking/rtmutex: Handle the various new futex race
+ conditions
RT opens a few new interesting race conditions in the rtmutex/futex
combo due to futex hash bucket lock being a 'sleeping' spinlock and
@@ -170,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -948,6 +954,22 @@ static int task_blocks_on_rt_mutex(struc
+@@ -947,6 +953,22 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -193,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
-@@ -971,7 +993,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -970,7 +992,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
@@ -202,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1067,7 +1089,7 @@ static void remove_waiter(struct rt_mute
+@@ -1066,7 +1088,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -211,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_assert_held(&lock->wait_lock);
-@@ -1093,7 +1115,8 @@ static void remove_waiter(struct rt_mute
+@@ -1092,7 +1114,8 @@ static void remove_waiter(struct rt_mute
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -221,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&owner->pi_lock);
-@@ -1129,7 +1152,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1128,7 +1151,8 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -233,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -130,6 +130,8 @@ enum rtmutex_chainwalk {
+@@ -125,6 +125,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
index 15384521a3be..7a6645c7c6a6 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
@@ -1,12 +1,11 @@
From: Steven Rostedt <rostedt@goodmis.org>
Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: futex: Fix bug on when a requeued RT task times out
+Subject: [PATCH 06/23] futex: Fix bug on when a requeued RT task times out
Requeue with timeout causes a bug with PREEMPT_RT.
The bug comes from a timed out condition.
-
TASK 1 TASK 2
------ ------
futex_wait_requeue_pi()
@@ -16,13 +15,12 @@ The bug comes from a timed out condition.
double_lock_hb();
raw_spin_lock(pi_lock);
- if (current->pi_blocked_on) {
+ if (current->pi_blocked_on) {
} else {
current->pi_blocked_on = PI_WAKE_INPROGRESS;
run_spin_unlock(pi_lock);
spin_lock(hb->lock); <-- blocked!
-
plist_for_each_entry_safe(this) {
rt_mutex_start_proxy_lock();
task_blocks_on_rt_mutex();
@@ -45,7 +43,6 @@ Otherwise set it to a new flag PI_REQUEUE_INPROGRESS, which notifies
the proxy task that it is being requeued, and will handle things
appropriately.
-
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
@@ -65,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1777,6 +1778,34 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1721,6 +1722,34 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -102,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
RT_MUTEX_FULL_CHAINWALK);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -131,6 +131,7 @@ enum rtmutex_chainwalk {
+@@ -126,6 +126,7 @@ enum rtmutex_chainwalk {
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
diff --git a/patches/rtmutex-lock-killable.patch b/patches/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
index fd4d45d692f9..dd8bc592bdc4 100644
--- a/patches/rtmutex-lock-killable.patch
+++ b/patches/0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
@@ -1,6 +1,6 @@
-Subject: rtmutex: Add rtmutex_lock_killable()
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 09 Jun 2011 11:43:52 +0200
+Date: Thu, 9 Jun 2011 11:43:52 +0200
+Subject: [PATCH 07/23] locking/rtmutex: Add rtmutex_lock_killable()
Add "killable" type to rtmutex. We need this since rtmutex are used as
"normal" mutexes which do use this type.
@@ -8,34 +8,32 @@ Add "killable" type to rtmutex. We need this since rtmutex are used as
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/rtmutex.h | 1 +
- kernel/locking/rtmutex.c | 19 +++++++++++++++++++
- 2 files changed, 20 insertions(+)
+ kernel/locking/rtmutex.c | 17 +++++++++++++++++
+ 2 files changed, 18 insertions(+)
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -115,6 +115,7 @@ extern void rt_mutex_lock(struct rt_mute
+@@ -99,6 +99,7 @@ extern void rt_mutex_lock(struct rt_mute
#endif
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
- extern int rt_mutex_timed_lock(struct rt_mutex *lock,
- struct hrtimer_sleeper *timeout);
+ extern int rt_mutex_trylock(struct rt_mutex *lock);
+ extern void rt_mutex_unlock(struct rt_mutex *lock);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1560,6 +1560,25 @@ int __sched __rt_mutex_futex_trylock(str
+@@ -1538,6 +1538,23 @@ int __sched __rt_mutex_futex_trylock(str
}
/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
-+ * @lock: the rt_mutex to be locked
-+ * @detect_deadlock: deadlock detection on/off
++ * @lock: the rt_mutex to be locked
+ *
+ * Returns:
-+ * 0 on success
-+ * -EINTR when interrupted by a signal
-+ * -EDEADLK when the lock would deadlock (when deadlock detection is on)
++ * 0 on success
++ * -EINTR when interrupted by a signal
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
@@ -46,6 +44,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
+/**
- * rt_mutex_timed_lock - lock a rt_mutex interruptible
- * the timeout structure is provided
- * by the caller
+ * rt_mutex_trylock - try to lock a rt_mutex
+ *
+ * @lock: the rt_mutex to be locked
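For context, a hypothetical caller of the new killable variant could look
like this (a sketch only; my_dev and its lock are made up and not part of
the patch set):

    /* Sketch: block killably so a fatal signal aborts the wait. */
    static int my_dev_op(struct my_dev *dev)
    {
            int ret;

            ret = rt_mutex_lock_killable(&dev->lock);
            if (ret)
                    return ret;     /* -EINTR: fatal signal while blocked */
            /* ... critical section ... */
            rt_mutex_unlock(&dev->lock);
            return 0;
    }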
diff --git a/patches/rtmutex-Make-lock_killable-work.patch b/patches/0008-locking-rtmutex-Make-lock_killable-work.patch
index b86456ce290b..8e871378cb80 100644
--- a/patches/rtmutex-Make-lock_killable-work.patch
+++ b/patches/0008-locking-rtmutex-Make-lock_killable-work.patch
@@ -1,13 +1,12 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 1 Apr 2017 12:50:59 +0200
-Subject: [PATCH] rtmutex: Make lock_killable work
+Subject: [PATCH 08/23] locking/rtmutex: Make lock_killable work
Locking an rt mutex killable does not work because signal handling is
restricted to TASK_INTERRUPTIBLE.
-Use signal_pending_state() unconditionaly.
+Use signal_pending_state() unconditionally.
-Cc: stable-rt@vger.kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -16,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1198,18 +1198,13 @@ static int __sched
+@@ -1197,18 +1197,13 @@ static int __sched
if (try_to_take_rt_mutex(lock, current, waiter))
break;
diff --git a/patches/spinlock-types-separate-raw.patch b/patches/0009-locking-spinlock-Split-the-lock-types-header.patch
index a785ea286a35..029dd86e567e 100644
--- a/patches/spinlock-types-separate-raw.patch
+++ b/patches/0009-locking-spinlock-Split-the-lock-types-header.patch
@@ -1,6 +1,6 @@
-Subject: spinlock: Split the lock types header
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 19:34:01 +0200
+Subject: [PATCH 09/23] locking/spinlock: Split the lock types header
Split raw_spinlock into its own file and the remaining spinlock_t into
its own non-RT header. The non-RT header will be replaced later by sleeping
@@ -13,6 +13,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/spinlock_types_nort.h | 39 ++++++++++++++++
include/linux/spinlock_types_raw.h | 65 ++++++++++++++++++++++++++
4 files changed, 110 insertions(+), 85 deletions(-)
+ create mode 100644 include/linux/spinlock_types_nort.h
+ create mode 100644 include/linux/spinlock_types_raw.h
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
diff --git a/patches/rtmutex-avoid-include-hell.patch b/patches/0010-locking-rtmutex-Avoid-include-hell.patch
index a3b55f5b742d..9b305295caf8 100644
--- a/patches/rtmutex-avoid-include-hell.patch
+++ b/patches/0010-locking-rtmutex-Avoid-include-hell.patch
@@ -1,6 +1,6 @@
-Subject: rtmutex: Avoid include hell
From: Thomas Gleixner <tglx@linutronix.de>
Date: Wed, 29 Jun 2011 20:06:39 +0200
+Subject: [PATCH 10/23] locking/rtmutex: Avoid include hell
Include only the required raw types. This avoids pulling in the
complete spinlock header which in turn requires rtmutex.h at some point.
diff --git a/patches/lockdep-Reduce-header-files-in-debug_locks.h.patch b/patches/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch
index 6bc20919ad45..fe0a6fad4153 100644
--- a/patches/lockdep-Reduce-header-files-in-debug_locks.h.patch
+++ b/patches/0011-lockdep-Reduce-header-files-in-debug_locks.h.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Aug 2020 16:55:25 +0200
-Subject: [PATCH 2/4] lockdep: Reduce header files in debug_locks.h
+Subject: [PATCH 11/23] lockdep: Reduce header files in debug_locks.h
The inclusion of kernel.h leads to circular dependency if spinlock_t is
based on rt_mutex.
diff --git a/patches/locking-split-out-the-rbtree-definition.patch b/patches/0012-locking-split-out-the-rbtree-definition.patch
index da997f1bc738..7dab8848df37 100644
--- a/patches/locking-split-out-the-rbtree-definition.patch
+++ b/patches/0012-locking-split-out-the-rbtree-definition.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 14 Aug 2020 17:08:41 +0200
-Subject: [PATCH] locking: split out the rbtree definition
+Subject: [PATCH 12/23] locking: split out the rbtree definition
rtmutex.h needs the definition for rb_root_cached. By including kernel.h
we will get to spinlock.h which requires rtmutex.h again.
diff --git a/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch b/patches/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
index 8717951e72a9..3a27371b9df0 100644
--- a/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ b/patches/0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:14:22 +0200
-Subject: rtmutex: Provide rt_mutex_slowlock_locked()
+Subject: [PATCH 13/23] locking/rtmutex: Provide rt_mutex_slowlock_locked()
This is the inner-part of rt_mutex_slowlock(), required for rwsem-rt.
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1241,35 +1241,16 @@ static void rt_mutex_handle_deadlock(int
+@@ -1234,35 +1234,16 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1277,16 +1258,16 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1270,16 +1251,16 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1294,6 +1275,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1287,6 +1268,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This is the control structure for tasks blocked on a rt_mutex,
-@@ -159,6 +160,12 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -154,6 +155,12 @@ extern bool __rt_mutex_futex_unlock(stru
struct wake_q_head *wqh);
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
diff --git a/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/patches/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
index e83185847897..2b2e6dac8e99 100644
--- a/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
+++ b/patches/0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
@@ -1,20 +1,20 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 16:36:39 +0200
-Subject: rtmutex: export lockdep-less version of rt_mutex's lock,
- trylock and unlock
+Subject: [PATCH 14/23] locking/rtmutex: export lockdep-less version of
+ rt_mutex's lock, trylock and unlock
Required for lock implementation on top of rtmutex.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 59 ++++++++++++++++++++++++++--------------
+ kernel/locking/rtmutex.c | 58 ++++++++++++++++++++++++++--------------
kernel/locking/rtmutex_common.h | 3 ++
- 2 files changed, 42 insertions(+), 20 deletions(-)
+ 2 files changed, 42 insertions(+), 19 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1491,12 +1491,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1469,12 +1469,33 @@ rt_mutex_fastunlock(struct rt_mutex *loc
rt_mutex_postunlock(&wake_q);
}
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-@@ -1537,16 +1558,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -1515,16 +1536,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -68,11 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1572,13 +1584,10 @@ int __sched __rt_mutex_futex_trylock(str
- * Returns:
- * 0 on success
- * -EINTR when interrupted by a signal
-- * -EDEADLK when the lock would deadlock (when deadlock detection is on)
+@@ -1552,12 +1564,18 @@ int __sched __rt_mutex_futex_trylock(str
*/
int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
{
@@ -83,10 +79,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1613,6 +1622,14 @@ rt_mutex_timed_lock(struct rt_mutex *loc
- }
- EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-
+int __sched __rt_mutex_trylock(struct rt_mutex *lock)
+{
+ if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
@@ -98,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
-@@ -1628,10 +1645,7 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1573,10 +1591,7 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
@@ -110,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-@@ -1639,6 +1653,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1584,6 +1599,11 @@ int __sched rt_mutex_trylock(struct rt_m
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
@@ -124,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -162,6 +162,9 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -157,6 +157,9 @@ extern bool __rt_mutex_futex_unlock(stru
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
/* RW semaphore special interface */
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
index 9d76cc1cc10f..8ae794fc352a 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -1,6 +1,7 @@
-Subject: sched: Add saved_state for tasks blocked on sleeping locks
From: Thomas Gleixner <tglx@linutronix.de>
Date: Sat, 25 Jun 2011 09:21:04 +0200
+Subject: [PATCH 15/23] sched: Add saved_state for tasks blocked on sleeping
+ locks
Spinlocks are state preserving in !RT. RT changes the state when a
task gets blocked on a lock. So we need to remember the state before
@@ -26,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1729,6 +1731,7 @@ extern struct task_struct *find_get_task
+@@ -1725,6 +1727,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -36,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2832,7 +2832,7 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2828,7 +2828,7 @@ try_to_wake_up(struct task_struct *p, un
int cpu, success = 0;
preempt_disable();
@@ -45,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
* == smp_processor_id()'. Together this means we can special
-@@ -2862,8 +2862,26 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2858,8 +2858,26 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -73,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -3053,6 +3071,18 @@ int wake_up_process(struct task_struct *
+@@ -3049,6 +3067,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
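The saved_state mechanism can be summarised in a short sketch (hedged: the
hunks above show only fragments, so the details below are reconstructed
assumptions rather than the literal patch):

    /* Blocking on a sleeping lock (current->pi_lock held): */
    current->saved_state = current->state;  /* e.g. TASK_INTERRUPTIBLE */
    current->state = TASK_UNINTERRUPTIBLE;  /* sleep for the lock itself */

    /*
     * A regular wakeup arriving while the task waits for the lock is
     * recorded instead of being lost:
     */
    p->saved_state = TASK_RUNNING;          /* delivered once the lock is taken */

    /* After the lock is acquired, the original state is restored: */
    current->state = current->saved_state;
    current->saved_state = TASK_RUNNING;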
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
index 5106b5612d86..46c6a8bdf148 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/0016-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -1,22 +1,23 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:11:19 +0200
-Subject: rtmutex: add sleeping lock implementation
+Subject: [PATCH 16/23] locking/rtmutex: add sleeping lock implementation
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/kernel.h | 5
+ include/linux/preempt.h | 4
include/linux/rtmutex.h | 19 +
- include/linux/sched.h | 8
+ include/linux/sched.h | 7
include/linux/sched/wake_q.h | 13 +
- include/linux/spinlock_rt.h | 154 +++++++++++++
- include/linux/spinlock_types_rt.h | 48 ++++
+ include/linux/spinlock_rt.h | 155 +++++++++++++
+ include/linux/spinlock_types_rt.h | 38 +++
kernel/fork.c | 1
kernel/futex.c | 11
- kernel/locking/rtmutex.c | 439 ++++++++++++++++++++++++++++++++++----
+ kernel/locking/rtmutex.c | 444 ++++++++++++++++++++++++++++++++++----
kernel/locking/rtmutex_common.h | 14 -
kernel/sched/core.c | 39 ++-
- 11 files changed, 694 insertions(+), 57 deletions(-)
+ 12 files changed, 694 insertions(+), 56 deletions(-)
create mode 100644 include/linux/spinlock_rt.h
create mode 100644 include/linux/spinlock_types_rt.h
@@ -41,6 +42,20 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# define cant_sleep() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
# define non_block_start() do { } while (0)
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -118,7 +118,11 @@
+ /*
+ * The preempt_count offset after spin_lock()
+ */
++#if !defined(CONFIG_PREEMPT_RT)
+ #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
++#else
++#define PREEMPT_LOCK_OFFSET 0
++#endif
+
+ /*
+ * The preempt_count offset needed for things like:
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -19,6 +19,10 @@
@@ -54,43 +69,37 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* The rt_mutex structure
*
-@@ -31,8 +35,8 @@ struct rt_mutex {
+@@ -31,6 +35,7 @@ struct rt_mutex {
raw_spinlock_t wait_lock;
struct rb_root_cached waiters;
struct task_struct *owner;
--#ifdef CONFIG_DEBUG_RT_MUTEXES
- int save_state;
-+#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *name, *file;
- int line;
- void *magic;
-@@ -82,16 +86,23 @@ do { \
++ int save_state;
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+ #endif
+@@ -67,11 +72,19 @@ do { \
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
-#define __RT_MUTEX_INITIALIZER(mutexname) \
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
-+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .waiters = RB_ROOT_CACHED \
, .owner = NULL \
- __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
+ __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
-+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 0 }
++
++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
++ , .save_state = 1 }
#define DEFINE_RT_MUTEX(mutexname) \
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
-
-+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
-+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
-+ , .save_state = 1 }
-+
- /**
- * rt_mutex_is_locked - is the mutex locked
- * @lock: the mutex to be queried
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -139,6 +139,9 @@ struct task_group;
@@ -103,15 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
-@@ -148,6 +151,7 @@ struct task_group;
- current->state = (state_value); \
- raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
- } while (0)
-+
- #else
- /*
- * set_current_state() includes a barrier so that the write of current->state
-@@ -192,6 +196,9 @@ struct task_group;
+@@ -192,6 +195,9 @@ struct task_group;
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
@@ -121,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
-@@ -978,6 +985,7 @@ struct task_struct {
+@@ -974,6 +980,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
@@ -152,7 +153,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_SCHED_WAKE_Q_H */
--- /dev/null
+++ b/include/linux/spinlock_rt.h
-@@ -0,0 +1,154 @@
+@@ -0,0 +1,155 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
@@ -309,7 +311,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/include/linux/spinlock_types_rt.h
-@@ -0,0 +1,48 @@
+@@ -0,0 +1,38 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
@@ -330,22 +333,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
+} spinlock_t;
+
-+#ifdef CONFIG_DEBUG_RT_MUTEXES
-+# define __RT_SPIN_INITIALIZER(name) \
++#define __RT_SPIN_INITIALIZER(name) \
+ { \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+ .save_state = 1, \
-+ .file = __FILE__, \
-+ .line = __LINE__ , \
+ }
-+#else
-+# define __RT_SPIN_INITIALIZER(name) \
-+ { \
-+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
-+ .save_state = 1, \
-+ }
-+#endif
-+
+/*
+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
+*/
@@ -360,7 +352,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -947,6 +947,7 @@ static struct task_struct *dup_task_stru
+@@ -924,6 +924,7 @@ static struct task_struct *dup_task_stru
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
@@ -370,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1481,6 +1481,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1479,6 +1479,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -378,7 +370,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1540,13 +1541,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1538,13 +1539,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -395,7 +387,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2842,7 +2843,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2840,7 +2841,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -404,7 +396,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3204,7 +3205,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3202,7 +3203,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -479,7 +471,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -701,13 +735,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -700,13 +734,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -498,7 +490,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -808,9 +845,11 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -807,9 +844,11 @@ static int rt_mutex_adjust_prio_chain(st
* @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
@@ -512,7 +504,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -846,12 +885,11 @@ static int try_to_take_rt_mutex(struct r
+@@ -845,12 +884,11 @@ static int try_to_take_rt_mutex(struct r
*/
if (waiter) {
/*
@@ -528,7 +520,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
* lock waiters tree.
-@@ -869,14 +907,12 @@ static int try_to_take_rt_mutex(struct r
+@@ -868,14 +906,12 @@ static int try_to_take_rt_mutex(struct r
*/
if (rt_mutex_has_waiters(lock)) {
/*
@@ -547,7 +539,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -923,6 +959,291 @@ static int try_to_take_rt_mutex(struct r
+@@ -922,6 +958,289 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -658,8 +650,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
-+ debug_rt_mutex_print_deadlock(waiter);
-+
+ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
+ schedule();
+
@@ -728,9 +718,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
-+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
@@ -742,18 +732,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
-+ migrate_disable();
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+
+void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
+ struct lockdep_map *nest_lock)
+{
-+ migrate_disable();
+ spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ migrate_disable();
+}
+EXPORT_SYMBOL(rt_spin_lock_nest_lock);
+#endif
@@ -762,8 +752,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, _RET_IP_);
-+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+ migrate_enable();
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
@@ -789,12 +779,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ int ret;
+
-+ migrate_disable();
+ ret = __rt_mutex_trylock(&lock->lock);
-+ if (ret)
++ if (ret) {
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
++ migrate_disable();
++ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
@@ -806,10 +795,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ local_bh_disable();
+ ret = __rt_mutex_trylock(&lock->lock);
+ if (ret) {
-+ migrate_disable();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-+ } else
++ migrate_disable();
++ } else {
+ local_bh_enable();
++ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
@@ -839,7 +829,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1036,6 +1357,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1035,6 +1354,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -847,7 +837,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1075,7 +1397,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1074,7 +1394,10 @@ static void mark_wakeup_next_waiter(stru
* Pairs with preempt_enable() in rt_mutex_postunlock();
*/
preempt_disable();
@@ -859,7 +849,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&current->pi_lock);
}
-@@ -1159,21 +1484,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1158,21 +1481,22 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -884,7 +874,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1290,7 +1616,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1283,7 +1607,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -893,7 +883,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1363,7 +1689,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1356,7 +1680,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -903,7 +893,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
-@@ -1417,7 +1744,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1410,7 +1735,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -912,7 +902,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true; /* call rt_mutex_postunlock() */
-@@ -1469,9 +1796,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1447,9 +1772,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
/*
* Performs the wakeup of the the top-waiter and re-enables preemption.
*/
@@ -925,7 +915,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
preempt_enable();
-@@ -1480,15 +1809,17 @@ void rt_mutex_postunlock(struct wake_q_h
+@@ -1458,15 +1785,17 @@ void rt_mutex_postunlock(struct wake_q_h
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -946,7 +936,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-@@ -1666,16 +1997,13 @@ void __sched __rt_mutex_unlock(struct rt
+@@ -1612,16 +1941,13 @@ void __sched __rt_mutex_unlock(struct rt
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, _RET_IP_);
@@ -967,7 +957,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1692,23 +2020,35 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1638,23 +1964,35 @@ bool __sched __rt_mutex_futex_unlock(str
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
@@ -1006,7 +996,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1747,7 +2087,7 @@ void __rt_mutex_init(struct rt_mutex *lo
+@@ -1690,7 +2028,7 @@ void __rt_mutex_init(struct rt_mutex *lo
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
@@ -1015,7 +1005,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1767,6 +2107,14 @@ void rt_mutex_init_proxy_locked(struct r
+@@ -1710,6 +2048,14 @@ void rt_mutex_init_proxy_locked(struct r
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL, NULL);
@@ -1030,18 +1020,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1942,6 +2290,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
- struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter)
- {
-+ struct task_struct *tsk = current;
- int ret;
+@@ -1733,6 +2079,26 @@ void rt_mutex_proxy_unlock(struct rt_mut
+ rt_mutex_set_owner(lock, NULL);
+ }
- raw_spin_lock_irq(&lock->wait_lock);
-@@ -1953,6 +2302,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
- * have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
++static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
++{
++ struct task_struct *tsk = current;
+ /*
+ * RT has a problem here when the wait got interrupted by a timeout
+ * or a signal. task->pi_blocked_on is still set. The task must
@@ -1054,26 +1039,45 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * boosting chain of the rtmutex. That's correct because the task
+ * is no longer blocked on it.
+ */
-+ if (ret) {
-+ raw_spin_lock(&tsk->pi_lock);
-+ tsk->pi_blocked_on = NULL;
-+ raw_spin_unlock(&tsk->pi_lock);
-+ }
++ raw_spin_lock(&tsk->pi_lock);
++ tsk->pi_blocked_on = NULL;
++ raw_spin_unlock(&tsk->pi_lock);
++}
++
+ /**
+ * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
+ * @lock: the rt_mutex to take
+@@ -1805,6 +2171,9 @@ int __rt_mutex_start_proxy_lock(struct r
+ ret = 0;
+ }
+
++ if (ret)
++ fixup_rt_mutex_blocked(lock);
++
+ return ret;
+ }
+
+@@ -1894,6 +2263,9 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+ * have to fix that up.
+ */
+ fixup_rt_mutex_waiters(lock);
++ if (ret)
++ fixup_rt_mutex_blocked(lock);
+
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -30,6 +30,7 @@ struct rt_mutex_waiter {
- struct rb_node pi_tree_entry;
+@@ -31,6 +31,7 @@ struct rt_mutex_waiter {
struct task_struct *task;
struct rt_mutex *lock;
+ int prio;
+ bool savestate;
- #ifdef CONFIG_DEBUG_RT_MUTEXES
- unsigned long ip;
- struct pid *deadlock_task_pid;
-@@ -139,7 +140,7 @@ extern void rt_mutex_init_proxy_locked(s
+ u64 deadline;
+ };
+
+@@ -134,7 +135,7 @@ extern void rt_mutex_init_proxy_locked(s
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -1082,7 +1086,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -157,9 +158,12 @@ extern int __rt_mutex_futex_trylock(stru
+@@ -152,9 +153,12 @@ extern int __rt_mutex_futex_trylock(stru
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1097,7 +1101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* RW semaphore special interface */
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
-@@ -169,6 +173,10 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -164,6 +168,10 @@ int __sched rt_mutex_slowlock_locked(str
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter);
@@ -1110,7 +1114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# include "rtmutex-debug.h"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -515,9 +515,15 @@ static bool set_nr_if_polling(struct tas
+@@ -511,9 +511,15 @@ static bool set_nr_if_polling(struct tas
#endif
#endif
@@ -1128,7 +1132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -553,7 +559,13 @@ static bool __wake_q_add(struct wake_q_h
+@@ -549,7 +555,13 @@ static bool __wake_q_add(struct wake_q_h
*/
void wake_q_add(struct wake_q_head *head, struct task_struct *task)
{
@@ -1143,7 +1147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
get_task_struct(task);
}
-@@ -576,28 +588,39 @@ void wake_q_add(struct wake_q_head *head
+@@ -572,28 +584,39 @@ void wake_q_add(struct wake_q_head *head
*/
void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task)
{
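
The net effect of the reordering above: migration is pinned only once the
lock is actually held, and a failed trylock no longer toggles
migrate_disable()/migrate_enable() at all. Below is a minimal userspace
sketch of that ordering, with the migrate calls stubbed out as a per-thread
counter; it is an illustration only, not kernel code.

#include <pthread.h>
#include <stdio.h>

static __thread int migrate_disable_count;

static void migrate_disable(void) { migrate_disable_count++; }
static void migrate_enable(void)  { migrate_disable_count--; }

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

/* Acquire first; pin migration only once the lock is held. */
static int trylock_model(pthread_mutex_t *lock)
{
	int ret = (pthread_mutex_trylock(lock) == 0);

	if (ret)
		migrate_disable();
	return ret;
}

/* Unpin migration before dropping the lock, as rt_spin_unlock() now does. */
static void unlock_model(pthread_mutex_t *lock)
{
	migrate_enable();
	pthread_mutex_unlock(lock);
}

int main(void)
{
	if (trylock_model(&demo_lock)) {
		printf("locked, migrate_disable_count=%d\n",
		       migrate_disable_count);
		unlock_model(&demo_lock);
	}
	printf("unlocked, migrate_disable_count=%d\n", migrate_disable_count);
	return 0;
}
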
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
index 125cf09181c1..420f7499bb16 100644
--- a/patches/rtmutex-trylock-is-okay-on-RT.patch
+++ b/patches/0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
@@ -1,10 +1,12 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed 02 Dec 2015 11:34:07 +0100
-Subject: rtmutex: trylock is okay on -RT
+Date: Wed, 2 Dec 2015 11:34:07 +0100
+Subject: [PATCH 17/23] locking/rtmutex: Allow rt_mutex_trylock() on PREEMPT_RT
-non-RT kernel could deadlock on rt_mutex_trylock() in softirq context. On
--RT we don't run softirqs in IRQ context but in thread context so it is
-not a issue here.
+A non-PREEMPT_RT kernel can deadlock on rt_mutex_trylock() in softirq
+context.
+On PREEMPT_RT the softirq context is handled in thread context. This
+avoids the deadlock in the slow path and PI-boosting will be done on the
+correct thread.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -13,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1955,7 +1955,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1899,7 +1899,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_killable
int __sched __rt_mutex_trylock(struct rt_mutex *lock)
{
diff --git a/patches/rtmutex-add-mutex-implementation-based-on-rtmutex.patch b/patches/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
index 6953fdb42c58..e2887c3a6d98 100644
--- a/patches/rtmutex-add-mutex-implementation-based-on-rtmutex.patch
+++ b/patches/0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
@@ -1,19 +1,21 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:17:03 +0200
-Subject: rtmutex: add mutex implementation based on rtmutex
+Subject: [PATCH 18/23] locking/rtmutex: add mutex implementation based on
+ rtmutex
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/mutex_rt.h | 130 ++++++++++++++++++++++++++
- kernel/locking/mutex-rt.c | 223 ++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/mutex_rt.h | 131 +++++++++++++++++++++++++++
+ kernel/locking/mutex-rt.c | 222 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 353 insertions(+)
create mode 100644 include/linux/mutex_rt.h
create mode 100644 kernel/locking/mutex-rt.c
--- /dev/null
+++ b/include/linux/mutex_rt.h
-@@ -0,0 +1,130 @@
+@@ -0,0 +1,131 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
@@ -146,10 +148,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/kernel/locking/mutex-rt.c
-@@ -0,0 +1,223 @@
+@@ -0,0 +1,222 @@
++// SPDX-License-Identifier: GPL-2.0-only
+/*
-+ * kernel/rt.c
-+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
diff --git a/patches/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch b/patches/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
index c48c56dffa99..9e9b59eccfde 100644
--- a/patches/rtmutex-add-rwsem-implementation-based-on-rtmutex.patch
+++ b/patches/0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
@@ -1,6 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:28:34 +0200
-Subject: rtmutex: add rwsem implementation based on rtmutex
+Subject: [PATCH 19/23] locking/rtmutex: add rwsem implementation based on
+ rtmutex
The RT specific R/W semaphore implementation restricts the number of readers
to one because a writer cannot block on multiple readers and inherit its
@@ -14,7 +15,7 @@ The single reader restricting is painful in various ways:
- Progress blocker for drivers which are carefully crafted to avoid the
potential reader/writer deadlock in mainline.
-The analysis of the writer code pathes shows, that properly written RT tasks
+The analysis of the writer code paths shows that properly written RT tasks
should not take them. Syscalls like mmap(), file access which take mmap sem
write locked have unbound latencies which are completely unrelated to mmap
sem. Other R/W sem users like graphics drivers are not suitable for RT tasks
@@ -41,15 +42,16 @@ the approach.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/rwsem-rt.h | 68 ++++++++++
- kernel/locking/rwsem-rt.c | 293 ++++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/rwsem-rt.h | 69 ++++++++++
+ kernel/locking/rwsem-rt.c | 292 ++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 361 insertions(+)
create mode 100644 include/linux/rwsem-rt.h
create mode 100644 kernel/locking/rwsem-rt.c
--- /dev/null
+++ b/include/linux/rwsem-rt.h
-@@ -0,0 +1,68 @@
+@@ -0,0 +1,69 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
@@ -120,9 +122,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/kernel/locking/rwsem-rt.c
-@@ -0,0 +1,293 @@
-+/*
-+ */
+@@ -0,0 +1,292 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/rwsem.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
diff --git a/patches/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch b/patches/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
index a0fddbcc8471..c9ab69d70be1 100644
--- a/patches/rtmutex-add-rwlock-implementation-based-on-rtmutex.patch
+++ b/patches/0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
@@ -1,24 +1,26 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:18:06 +0200
-Subject: rtmutex: add rwlock implementation based on rtmutex
+Subject: [PATCH 20/23] locking/rtmutex: add rwlock implementation based on
+ rtmutex
The implementation is bias-based, similar to the rwsem implementation.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/rwlock_rt.h | 108 +++++++++++++
- include/linux/rwlock_types_rt.h | 55 ++++++
+ include/linux/rwlock_rt.h | 109 +++++++++++++
+ include/linux/rwlock_types_rt.h | 56 ++++++
kernel/Kconfig.locks | 2
- kernel/locking/rwlock-rt.c | 331 ++++++++++++++++++++++++++++++++++++++++
- 4 files changed, 495 insertions(+), 1 deletion(-)
+ kernel/locking/rwlock-rt.c | 328 ++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 494 insertions(+), 1 deletion(-)
create mode 100644 include/linux/rwlock_rt.h
create mode 100644 include/linux/rwlock_types_rt.h
create mode 100644 kernel/locking/rwlock-rt.c
--- /dev/null
+++ b/include/linux/rwlock_rt.h
-@@ -0,0 +1,108 @@
+@@ -0,0 +1,109 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
@@ -129,7 +131,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- /dev/null
+++ b/include/linux/rwlock_types_rt.h
-@@ -0,0 +1,55 @@
+@@ -0,0 +1,56 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+
@@ -198,9 +201,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool
--- /dev/null
+++ b/kernel/locking/rwlock-rt.c
-@@ -0,0 +1,331 @@
-+/*
-+ */
+@@ -0,0 +1,328 @@
++// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/sched/debug.h>
+#include <linux/export.h>
+
@@ -469,12 +471,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ int ret;
+
-+ migrate_disable();
+ ret = __read_rt_trylock(rwlock);
-+ if (ret)
++ if (ret) {
+ rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
++ migrate_disable();
++ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
@@ -483,45 +484,44 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+{
+ int ret;
+
-+ migrate_disable();
+ ret = __write_rt_trylock(rwlock);
-+ if (ret)
++ if (ret) {
+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-+ else
-+ migrate_enable();
++ migrate_disable();
++ }
+ return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
-+ migrate_disable();
+ rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __read_rt_lock(rwlock);
++ migrate_disable();
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
-+ migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __write_rt_lock(rwlock);
++ migrate_disable();
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, _RET_IP_);
-+ __read_rt_unlock(rwlock);
+ migrate_enable();
++ __read_rt_unlock(rwlock);
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+ rwlock_release(&rwlock->dep_map, _RET_IP_);
-+ __write_rt_unlock(rwlock);
+ migrate_enable();
++ __write_rt_unlock(rwlock);
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
diff --git a/patches/rtmutex-wire-up-RT-s-locking.patch b/patches/0021-locking-rtmutex-wire-up-RT-s-locking.patch
index c8b37604c548..d64a9058c47c 100644
--- a/patches/rtmutex-wire-up-RT-s-locking.patch
+++ b/patches/0021-locking-rtmutex-wire-up-RT-s-locking.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
Date: Thu, 12 Oct 2017 17:31:14 +0200
-Subject: rtmutex: wire up RT's locking
+Subject: [PATCH 21/23] locking/rtmutex: wire up RT's locking
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -11,10 +11,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/spinlock_api_smp.h | 4 +++-
include/linux/spinlock_types.h | 11 ++++++++---
kernel/locking/Makefile | 10 +++++++---
- kernel/locking/rwsem.c | 7 +++++++
+ kernel/locking/rwsem.c | 6 ++++++
kernel/locking/spinlock.c | 7 +++++++
kernel/locking/spinlock_debug.c | 5 +++++
- 9 files changed, 76 insertions(+), 18 deletions(-)
+ 9 files changed, 75 insertions(+), 18 deletions(-)
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -204,15 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "lock_events.h"
/*
-@@ -1332,6 +1333,7 @@ static struct rw_semaphore *rwsem_downgr
- return sem;
- }
-
-+
- /*
- * lock for reading
- */
-@@ -1482,6 +1484,7 @@ static inline void __downgrade_write(str
+@@ -1482,6 +1483,7 @@ static inline void __downgrade_write(str
if (tmp & RWSEM_FLAG_WAITERS)
rwsem_downgrade_wake(sem);
}
@@ -220,36 +212,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* lock for reading
-@@ -1613,6 +1616,7 @@ void _down_write_nest_lock(struct rw_sem
- }
- EXPORT_SYMBOL(_down_write_nest_lock);
-
-+#ifndef CONFIG_PREEMPT_RT
- void down_read_non_owner(struct rw_semaphore *sem)
+@@ -1617,7 +1619,9 @@ void down_read_non_owner(struct rw_semap
{
might_sleep();
-@@ -1620,6 +1624,7 @@ void down_read_non_owner(struct rw_semap
+ __down_read(sem);
++#ifndef CONFIG_PREEMPT_RT
__rwsem_set_reader_owned(sem, NULL);
++#endif
}
EXPORT_SYMBOL(down_read_non_owner);
-+#endif
- void down_write_nested(struct rw_semaphore *sem, int subclass)
- {
-@@ -1644,11 +1649,13 @@ int __sched down_write_killable_nested(s
- }
- EXPORT_SYMBOL(down_write_killable_nested);
+@@ -1646,7 +1650,9 @@ EXPORT_SYMBOL(down_write_killable_nested
-+#ifndef CONFIG_PREEMPT_RT
void up_read_non_owner(struct rw_semaphore *sem)
{
++#ifndef CONFIG_PREEMPT_RT
DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
++#endif
__up_read(sem);
}
EXPORT_SYMBOL(up_read_non_owner);
-+#endif
-
- #endif
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
diff --git a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/patches/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index ea99a95cb303..7ed201e99523 100644
--- a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/patches/0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -1,14 +1,50 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 12 Oct 2017 17:34:38 +0200
-Subject: rtmutex: add ww_mutex addon for mutex-rt
+Subject: [PATCH 22/23] locking/rtmutex: add ww_mutex addon for mutex-rt
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 269 ++++++++++++++++++++++++++++++++++++++--
+ include/linux/mutex.h | 8 -
+ include/linux/ww_mutex.h | 8 +
+ kernel/locking/rtmutex.c | 262 ++++++++++++++++++++++++++++++++++++++--
kernel/locking/rtmutex_common.h | 2
kernel/locking/rwsem-rt.c | 2
- 3 files changed, 259 insertions(+), 14 deletions(-)
+ 5 files changed, 262 insertions(+), 20 deletions(-)
+--- a/include/linux/mutex.h
++++ b/include/linux/mutex.h
+@@ -82,14 +82,6 @@ struct mutex {
+ struct ww_class;
+ struct ww_acquire_ctx;
+
+-struct ww_mutex {
+- struct mutex base;
+- struct ww_acquire_ctx *ctx;
+-#ifdef CONFIG_DEBUG_MUTEXES
+- struct ww_class *ww_class;
+-#endif
+-};
+-
+ /*
+ * This is the control structure for tasks blocked on mutex,
+ * which resides on the blocked task's kernel stack:
+--- a/include/linux/ww_mutex.h
++++ b/include/linux/ww_mutex.h
+@@ -28,6 +28,14 @@ struct ww_class {
+ unsigned int is_wait_die;
+ };
+
++struct ww_mutex {
++ struct mutex base;
++ struct ww_acquire_ctx *ctx;
++#ifdef CONFIG_DEBUG_MUTEXES
++ struct ww_class *ww_class;
++#endif
++};
++
+ struct ww_acquire_ctx {
+ struct task_struct *task;
+ unsigned long stamp;
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -24,6 +24,7 @@
@@ -19,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rtmutex_common.h"
-@@ -1237,6 +1238,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1234,6 +1235,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT */
@@ -60,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1515,7 +1550,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1512,7 +1547,8 @@ void rt_mutex_init_waiter(struct rt_mute
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -70,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int ret = 0;
-@@ -1533,6 +1569,12 @@ static int __sched
+@@ -1530,6 +1566,12 @@ static int __sched
break;
}
@@ -82,8 +118,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
raw_spin_unlock_irq(&lock->wait_lock);
- debug_rt_mutex_print_deadlock(waiter);
-@@ -1567,16 +1609,106 @@ static void rt_mutex_handle_deadlock(int
+ schedule();
+@@ -1558,16 +1600,106 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -191,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1586,14 +1718,24 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1577,14 +1709,24 @@ int __sched rt_mutex_slowlock_locked(str
ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk);
@@ -219,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1610,7 +1752,8 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1601,7 +1743,8 @@ int __sched rt_mutex_slowlock_locked(str
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -229,7 +265,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1628,7 +1771,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1619,7 +1762,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -239,7 +275,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1758,29 +1902,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1749,14 +1893,16 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -258,26 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int
- rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
- enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx,
- int (*slowfn)(struct rt_mutex *lock, int state,
- struct hrtimer_sleeper *timeout,
-- enum rtmutex_chainwalk chwalk))
-+ enum rtmutex_chainwalk chwalk,
-+ struct ww_acquire_ctx *ww_ctx))
- {
- if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
-
-- return slowfn(lock, state, timeout, chwalk);
-+ return slowfn(lock, state, timeout, chwalk, ww_ctx);
- }
-
- static inline int
-@@ -1825,7 +1973,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1801,7 +1947,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
{
might_sleep();
@@ -286,15 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1945,6 +2093,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
- mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
- RT_MUTEX_MIN_CHAINWALK,
-+ NULL,
- rt_mutex_slowlock);
- if (ret)
- mutex_release(&lock->dep_map, _RET_IP_);
-@@ -2322,7 +2471,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2261,7 +2407,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -303,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2392,3 +2541,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2331,3 +2477,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -312,7 +321,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
-+ unsigned tmp;
++ unsigned int tmp;
+
+ if (ctx->deadlock_inject_countdown-- == 0) {
+ tmp = ctx->deadlock_inject_interval;
@@ -403,7 +412,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -165,6 +165,7 @@ extern void rt_mutex_postunlock(struct w
+@@ -160,6 +160,7 @@ extern void rt_mutex_postunlock(struct w
struct wake_q_head *wake_sleeper_q);
/* RW semaphore special interface */
@@ -411,7 +420,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
extern int __rt_mutex_trylock(struct rt_mutex *lock);
-@@ -172,6 +173,7 @@ extern void __rt_mutex_unlock(struct rt_
+@@ -167,6 +168,7 @@ extern void __rt_mutex_unlock(struct rt_
int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
@@ -421,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex_waiter *waiter,
--- a/kernel/locking/rwsem-rt.c
+++ b/kernel/locking/rwsem-rt.c
-@@ -131,7 +131,7 @@ static int __sched __down_read_common(st
+@@ -130,7 +130,7 @@ static int __sched __down_read_common(st
*/
rt_mutex_init_waiter(&waiter, false);
ret = rt_mutex_slowlock_locked(m, state, NULL, RT_MUTEX_MIN_CHAINWALK,
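
For reference, the deadlock-injection countdown threaded through the
ww_mutex path above works as a rarefying fault injector: every time the
countdown hits zero, -EDEADLK is returned and the interval grows, so
injections become progressively rarer. A rough standalone model follows;
the interval-growth arithmetic mirrors the mainline ww_mutex helper (the
hunk above is truncated before that part, so treat it as an assumption),
and the rest is illustrative stand-in code.

#include <errno.h>
#include <limits.h>
#include <stdio.h>

struct ctx_model {
	unsigned int deadlock_inject_interval;
	unsigned int deadlock_inject_countdown;
};

/* Returns -EDEADLK once per interval; the interval grows each time. */
static int maybe_inject_deadlock(struct ctx_model *ctx)
{
	unsigned int tmp;

	if (ctx->deadlock_inject_countdown-- == 0) {
		tmp = ctx->deadlock_inject_interval;
		if (tmp > UINT_MAX / 4)
			tmp = UINT_MAX;
		else
			tmp = tmp * 2 + tmp + tmp / 2;	/* grow ~3.5x */
		ctx->deadlock_inject_interval = tmp;
		ctx->deadlock_inject_countdown = tmp;
		return -EDEADLK;
	}
	return 0;
}

int main(void)
{
	struct ctx_model ctx = { .deadlock_inject_interval = 1,
				 .deadlock_inject_countdown = 2 };
	int i;

	for (i = 0; i < 16; i++)
		if (maybe_inject_deadlock(&ctx))
			printf("injected -EDEADLK at acquire %d\n", i);
	return 0;
}
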
diff --git a/patches/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/patches/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
new file mode 100644
index 000000000000..542e92a17138
--- /dev/null
+++ b/patches/0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -0,0 +1,224 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 6 Oct 2020 13:07:17 +0200
+Subject: [PATCH 23/23] locking/rtmutex: Use custom scheduling function for
+ spin-schedule()
+
+PREEMPT_RT builds the rwsem, mutex, spinlock and rwlock typed locks on
+top of an rtmutex lock. While blocked, task->pi_blocked_on is set
+(tsk_is_pi_blocked()) and the task needs to schedule away while waiting.
+
+The schedule process must distinguish between blocking on a regular
+sleeping lock (rwsem and mutex) and an RT-only sleeping lock (spinlock
+and rwlock):
+- rwsem and mutex must flush block requests (blk_schedule_flush_plug())
+ even if blocked on a lock. This cannot deadlock because this also
+ happens for non-RT.
+ There should be a warning if the scheduling point is within an RCU read
+ section.
+
+- spinlock and rwlock must not flush block requests. This will deadlock
+ if the callback attempts to acquire a lock which is already acquired.
+ Similarly to being preempted, there should be no warning if the
+ scheduling point is within an RCU read section.
+
+Add preempt_schedule_lock() which is invoked if scheduling is required
+while blocking on a PREEMPT_RT-only sleeping lock.
+Remove tsk_is_pi_blocked() from the scheduler path which is no longer
+needed with the additional scheduler entry point.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/include/asm/preempt.h | 3 +++
+ arch/x86/include/asm/preempt.h | 3 +++
+ include/asm-generic/preempt.h | 3 +++
+ include/linux/sched/rt.h | 8 --------
+ kernel/locking/rtmutex.c | 2 +-
+ kernel/locking/rwlock-rt.c | 2 +-
+ kernel/sched/core.c | 32 +++++++++++++++++++++-----------
+ 7 files changed, 32 insertions(+), 21 deletions(-)
+
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -81,6 +81,9 @@ static inline bool should_resched(int pr
+
+ #ifdef CONFIG_PREEMPTION
+ void preempt_schedule(void);
++#ifdef CONFIG_PREEMPT_RT
++void preempt_schedule_lock(void);
++#endif
+ #define __preempt_schedule() preempt_schedule()
+ void preempt_schedule_notrace(void);
+ #define __preempt_schedule_notrace() preempt_schedule_notrace()
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -103,6 +103,9 @@ static __always_inline bool should_resch
+ }
+
+ #ifdef CONFIG_PREEMPTION
++#ifdef CONFIG_PREEMPT_RT
++ extern void preempt_schedule_lock(void);
++#endif
+ extern asmlinkage void preempt_schedule_thunk(void);
+ # define __preempt_schedule() \
+ asm volatile ("call preempt_schedule_thunk" : ASM_CALL_CONSTRAINT)
+--- a/include/asm-generic/preempt.h
++++ b/include/asm-generic/preempt.h
+@@ -79,6 +79,9 @@ static __always_inline bool should_resch
+ }
+
+ #ifdef CONFIG_PREEMPTION
++#ifdef CONFIG_PREEMPT_RT
++extern void preempt_schedule_lock(void);
++#endif
+ extern asmlinkage void preempt_schedule(void);
+ #define __preempt_schedule() preempt_schedule()
+ extern asmlinkage void preempt_schedule_notrace(void);
+--- a/include/linux/sched/rt.h
++++ b/include/linux/sched/rt.h
+@@ -39,20 +39,12 @@ static inline struct task_struct *rt_mut
+ }
+ extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
+ extern void rt_mutex_adjust_pi(struct task_struct *p);
+-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+-{
+- return tsk->pi_blocked_on != NULL;
+-}
+ #else
+ static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+ {
+ return NULL;
+ }
+ # define rt_mutex_adjust_pi(p) do { } while (0)
+-static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
+-{
+- return false;
+-}
+ #endif
+
+ extern void normalize_rt_tasks(void);
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1067,7 +1067,7 @@ void __sched rt_spin_lock_slowlock_locke
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ if (top_waiter != waiter || adaptive_wait(lock, lock_owner))
+- schedule();
++ preempt_schedule_lock();
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+--- a/kernel/locking/rwlock-rt.c
++++ b/kernel/locking/rwlock-rt.c
+@@ -211,7 +211,7 @@ static void __write_rt_lock(struct rt_rw
+ raw_spin_unlock_irqrestore(&m->wait_lock, flags);
+
+ if (atomic_read(&lock->readers) != 0)
+- schedule();
++ preempt_schedule_lock();
+
+ raw_spin_lock_irqsave(&m->wait_lock, flags);
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4459,7 +4459,7 @@ pick_next_task(struct rq *rq, struct tas
+ *
+ * WARNING: must be called with preemption disabled!
+ */
+-static void __sched notrace __schedule(bool preempt)
++static void __sched notrace __schedule(bool preempt, bool spinning_lock)
+ {
+ struct task_struct *prev, *next;
+ unsigned long *switch_count;
+@@ -4512,7 +4512,7 @@ static void __sched notrace __schedule(b
+ * - ptrace_{,un}freeze_traced() can change ->state underneath us.
+ */
+ prev_state = prev->state;
+- if (!preempt && prev_state) {
++ if ((!preempt || spinning_lock) && prev_state) {
+ if (signal_pending_state(prev_state, prev)) {
+ prev->state = TASK_RUNNING;
+ } else {
+@@ -4594,7 +4594,7 @@ void __noreturn do_task_dead(void)
+ /* Tell freezer to ignore us: */
+ current->flags |= PF_NOFREEZE;
+
+- __schedule(false);
++ __schedule(false, false);
+ BUG();
+
+ /* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
+@@ -4624,9 +4624,6 @@ static inline void sched_submit_work(str
+ preempt_enable_no_resched();
+ }
+
+- if (tsk_is_pi_blocked(tsk))
+- return;
+-
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+@@ -4652,7 +4649,7 @@ asmlinkage __visible void __sched schedu
+ sched_submit_work(tsk);
+ do {
+ preempt_disable();
+- __schedule(false);
++ __schedule(false, false);
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
+ sched_update_worker(tsk);
+@@ -4680,7 +4677,7 @@ void __sched schedule_idle(void)
+ */
+ WARN_ON_ONCE(current->state);
+ do {
+- __schedule(false);
++ __schedule(false, false);
+ } while (need_resched());
+ }
+
+@@ -4733,7 +4730,7 @@ static void __sched notrace preempt_sche
+ */
+ preempt_disable_notrace();
+ preempt_latency_start(1);
+- __schedule(true);
++ __schedule(true, false);
+ preempt_latency_stop(1);
+ preempt_enable_no_resched_notrace();
+
+@@ -4763,6 +4760,19 @@ asmlinkage __visible void __sched notrac
+ NOKPROBE_SYMBOL(preempt_schedule);
+ EXPORT_SYMBOL(preempt_schedule);
+
++#ifdef CONFIG_PREEMPT_RT
++void __sched notrace preempt_schedule_lock(void)
++{
++ do {
++ preempt_disable();
++ __schedule(true, true);
++ sched_preempt_enable_no_resched();
++ } while (need_resched());
++}
++NOKPROBE_SYMBOL(preempt_schedule_lock);
++EXPORT_SYMBOL(preempt_schedule_lock);
++#endif
++
+ /**
+ * preempt_schedule_notrace - preempt_schedule called by tracing
+ *
+@@ -4806,7 +4816,7 @@ asmlinkage __visible void __sched notrac
+ * an infinite recursion.
+ */
+ prev_ctx = exception_enter();
+- __schedule(true);
++ __schedule(true, false);
+ exception_exit(prev_ctx);
+
+ preempt_latency_stop(1);
+@@ -4835,7 +4845,7 @@ asmlinkage __visible void __sched preemp
+ do {
+ preempt_disable();
+ local_irq_enable();
+- __schedule(true);
++ __schedule(true, false);
+ local_irq_disable();
+ sched_preempt_enable_no_resched();
+ } while (need_resched());
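
The key behavioural point of the new spinning_lock argument is the
prev_state check: a task entering __schedule() via preempt_schedule_lock()
takes the preemption path, yet it is still dequeued if it set a sleeping
state, exactly like a voluntary schedule(). A tiny standalone model of
just that decision; the TASK_* values are stand-ins for illustration.

#include <stdbool.h>
#include <stdio.h>

#define TASK_RUNNING		0
#define TASK_UNINTERRUPTIBLE	2

/* Mirrors: if ((!preempt || spinning_lock) && prev_state) in __schedule(). */
static bool should_dequeue(bool preempt, bool spinning_lock,
			   unsigned int prev_state)
{
	return (!preempt || spinning_lock) && prev_state;
}

int main(void)
{
	/* Plain preemption: the task stays on the runqueue. */
	printf("preempt:       dequeue=%d\n",
	       should_dequeue(true, false, TASK_UNINTERRUPTIBLE));
	/* Blocking on a spinlock_t via preempt_schedule_lock(). */
	printf("spinning lock: dequeue=%d\n",
	       should_dequeue(true, true, TASK_UNINTERRUPTIBLE));
	/* Voluntary schedule() with a sleeping state set. */
	printf("voluntary:     dequeue=%d\n",
	       should_dequeue(false, false, TASK_UNINTERRUPTIBLE));
	return 0;
}
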
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index 13da981ba887..2d8ea45b3298 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -11,19 +11,16 @@ Subject: kernel/sched/core: add migrate_disable()
]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/preempt.h | 22 +++++
- include/linux/sched.h | 35 ++++++++
- include/linux/smp.h | 3
- init/init_task.c | 4
- kernel/cpu.c | 44 ++++++++++
- kernel/locking/rtmutex.c | 12 ++
- kernel/locking/rwlock-rt.c | 18 +++-
- kernel/rcu/tree_plugin.h | 6 +
- kernel/sched/core.c | 183 ++++++++++++++++++++++++++++++++++++++++++++-
- kernel/sched/debug.c | 4
- kernel/sched/sched.h | 4
- lib/smp_processor_id.c | 5 +
- 12 files changed, 330 insertions(+), 10 deletions(-)
+ include/linux/preempt.h | 22 +++++
+ include/linux/sched.h | 15 +++
+ include/linux/smp.h | 3
+ init/init_task.c | 4 +
+ kernel/cpu.c | 44 +++++++++++
+ kernel/sched/core.c | 181 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/debug.c | 4 +
+ kernel/sched/sched.h | 4 +
+ lib/smp_processor_id.c | 5 +
+ 9 files changed, 279 insertions(+), 3 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -63,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __LINUX_PREEMPT_H */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -232,6 +232,8 @@ extern void io_schedule_finish(int token
+@@ -231,6 +231,8 @@ extern void io_schedule_finish(int token
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
@@ -72,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
-@@ -718,6 +720,20 @@ struct task_struct {
+@@ -717,6 +719,17 @@ struct task_struct {
int nr_cpus_allowed;
const cpumask_t *cpus_ptr;
cpumask_t cpus_mask;
@@ -87,37 +84,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ int migrate_disable;
+# endif
+#endif
-+#ifdef CONFIG_PREEMPT_RT
-+ int sleeping_lock;
-+#endif
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1923,6 +1939,23 @@ static __always_inline bool need_resched
- return unlikely(tif_need_resched());
- }
-
-+#ifdef CONFIG_PREEMPT_RT
-+static inline void sleeping_lock_inc(void)
-+{
-+ current->sleeping_lock++;
-+}
-+
-+static inline void sleeping_lock_dec(void)
-+{
-+ current->sleeping_lock--;
-+}
-+
-+#else
-+
-+static inline void sleeping_lock_inc(void) { }
-+static inline void sleeping_lock_dec(void) { }
-+#endif
-+
- /*
- * Wrappers for p->thread_info->cpu access. No-op on UP.
- */
-@@ -2107,4 +2140,6 @@ int sched_trace_rq_nr_running(struct rq
+@@ -2106,4 +2119,6 @@ int sched_trace_rq_nr_running(struct rq
const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
@@ -228,153 +198,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Interrupts are moved away from the dying cpu, reenable alloc/free */
irq_unlock_sparse();
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -1138,6 +1138,7 @@ void __sched rt_spin_lock_slowunlock(str
-
- void __lockfunc rt_spin_lock(spinlock_t *lock)
- {
-+ sleeping_lock_inc();
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1152,6 +1153,7 @@ void __lockfunc __rt_spin_lock(struct rt
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
- {
-+ sleeping_lock_inc();
- migrate_disable();
- spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1161,6 +1163,7 @@ EXPORT_SYMBOL(rt_spin_lock_nested);
- void __lockfunc rt_spin_lock_nest_lock(spinlock_t *lock,
- struct lockdep_map *nest_lock)
- {
-+ sleeping_lock_inc();
- migrate_disable();
- spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1174,6 +1177,7 @@ void __lockfunc rt_spin_unlock(spinlock_
- spin_release(&lock->dep_map, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
- migrate_enable();
-+ sleeping_lock_dec();
- }
- EXPORT_SYMBOL(rt_spin_unlock);
-
-@@ -1199,12 +1203,15 @@ int __lockfunc rt_spin_trylock(spinlock_
- {
- int ret;
-
-+ sleeping_lock_inc();
- migrate_disable();
- ret = __rt_mutex_trylock(&lock->lock);
-- if (ret)
-+ if (ret) {
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-- else
-+ } else {
- migrate_enable();
-+ sleeping_lock_dec();
-+ }
- return ret;
- }
- EXPORT_SYMBOL(rt_spin_trylock);
-@@ -1216,6 +1223,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
- local_bh_disable();
- ret = __rt_mutex_trylock(&lock->lock);
- if (ret) {
-+ sleeping_lock_inc();
- migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- } else
---- a/kernel/locking/rwlock-rt.c
-+++ b/kernel/locking/rwlock-rt.c
-@@ -268,12 +268,15 @@ int __lockfunc rt_read_trylock(rwlock_t
- {
- int ret;
-
-+ sleeping_lock_inc();
- migrate_disable();
- ret = __read_rt_trylock(rwlock);
-- if (ret)
-+ if (ret) {
- rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
-- else
-+ } else {
- migrate_enable();
-+ sleeping_lock_dec();
-+ }
- return ret;
- }
- EXPORT_SYMBOL(rt_read_trylock);
-@@ -282,18 +285,22 @@ int __lockfunc rt_write_trylock(rwlock_t
- {
- int ret;
-
-+ sleeping_lock_inc();
- migrate_disable();
- ret = __write_rt_trylock(rwlock);
-- if (ret)
-+ if (ret) {
- rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
-- else
-+ } else {
- migrate_enable();
-+ sleeping_lock_dec();
-+ }
- return ret;
- }
- EXPORT_SYMBOL(rt_write_trylock);
-
- void __lockfunc rt_read_lock(rwlock_t *rwlock)
- {
-+ sleeping_lock_inc();
- migrate_disable();
- rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
- __read_rt_lock(rwlock);
-@@ -302,6 +309,7 @@ EXPORT_SYMBOL(rt_read_lock);
-
- void __lockfunc rt_write_lock(rwlock_t *rwlock)
- {
-+ sleeping_lock_inc();
- migrate_disable();
- rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
- __write_rt_lock(rwlock);
-@@ -313,6 +321,7 @@ void __lockfunc rt_read_unlock(rwlock_t
- rwlock_release(&rwlock->dep_map, _RET_IP_);
- __read_rt_unlock(rwlock);
- migrate_enable();
-+ sleeping_lock_dec();
- }
- EXPORT_SYMBOL(rt_read_unlock);
-
-@@ -321,6 +330,7 @@ void __lockfunc rt_write_unlock(rwlock_t
- rwlock_release(&rwlock->dep_map, _RET_IP_);
- __write_rt_unlock(rwlock);
- migrate_enable();
-+ sleeping_lock_dec();
- }
- EXPORT_SYMBOL(rt_write_unlock);
-
---- a/kernel/rcu/tree_plugin.h
-+++ b/kernel/rcu/tree_plugin.h
-@@ -289,10 +289,14 @@ void rcu_note_context_switch(bool preemp
- struct task_struct *t = current;
- struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
- struct rcu_node *rnp;
-+ int sleeping_l = 0;
-
- trace_rcu_utilization(TPS("Start context switch"));
- lockdep_assert_irqs_disabled();
-- WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0);
-+#if defined(CONFIG_PREEMPT_RT)
-+ sleeping_l = t->sleeping_lock;
-+#endif
-+ WARN_ON_ONCE(!preempt && rcu_preempt_depth() > 0 && !sleeping_l);
- if (rcu_preempt_depth() > 0 &&
- !t->rcu_read_unlock_special.b.blocked) {
-
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1736,7 +1736,7 @@ static inline bool is_cpu_allowed(struct
@@ -435,7 +258,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Promote REQ to ACT */
rq->clock_update_flags <<= 1;
update_rq_clock(rq);
-@@ -6830,6 +6845,7 @@ static void migrate_tasks(struct rq *dea
+@@ -6840,6 +6855,7 @@ static void migrate_tasks(struct rq *dea
break;
next = __pick_migrate_task(rq);
@@ -443,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Rules for changing task_struct::cpus_mask are holding
-@@ -8548,3 +8564,164 @@ void call_trace_sched_update_nr_running(
+@@ -8558,3 +8574,162 @@ void call_trace_sched_update_nr_running(
{
trace_sched_update_nr_running_tp(rq, count);
}
@@ -557,9 +380,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ preempt_enable();
+
-+ sleeping_lock_inc();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-+ sleeping_lock_dec();
+ return;
+
+ }
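
migrate_disable()/migrate_enable() nest like a counter on task_struct;
only when the count drops back to zero may the task be migrated again.
A rough userspace model of just the bookkeeping (the actual kernel code
also pins cpus_ptr to the current CPU, which is omitted here):

#include <assert.h>
#include <stdio.h>

static __thread int migrate_disable_cnt; /* models task_struct::migrate_disable */

static void migrate_disable(void)
{
	migrate_disable_cnt++;
}

static void migrate_enable(void)
{
	assert(migrate_disable_cnt > 0);
	migrate_disable_cnt--;
	/* In the kernel, a pending migration may proceed once this hits 0. */
}

int main(void)
{
	migrate_disable();
	migrate_disable();	/* nesting is fine; only the outer pair matters */
	migrate_enable();
	migrate_enable();
	printf("balanced, cnt=%d\n", migrate_disable_cnt);
	return 0;
}
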
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
deleted file mode 100644
index 5b704bea09e4..000000000000
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-Subject: sched: Use the proper LOCK_OFFSET for cond_resched()
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Sun, 17 Jul 2011 22:51:33 +0200
-
-RT does not increment preempt count when a 'sleeping' spinlock is
-locked. Update PREEMPT_LOCK_OFFSET for that case.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/preempt.h | 4 ++++
- 1 file changed, 4 insertions(+)
-
---- a/include/linux/preempt.h
-+++ b/include/linux/preempt.h
-@@ -118,7 +118,11 @@
- /*
- * The preempt_count offset after spin_lock()
- */
-+#if !defined(CONFIG_PREEMPT_RT)
- #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
-+#else
-+#define PREEMPT_LOCK_OFFSET 0
-+#endif
-
- /*
- * The preempt_count offset needed for things like:
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index d0b8e8904c92..718086ecb11c 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3647,15 +3647,6 @@ static struct rq *finish_task_switch(str
+@@ -3700,15 +3700,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 12bd473a33f5..25e5fadbaae8 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt12
++-rt13
diff --git a/patches/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch b/patches/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
index 98d8d7414417..84c66fb9d583 100644
--- a/patches/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
+++ b/patches/locking-Make-spinlock_t-and-rwlock_t-a-RCU-section-o.patch
@@ -16,101 +16,101 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1139,6 +1139,7 @@ void __sched rt_spin_lock_slowunlock(str
- void __lockfunc rt_spin_lock(spinlock_t *lock)
+@@ -1136,6 +1136,7 @@ void __lockfunc rt_spin_lock(spinlock_t
{
- sleeping_lock_inc();
-+ rcu_read_lock();
- migrate_disable();
spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1154,6 +1155,7 @@ void __lockfunc __rt_spin_lock(struct rt
- void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
- {
- sleeping_lock_inc();
+ rcu_read_lock();
migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock);
+@@ -1150,6 +1151,7 @@ void __lockfunc rt_spin_lock_nested(spin
+ {
spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1164,6 +1166,7 @@ void __lockfunc rt_spin_lock_nest_lock(s
- struct lockdep_map *nest_lock)
- {
- sleeping_lock_inc();
+ rcu_read_lock();
migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nested);
+@@ -1159,6 +1161,7 @@ void __lockfunc rt_spin_lock_nest_lock(s
+ {
spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
-@@ -1177,6 +1180,7 @@ void __lockfunc rt_spin_unlock(spinlock_
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nest_lock);
+@@ -1169,6 +1172,7 @@ void __lockfunc rt_spin_unlock(spinlock_
+ /* NOTE: we always pass in '1' for nested, for simplicity */
spin_release(&lock->dep_map, _RET_IP_);
- rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
migrate_enable();
+ rcu_read_unlock();
- sleeping_lock_dec();
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
}
EXPORT_SYMBOL(rt_spin_unlock);
-@@ -1208,6 +1212,7 @@ int __lockfunc rt_spin_trylock(spinlock_
+@@ -1198,6 +1202,7 @@ int __lockfunc rt_spin_trylock(spinlock_
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ rcu_read_lock();
- } else {
- migrate_enable();
- sleeping_lock_dec();
-@@ -1224,6 +1229,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
+ migrate_disable();
+ }
+ return ret;
+@@ -1212,6 +1217,7 @@ int __lockfunc rt_spin_trylock_bh(spinlo
ret = __rt_mutex_trylock(&lock->lock);
if (ret) {
- sleeping_lock_inc();
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+ rcu_read_lock();
migrate_disable();
- spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
- } else
+ } else {
+ local_bh_enable();
--- a/kernel/locking/rwlock-rt.c
+++ b/kernel/locking/rwlock-rt.c
-@@ -273,6 +273,7 @@ int __lockfunc rt_read_trylock(rwlock_t
+@@ -270,6 +270,7 @@ int __lockfunc rt_read_trylock(rwlock_t
ret = __read_rt_trylock(rwlock);
if (ret) {
rwlock_acquire_read(&rwlock->dep_map, 0, 1, _RET_IP_);
+ rcu_read_lock();
- } else {
- migrate_enable();
- sleeping_lock_dec();
-@@ -290,6 +291,7 @@ int __lockfunc rt_write_trylock(rwlock_t
+ migrate_disable();
+ }
+ return ret;
+@@ -283,6 +284,7 @@ int __lockfunc rt_write_trylock(rwlock_t
ret = __write_rt_trylock(rwlock);
if (ret) {
rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+ rcu_read_lock();
- } else {
- migrate_enable();
- sleeping_lock_dec();
-@@ -301,6 +303,7 @@ EXPORT_SYMBOL(rt_write_trylock);
- void __lockfunc rt_read_lock(rwlock_t *rwlock)
+ migrate_disable();
+ }
+ return ret;
+@@ -293,6 +295,7 @@ void __lockfunc rt_read_lock(rwlock_t *r
{
- sleeping_lock_inc();
-+ rcu_read_lock();
- migrate_disable();
rwlock_acquire_read(&rwlock->dep_map, 0, 0, _RET_IP_);
__read_rt_lock(rwlock);
-@@ -310,6 +313,7 @@ EXPORT_SYMBOL(rt_read_lock);
- void __lockfunc rt_write_lock(rwlock_t *rwlock)
- {
- sleeping_lock_inc();
+ rcu_read_lock();
migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_read_lock);
+@@ -301,6 +304,7 @@ void __lockfunc rt_write_lock(rwlock_t *
+ {
rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
__write_rt_lock(rwlock);
-@@ -321,6 +325,7 @@ void __lockfunc rt_read_unlock(rwlock_t
++ rcu_read_lock();
+ migrate_disable();
+ }
+ EXPORT_SYMBOL(rt_write_lock);
+@@ -309,6 +313,7 @@ void __lockfunc rt_read_unlock(rwlock_t
+ {
rwlock_release(&rwlock->dep_map, _RET_IP_);
- __read_rt_unlock(rwlock);
migrate_enable();
+ rcu_read_unlock();
- sleeping_lock_dec();
+ __read_rt_unlock(rwlock);
}
EXPORT_SYMBOL(rt_read_unlock);
-@@ -330,6 +335,7 @@ void __lockfunc rt_write_unlock(rwlock_t
+@@ -317,6 +322,7 @@ void __lockfunc rt_write_unlock(rwlock_t
+ {
rwlock_release(&rwlock->dep_map, _RET_IP_);
- __write_rt_unlock(rwlock);
migrate_enable();
+ rcu_read_unlock();
- sleeping_lock_dec();
+ __write_rt_unlock(rwlock);
}
EXPORT_SYMBOL(rt_write_unlock);
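
With this refresh the RCU read-side section is entered after the lock has
been acquired and left before it is released, mirroring the new
migrate_disable()/migrate_enable() placement. A standalone sketch of the
resulting ordering, with the RCU and migration calls stubbed out; this is
an illustration of the ordering only, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

static __thread int rcu_nesting;

static void rcu_read_lock(void)   { rcu_nesting++; }
static void rcu_read_unlock(void) { rcu_nesting--; }
static void migrate_disable(void) { }
static void migrate_enable(void)  { }

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void rt_spin_lock_model(void)
{
	pthread_mutex_lock(&lock);	/* may sleep on PREEMPT_RT */
	rcu_read_lock();		/* entered only once the lock is held */
	migrate_disable();
}

static void rt_spin_unlock_model(void)
{
	migrate_enable();
	rcu_read_unlock();		/* left before the lock is dropped */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	rt_spin_lock_model();
	printf("inside section, rcu_nesting=%d\n", rcu_nesting);
	rt_spin_unlock_model();
	return 0;
}
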
diff --git a/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch b/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
deleted file mode 100644
index 6fe10914cc99..000000000000
--- a/patches/locking-rt-mutex-Flush-block-plug-on-__down_read.patch
+++ /dev/null
@@ -1,39 +0,0 @@
-From: Scott Wood <swood@redhat.com>
-Date: Fri, 4 Jan 2019 15:33:21 -0500
-Subject: [PATCH] locking/rt-mutex: Flush block plug on __down_read()
-
-__down_read() bypasses the rtmutex frontend to call
-rt_mutex_slowlock_locked() directly, and thus it needs to call
-blk_schedule_flush_flug() itself.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Scott Wood <swood@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rwsem-rt.c | 9 +++++++++
- 1 file changed, 9 insertions(+)
-
---- a/kernel/locking/rwsem-rt.c
-+++ b/kernel/locking/rwsem-rt.c
-@@ -1,5 +1,6 @@
- /*
- */
-+#include <linux/blkdev.h>
- #include <linux/rwsem.h>
- #include <linux/sched/debug.h>
- #include <linux/sched/signal.h>
-@@ -87,6 +88,14 @@ static int __sched __down_read_common(st
-
- if (__down_read_trylock(sem))
- return 0;
-+ /*
-+ * If rt_mutex blocks, the function sched_submit_work will not call
-+ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
-+ * We must call blk_schedule_flush_plug here, if we don't call it,
-+ * a deadlock in I/O may happen.
-+ */
-+ if (unlikely(blk_needs_flush_plug(current)))
-+ blk_schedule_flush_plug(current);
-
- might_sleep();
- raw_spin_lock_irq(&m->wait_lock);
diff --git a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
deleted file mode 100644
index dc31ba7fc8bf..000000000000
--- a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
+++ /dev/null
@@ -1,73 +0,0 @@
-From: Mikulas Patocka <mpatocka@redhat.com>
-Date: Mon, 13 Nov 2017 12:56:53 -0500
-Subject: [PATCH] locking/rt-mutex: fix deadlock in device mapper / block-IO
-
-When some block device driver creates a bio and submits it to another
-block device driver, the bio is added to current->bio_list (in order to
-avoid unbounded recursion).
-
-However, this queuing of bios can cause deadlocks, in order to avoid them,
-device mapper registers a function flush_current_bio_list. This function
-is called when device mapper driver blocks. It redirects bios queued on
-current->bio_list to helper workqueues, so that these bios can proceed
-even if the driver is blocked.
-
-The problem with CONFIG_PREEMPT_RT is that when the device mapper
-driver blocks, it won't call flush_current_bio_list (because
-tsk_is_pi_blocked returns true in sched_submit_work), so deadlocks in
-block device stack can happen.
-
-Note that we can't call blk_schedule_flush_plug if tsk_is_pi_blocked
-returns true - that would cause
-BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-task_blocks_on_rt_mutex when flush_current_bio_list attempts to take a
-spinlock.
-
-So the proper fix is to call blk_schedule_flush_plug in rt_mutex_fastlock,
-when fast acquire failed and when the task is about to block.
-
-CC: stable-rt@vger.kernel.org
-[bigeasy: The deadlock is not device-mapper specific, it can also occur
- in plain EXT4]
-Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -25,6 +25,7 @@
- #include <linux/sched/debug.h>
- #include <linux/timer.h>
- #include <linux/ww_mutex.h>
-+#include <linux/blkdev.h>
-
- #include "rtmutex_common.h"
-
-@@ -1911,6 +1912,15 @@ rt_mutex_fastlock(struct rt_mutex *lock,
- if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
-
-+ /*
-+ * If rt_mutex blocks, the function sched_submit_work will not call
-+ * blk_schedule_flush_plug (because tsk_is_pi_blocked would be true).
-+ * We must call blk_schedule_flush_plug here, if we don't call it,
-+ * a deadlock in I/O may happen.
-+ */
-+ if (unlikely(blk_needs_flush_plug(current)))
-+ blk_schedule_flush_plug(current);
-+
- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
- }
-
-@@ -1928,6 +1938,9 @@ rt_mutex_timed_fastlock(struct rt_mutex
- likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
- return 0;
-
-+ if (unlikely(blk_needs_flush_plug(current)))
-+ blk_schedule_flush_plug(current);
-+
- return slowfn(lock, state, timeout, chwalk, ww_ctx);
- }
-
diff --git a/patches/locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch b/patches/locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
deleted file mode 100644
index f9cae59afc17..000000000000
--- a/patches/locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
+++ /dev/null
@@ -1,91 +0,0 @@
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Mon, 30 Sep 2019 18:15:44 +0200
-Subject: [PATCH] locking/rtmutex: Clean ->pi_blocked_on in the error case
-
-The function rt_mutex_wait_proxy_lock() cleans ->pi_blocked_on in case
-of failure (timeout, signal). The same cleanup is required in
-__rt_mutex_start_proxy_lock().
-In both the cases the tasks was interrupted by a signal or timeout while
-acquiring the lock and after the interruption it longer blocks on the
-lock.
-
-Fixes: 1a1fb985f2e2b ("futex: Handle early deadlock return correctly")
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rtmutex.c | 43 +++++++++++++++++++++++++------------------
- 1 file changed, 25 insertions(+), 18 deletions(-)
-
---- a/kernel/locking/rtmutex.c
-+++ b/kernel/locking/rtmutex.c
-@@ -2138,6 +2138,26 @@ void rt_mutex_proxy_unlock(struct rt_mut
- rt_mutex_set_owner(lock, NULL);
- }
-
-+static void fixup_rt_mutex_blocked(struct rt_mutex *lock)
-+{
-+ struct task_struct *tsk = current;
-+ /*
-+ * RT has a problem here when the wait got interrupted by a timeout
-+ * or a signal. task->pi_blocked_on is still set. The task must
-+ * acquire the hash bucket lock when returning from this function.
-+ *
-+ * If the hash bucket lock is contended then the
-+ * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-+ * task_blocks_on_rt_mutex() will trigger. This can be avoided by
-+ * clearing task->pi_blocked_on which removes the task from the
-+ * boosting chain of the rtmutex. That's correct because the task
-+ * is not longer blocked on it.
-+ */
-+ raw_spin_lock(&tsk->pi_lock);
-+ tsk->pi_blocked_on = NULL;
-+ raw_spin_unlock(&tsk->pi_lock);
-+}
-+
- /**
- * __rt_mutex_start_proxy_lock() - Start lock acquisition for another task
- * @lock: the rt_mutex to take
-@@ -2210,6 +2230,9 @@ int __rt_mutex_start_proxy_lock(struct r
- ret = 0;
- }
-
-+ if (ret)
-+ fixup_rt_mutex_blocked(lock);
-+
- debug_rt_mutex_print_deadlock(waiter);
-
- return ret;
-@@ -2290,7 +2313,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m
- struct hrtimer_sleeper *to,
- struct rt_mutex_waiter *waiter)
- {
-- struct task_struct *tsk = current;
- int ret;
-
- raw_spin_lock_irq(&lock->wait_lock);
-@@ -2302,23 +2324,8 @@ int rt_mutex_wait_proxy_lock(struct rt_m
- * have to fix that up.
- */
- fixup_rt_mutex_waiters(lock);
-- /*
-- * RT has a problem here when the wait got interrupted by a timeout
-- * or a signal. task->pi_blocked_on is still set. The task must
-- * acquire the hash bucket lock when returning from this function.
-- *
-- * If the hash bucket lock is contended then the
-- * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in
-- * task_blocks_on_rt_mutex() will trigger. This can be avoided by
-- * clearing task->pi_blocked_on which removes the task from the
-- * boosting chain of the rtmutex. That's correct because the task
-- * is not longer blocked on it.
-- */
-- if (ret) {
-- raw_spin_lock(&tsk->pi_lock);
-- tsk->pi_blocked_on = NULL;
-- raw_spin_unlock(&tsk->pi_lock);
-- }
-+ if (ret)
-+ fixup_rt_mutex_blocked(lock);
-
- raw_spin_unlock_irq(&lock->wait_lock);
-
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 533433eb0d70..45618e07cd05 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -249,7 +249,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1309,6 +1310,12 @@ struct task_struct {
+@@ -1305,6 +1306,12 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
diff --git a/patches/mutex-Move-the-ww_mutext-definition-back-to-ww_mutex.patch b/patches/mutex-Move-the-ww_mutext-definition-back-to-ww_mutex.patch
deleted file mode 100644
index 752c9f4a2049..000000000000
--- a/patches/mutex-Move-the-ww_mutext-definition-back-to-ww_mutex.patch
+++ /dev/null
@@ -1,50 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 14 Aug 2020 16:51:58 +0200
-Subject: [PATCH 1/4] mutex: Move the ww_mutex definition back to ww_mutex.h
-
-PREEMPT_RT does not include `mutex.h' and provides another `mutex' based
-on rt_mutex. Because `mutex.h' is avoided, there has been no definition
-of ww_mutex since the recent header untangling.
-
-Move the ww_mutex definition back to ww_mutex.h
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/mutex.h | 8 --------
- include/linux/ww_mutex.h | 8 ++++++++
- 2 files changed, 8 insertions(+), 8 deletions(-)
-
---- a/include/linux/mutex.h
-+++ b/include/linux/mutex.h
-@@ -82,14 +82,6 @@ struct mutex {
- struct ww_class;
- struct ww_acquire_ctx;
-
--struct ww_mutex {
-- struct mutex base;
-- struct ww_acquire_ctx *ctx;
--#ifdef CONFIG_DEBUG_MUTEXES
-- struct ww_class *ww_class;
--#endif
--};
--
- /*
- * This is the control structure for tasks blocked on mutex,
- * which resides on the blocked task's kernel stack:
---- a/include/linux/ww_mutex.h
-+++ b/include/linux/ww_mutex.h
-@@ -28,6 +28,14 @@ struct ww_class {
- unsigned int is_wait_die;
- };
-
-+struct ww_mutex {
-+ struct mutex base;
-+ struct ww_acquire_ctx *ctx;
-+#ifdef CONFIG_DEBUG_MUTEXES
-+ struct ww_class *ww_class;
-+#endif
-+};
-+
- struct ww_acquire_ctx {
- struct task_struct *task;
- unsigned long stamp;
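
(For illustration, a userspace sketch of the header layering the patch
above established. The struct members are trimmed and the file split is
modeled inside one file, so this is a sketch, not the kernel headers.)

/* ww_layering_demo.c: build with cc -Wall ww_layering_demo.c */
#include <stdio.h>

/* Stand-in for what linux/mutex.h still provides. */
struct mutex { int locked; };

/* Stand-in for linux/ww_mutex.h after the move: it owns struct
 * ww_mutex, so an RT build that replaces the mutex implementation
 * only needs the struct mutex type, not the rest of mutex.h. */
struct ww_acquire_ctx;			/* forward declaration suffices */
struct ww_class { unsigned int is_wait_die; };

struct ww_mutex {
	struct mutex base;
	struct ww_acquire_ctx *ctx;
};

int main(void)
{
	struct ww_mutex m = { .base = { .locked = 0 }, .ctx = NULL };

	printf("ww_mutex defined via ww_mutex.h; base.locked=%d\n",
	       m.base.locked);
	return 0;
}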
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 95da88ba855a..da26a45e1830 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -946,6 +946,10 @@ struct task_struct {
+@@ -954,6 +954,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index bfef8191a48c..39a9bb2d1483 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1849,6 +1849,44 @@ static inline int test_tsk_need_resched(
+@@ -1845,6 +1845,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -4777,6 +4823,30 @@ static void __sched notrace preempt_sche
+@@ -4774,6 +4820,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -4790,7 +4860,8 @@ asmlinkage __visible void __sched notrac
+@@ -4787,7 +4857,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -4817,6 +4888,9 @@ asmlinkage __visible void __sched notrac
+@@ -4827,6 +4898,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -6651,7 +6725,9 @@ void init_idle(struct task_struct *idle,
+@@ -6661,7 +6735,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -8613,6 +8689,7 @@ void migrate_disable(void)
+@@ -8623,6 +8699,7 @@ void migrate_disable(void)
if (++current->migrate_disable == 1) {
this_rq()->nr_pinned++;
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SCHED_DEBUG
WARN_ON_ONCE(current->pinned_on_cpu >= 0);
current->pinned_on_cpu = smp_processor_id();
-@@ -8694,6 +8771,7 @@ void migrate_enable(void)
+@@ -8704,6 +8781,7 @@ void migrate_enable(void)
}
out:
diff --git a/patches/preempt-nort-rt-variants.patch b/patches/preempt-nort-rt-variants.patch
index 5716478e4931..8a5b34e8957c 100644
--- a/patches/preempt-nort-rt-variants.patch
+++ b/patches/preempt-nort-rt-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -181,7 +181,11 @@ do { \
+@@ -185,7 +185,11 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -275,6 +279,18 @@ do { \
+@@ -279,6 +283,18 @@ do { \
set_preempt_need_resched(); \
} while (0)
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index b5b5f8324836..ef262039a7ba 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
-@@ -1830,6 +1826,51 @@ static inline int test_tsk_need_resched(
+@@ -1829,6 +1825,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index aec3b1194aad..6f061b770919 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -56,8 +56,8 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/delay.h | 6 ++++++
- kernel/time/hrtimer.c | 32 ++++++++++++++++++++++++++++++++
- 2 files changed, 38 insertions(+)
+ kernel/time/hrtimer.c | 30 ++++++++++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1993,6 +1993,38 @@ SYSCALL_DEFINE2(nanosleep_time32, struct
+@@ -1993,6 +1993,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct
}
#endif
@@ -96,9 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ chill_time = ktime_set(0, NSEC_PER_MSEC);
+
+ current->flags |= PF_NOFREEZE;
-+ sleeping_lock_inc();
+ schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
-+ sleeping_lock_dec();
+ if (!freeze_flag)
+ current->flags &= ~PF_NOFREEZE;
+
diff --git a/patches/rwsem-Provide-down_read_non_owner-and-up_read_non_ow.patch b/patches/rwsem-Provide-down_read_non_owner-and-up_read_non_ow.patch
deleted file mode 100644
index f5bdb53b9c1d..000000000000
--- a/patches/rwsem-Provide-down_read_non_owner-and-up_read_non_ow.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 12 Jun 2020 16:51:50 +0200
-Subject: [PATCH] rwsem: Provide down_read_non_owner() and up_read_non_owner()
- for -RT
-
-The rwsem implementation on -RT allows multiple readers and there is no
-owner tracking anymore.
-We can provide down_read_non_owner() and up_read_non_owner() by skipping
-the owner-check bits, which are only available in the !RT implementation.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/locking/rwsem.c | 8 ++++----
- 1 file changed, 4 insertions(+), 4 deletions(-)
-
---- a/kernel/locking/rwsem.c
-+++ b/kernel/locking/rwsem.c
-@@ -1616,15 +1616,15 @@ void _down_write_nest_lock(struct rw_sem
- }
- EXPORT_SYMBOL(_down_write_nest_lock);
-
--#ifndef CONFIG_PREEMPT_RT
- void down_read_non_owner(struct rw_semaphore *sem)
- {
- might_sleep();
- __down_read(sem);
-+#ifndef CONFIG_PREEMPT_RT
- __rwsem_set_reader_owned(sem, NULL);
-+#endif
- }
- EXPORT_SYMBOL(down_read_non_owner);
--#endif
-
- void down_write_nested(struct rw_semaphore *sem, int subclass)
- {
-@@ -1649,13 +1649,13 @@ int __sched down_write_killable_nested(s
- }
- EXPORT_SYMBOL(down_write_killable_nested);
-
--#ifndef CONFIG_PREEMPT_RT
- void up_read_non_owner(struct rw_semaphore *sem)
- {
-+#ifndef CONFIG_PREEMPT_RT
- DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
-+#endif
- __up_read(sem);
- }
- EXPORT_SYMBOL(up_read_non_owner);
--#endif
-
- #endif
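
(For illustration, a compilable userspace sketch of the pattern used in
the patch above: instead of compiling the functions out on PREEMPT_RT,
only the owner-tracking steps are guarded, so both symbols stay
available on RT and !RT alike. CONFIG_PREEMPT_RT is modeled as a plain
macro here.)

/* rwsem_non_owner_demo.c: build with cc -Wall rwsem_non_owner_demo.c */
#include <stdio.h>

/* #define CONFIG_PREEMPT_RT 1 */	/* uncomment to model an RT build */

struct rw_semaphore { int readers; };

static void __down_read(struct rw_semaphore *sem) { sem->readers++; }
static void __up_read(struct rw_semaphore *sem)   { sem->readers--; }

void down_read_non_owner(struct rw_semaphore *sem)
{
	__down_read(sem);
#ifndef CONFIG_PREEMPT_RT
	/* owner tracking exists only in the !RT implementation */
	printf("reader owner recorded as NULL (non-owner)\n");
#endif
}

void up_read_non_owner(struct rw_semaphore *sem)
{
#ifndef CONFIG_PREEMPT_RT
	printf("reader-owned debug check performed\n");
#endif
	__up_read(sem);
}

int main(void)
{
	struct rw_semaphore sem = { 0 };

	down_read_non_owner(&sem);
	up_read_non_owner(&sem);
	printf("readers=%d\n", sem.readers);
	return 0;
}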
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 24ece7e7ce11..3cb9ed5f60f2 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7261,7 +7261,7 @@ void __init sched_init(void)
+@@ -7294,7 +7294,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate_enable-Remove-__schedule-call.patch b/patches/sched-migrate_enable-Remove-__schedule-call.patch
index bb0b71d369fd..dec1e60f04c7 100644
--- a/patches/sched-migrate_enable-Remove-__schedule-call.patch
+++ b/patches/sched-migrate_enable-Remove-__schedule-call.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -8691,7 +8691,6 @@ void migrate_enable(void)
+@@ -8701,7 +8701,6 @@ void migrate_enable(void)
stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
arg, work);
diff --git a/patches/sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch b/patches/sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch
index 29427b4c38d2..479889441968 100644
--- a/patches/sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch
+++ b/patches/sched-migrate_enable-Use-per-cpu-cpu_stop_work.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -8632,6 +8632,9 @@ static void migrate_disabled_sched(struc
+@@ -8642,6 +8642,9 @@ static void migrate_disabled_sched(struc
p->migrate_disable_scheduled = 1;
}
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_enable(void)
{
struct task_struct *p = current;
-@@ -8670,22 +8673,25 @@ void migrate_enable(void)
+@@ -8680,22 +8683,25 @@ void migrate_enable(void)
WARN_ON(smp_processor_id() != cpu);
if (!is_cpu_allowed(p, cpu)) {
diff --git a/patches/sched-migrate_enable-Use-stop_one_cpu_nowait.patch b/patches/sched-migrate_enable-Use-stop_one_cpu_nowait.patch
index 9d26071a6c17..d57869e10257 100644
--- a/patches/sched-migrate_enable-Use-stop_one_cpu_nowait.patch
+++ b/patches/sched-migrate_enable-Use-stop_one_cpu_nowait.patch
@@ -13,9 +13,9 @@ Signed-off-by: Scott Wood <swood@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/stop_machine.h | 2 ++
- kernel/sched/core.c | 27 +++++++++++++++++----------
+ kernel/sched/core.c | 25 +++++++++++++++++--------
kernel/stop_machine.c | 7 +++++--
- 3 files changed, 24 insertions(+), 12 deletions(-)
+ 3 files changed, 24 insertions(+), 10 deletions(-)
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rq_unlock(rq, &rf);
raw_spin_unlock(&p->pi_lock);
-@@ -8664,7 +8670,8 @@ void migrate_enable(void)
+@@ -8674,7 +8680,8 @@ void migrate_enable(void)
WARN_ON(smp_processor_id() != cpu);
if (!is_cpu_allowed(p, cpu)) {
@@ -72,15 +72,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rq_flags rf;
rq = task_rq_lock(p, &rf);
-@@ -8672,13 +8679,13 @@ void migrate_enable(void)
+@@ -8682,11 +8689,13 @@ void migrate_enable(void)
arg.dest_cpu = select_fallback_rq(cpu, p);
task_rq_unlock(rq, p, &rf);
- preempt_enable();
-
-- sleeping_lock_inc();
- stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
-- sleeping_lock_dec();
- return;
-
+ stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 5cec179c5cf2..a9734a37ed9b 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3635,9 +3635,13 @@ static struct rq *finish_task_switch(str
+@@ -3688,9 +3688,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -6674,6 +6678,7 @@ void sched_setnuma(struct task_struct *p
+@@ -6737,6 +6741,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index 029e6798dcab..2963687c46a0 100644
--- a/patches/series
+++ b/patches/series
@@ -116,6 +116,31 @@ softirq--Replace-barrier---with-cpu_relax---in-tasklet_unlock_wait--.patch
tasklets--Avoid-cancel-kill-deadlock-on-RT.patch
tasklets-Use-static-line-for-functions.patch
+# RT-MUTEX
+0001-locking-rtmutex-Remove-cruft.patch
+0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
+0003-locking-rtmutex-Move-rt_mutex_init-outside-of-CONFIG.patch
+0004-locking-rtmutex-Remove-rt_mutex_timed_lock.patch
+0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
+0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+0007-locking-rtmutex-Add-rtmutex_lock_killable.patch
+0008-locking-rtmutex-Make-lock_killable-work.patch
+0009-locking-spinlock-Split-the-lock-types-header.patch
+0010-locking-rtmutex-Avoid-include-hell.patch
+0011-lockdep-Reduce-header-files-in-debug_locks.h.patch
+0012-locking-split-out-the-rbtree-definition.patch
+0013-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+0014-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+0015-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+0016-locking-rtmutex-add-sleeping-lock-implementation.patch
+0017-locking-rtmutex-Allow-rt_mutex_trylock-on-PREEMPT_RT.patch
+0018-locking-rtmutex-add-mutex-implementation-based-on-rt.patch
+0019-locking-rtmutex-add-rwsem-implementation-based-on-rt.patch
+0020-locking-rtmutex-add-rwlock-implementation-based-on-r.patch
+0021-locking-rtmutex-wire-up-RT-s-locking.patch
+0022-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+0023-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+
###############################################################
# Stuff broken upstream and upstream wants something different
###############################################################
@@ -238,7 +263,6 @@ sched-mmdrop-delayed.patch
kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
# RT bulk - Revisit
-sched-rt-mutex-wakeup.patch
sched-might-sleep-do-not-account-rcu-depth.patch
sched-disable-ttwu-queue.patch
@@ -254,35 +278,10 @@ softirq-disable-softirq-stacks-for-rt.patch
net-core-use-local_bh_disable-in-netif_rx_ni.patch
# FUTEX/RTMUTEX
-rtmutex-futex-prepare-rt.patch
-futex-requeue-pi-fix.patch
futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
# RTMUTEX
pid.h-include-atomic.h.patch
-rtmutex-lock-killable.patch
-rtmutex-Make-lock_killable-work.patch
-spinlock-types-separate-raw.patch
-rtmutex-avoid-include-hell.patch
-locking-split-out-the-rbtree-definition.patch
-rtmutex-Provide-rt_mutex_slowlock_locked.patch
-rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
-rtmutex-add-sleeping-lock-implementation.patch
-cond-resched-lock-rt-tweak.patch
-locking-rtmutex-Clean-pi_blocked_on-in-the-error-cas.patch
-rtmutex-trylock-is-okay-on-RT.patch
-rtmutex-add-mutex-implementation-based-on-rtmutex.patch
-rtmutex-add-rwsem-implementation-based-on-rtmutex.patch
-rtmutex-add-rwlock-implementation-based-on-rtmutex.patch
-rtmutex-wire-up-RT-s-locking.patch
-rwsem-Provide-down_read_non_owner-and-up_read_non_ow.patch
-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
-mutex-Move-the-ww_mutext-definition-back-to-ww_mutex.patch
-
-# Allow to enable RT-FULL after sleeping spinlocks are wired up
-# XXX move flush_plug to slowpath's schedule()
-locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
-locking-rt-mutex-Flush-block-plug-on-__down_read.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
# MIGRATE DISABLE AND PER CPU
@@ -352,7 +351,6 @@ upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
lockdep-no-softirq-accounting-on-rt.patch
lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
-lockdep-Reduce-header-files-in-debug_locks.h.patch
# Fix lockdep selftest - talk to Peter - including lockdep branch
lockdep-disable-self-test.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 61d86883e6e8..c5487cf19766 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -963,6 +963,7 @@ struct task_struct {
+@@ -959,6 +959,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 0d4016a35a48..71e8be5513d0 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -183,8 +183,10 @@ do { \
+@@ -187,8 +187,10 @@ do { \
#ifdef CONFIG_PREEMPT_RT
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -255,6 +257,7 @@ do { \
+@@ -259,6 +261,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()