-rw-r--r--  patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch  58
-rw-r--r--  patches/arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch  93
-rw-r--r--  patches/futex-requeue-pi-fix.patch  10
-rw-r--r--  patches/genirq-update-irq_set_irqchip_state-documentation.patch  31
-rw-r--r--  patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch  2
-rw-r--r--  patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch  33
-rw-r--r--  patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch  5
-rw-r--r--  patches/kernel-softirq-unlock-with-irqs-on.patch  27
-rw-r--r--  patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch  152
-rw-r--r--  patches/localversion.patch  4
-rw-r--r--  patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch  41
-rw-r--r--  patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch  2
-rw-r--r--  patches/rcu-Eliminate-softirq-processing-from-rcutree.patch  10
-rw-r--r--  patches/rcu-disable-more-spots-of-rcu_bh.patch  63
-rw-r--r--  patches/rcu-more-swait-conversions.patch  14
-rw-r--r--  patches/rt-add-rt-locks.patch  95
-rw-r--r--  patches/rt-locking-Reenable-migration-accross-schedule.patch  111
-rw-r--r--  patches/rtmutex--Handle-non-enqueued-waiters-gracefully  4
-rw-r--r--  patches/rtmutex-Make-wait_lock-irq-safe.patch  597
-rw-r--r--  patches/rtmutex-Use-chainwalking-control-enum.patch  4
-rw-r--r--  patches/rtmutex-add-a-first-shot-of-ww_mutex.patch  34
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch  24
-rw-r--r--  patches/rtmutex-lock-killable.patch  2
-rw-r--r--  patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch  271
-rw-r--r--  patches/rtmutex-trylock-is-okay-on-RT.patch  2
-rw-r--r--  patches/series  12
-rw-r--r--  patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch  12
-rw-r--r--  patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch  2
28 files changed, 1601 insertions, 114 deletions
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
new file mode 100644
index 000000000000..6d6aaa7f97fe
--- /dev/null
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -0,0 +1,58 @@
+From bdf6fc2d467f0db2122b1010106542dbc7a8398c Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 11 Feb 2016 11:54:01 -0600
+Subject: [PATCH 2/2] KVM: arm/arm64: downgrade preempt_disable()d region to
+ migrate_disable()
+
+kvm_arch_vcpu_ioctl_run() disables the use of preemption when updating
+the vgic and timer states to prevent the calling task from migrating to
+another CPU. It does so to prevent the task from writing to the
+incorrect per-CPU GIC distributor registers.
+
+On -rt kernels, it's possible to maintain the same guarantee with the
+use of migrate_{disable,enable}(), with the added benefit that the
+migrate-disabled region is preemptible. Update
+kvm_arch_vcpu_ioctl_run() to do so.
+
+Cc: Christoffer Dall <christoffer.dall@linaro.org>
+Reported-by: Manish Jaggi <Manish.Jaggi@caviumnetworks.com>
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/kvm/arm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
+index 4f5c42a0924c..2ce9cc2717ac 100644
+--- a/arch/arm/kvm/arm.c
++++ b/arch/arm/kvm/arm.c
+@@ -568,7 +568,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ * involves poking the GIC, which must be done in a
+ * non-preemptible context.
+ */
+- preempt_disable();
++ migrate_disable();
+ kvm_timer_flush_hwstate(vcpu);
+ kvm_vgic_flush_hwstate(vcpu);
+
+@@ -587,7 +587,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+ local_irq_enable();
+ kvm_timer_sync_hwstate(vcpu);
+ kvm_vgic_sync_hwstate(vcpu);
+- preempt_enable();
++ migrate_enable();
+ continue;
+ }
+
+@@ -641,7 +641,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
+
+ kvm_vgic_sync_hwstate(vcpu);
+
+- preempt_enable();
++ migrate_enable();
+
+ ret = handle_exit(vcpu, run, ret);
+ }
+--
+2.7.0
+
diff --git a/patches/arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch b/patches/arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch
new file mode 100644
index 000000000000..835fcae8ff92
--- /dev/null
+++ b/patches/arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch
@@ -0,0 +1,93 @@
+From: Yang Shi <yang.shi@linaro.org>
+Date: Mon, 8 Feb 2016 14:49:24 -0800
+Subject: arm64: replace read_lock to rcu lock in call_step_hook
+
+BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:917
+in_atomic(): 1, irqs_disabled(): 128, pid: 383, name: sh
+Preemption disabled at:[<ffff800000124c18>] kgdb_cpu_enter+0x158/0x6b8
+
+CPU: 3 PID: 383 Comm: sh Tainted: G W 4.1.13-rt13 #2
+Hardware name: Freescale Layerscape 2085a RDB Board (DT)
+Call trace:
+[<ffff8000000885e8>] dump_backtrace+0x0/0x128
+[<ffff800000088734>] show_stack+0x24/0x30
+[<ffff80000079a7c4>] dump_stack+0x80/0xa0
+[<ffff8000000bd324>] ___might_sleep+0x18c/0x1a0
+[<ffff8000007a20ac>] __rt_spin_lock+0x2c/0x40
+[<ffff8000007a2268>] rt_read_lock+0x40/0x58
+[<ffff800000085328>] single_step_handler+0x38/0xd8
+[<ffff800000082368>] do_debug_exception+0x58/0xb8
+Exception stack(0xffff80834a1e7c80 to 0xffff80834a1e7da0)
+7c80: ffffff9c ffffffff 92c23ba0 0000ffff 4a1e7e40 ffff8083 001bfcc4 ffff8000
+7ca0: f2000400 00000000 00000000 00000000 4a1e7d80 ffff8083 0049501c ffff8000
+7cc0: 00005402 00000000 00aaa210 ffff8000 4a1e7ea0 ffff8083 000833f4 ffff8000
+7ce0: ffffff9c ffffffff 92c23ba0 0000ffff 4a1e7ea0 ffff8083 001bfcc0 ffff8000
+7d00: 4a0fc400 ffff8083 00005402 00000000 4a1e7d40 ffff8083 00490324 ffff8000
+7d20: ffffff9c 00000000 92c23ba0 0000ffff 000a0000 00000000 00000000 00000000
+7d40: 00000008 00000000 00080000 00000000 92c23b8b 0000ffff 92c23b8e 0000ffff
+7d60: 00000038 00000000 00001cb2 00000000 00000005 00000000 92d7b498 0000ffff
+7d80: 01010101 01010101 92be9000 0000ffff 00000000 00000000 00000030 00000000
+[<ffff8000000833f4>] el1_dbg+0x18/0x6c
+
+This issue is similar to commit 62c6c61 ("arm64: replace read_lock to rcu lock
+in call_break_hook"), but applies to single_step_handler.
+
+This also solves kgdbts boot test silent hang issue on 4.4 -rt kernel.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Yang Shi <yang.shi@linaro.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/kernel/debug-monitors.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+--- a/arch/arm64/kernel/debug-monitors.c
++++ b/arch/arm64/kernel/debug-monitors.c
+@@ -186,20 +186,21 @@ static void clear_regs_spsr_ss(struct pt
+
+ /* EL1 Single Step Handler hooks */
+ static LIST_HEAD(step_hook);
+-static DEFINE_RWLOCK(step_hook_lock);
++static DEFINE_SPINLOCK(step_hook_lock);
+
+ void register_step_hook(struct step_hook *hook)
+ {
+- write_lock(&step_hook_lock);
+- list_add(&hook->node, &step_hook);
+- write_unlock(&step_hook_lock);
++ spin_lock(&step_hook_lock);
++ list_add_rcu(&hook->node, &step_hook);
++ spin_unlock(&step_hook_lock);
+ }
+
+ void unregister_step_hook(struct step_hook *hook)
+ {
+- write_lock(&step_hook_lock);
+- list_del(&hook->node);
+- write_unlock(&step_hook_lock);
++ spin_lock(&step_hook_lock);
++ list_del_rcu(&hook->node);
++ spin_unlock(&step_hook_lock);
++ synchronize_rcu();
+ }
+
+ /*
+@@ -213,15 +214,15 @@ static int call_step_hook(struct pt_regs
+ struct step_hook *hook;
+ int retval = DBG_HOOK_ERROR;
+
+- read_lock(&step_hook_lock);
++ rcu_read_lock();
+
+- list_for_each_entry(hook, &step_hook, node) {
++ list_for_each_entry_rcu(hook, &step_hook, node) {
+ retval = hook->fn(regs, esr);
+ if (retval == DBG_HOOK_HANDLED)
+ break;
+ }
+
+- read_unlock(&step_hook_lock);
++ rcu_read_unlock();
+
+ return retval;
+ }
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch
index 8bdb0b62e1bb..3a40911cbc61 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/futex-requeue-pi-fix.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1631,6 +1632,35 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1640,6 +1641,35 @@ int rt_mutex_start_proxy_lock(struct rt_
return 1;
}
@@ -88,14 +88,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+ * it will know that we are in the process of requeuing it.
+ */
-+ raw_spin_lock_irq(&task->pi_lock);
++ raw_spin_lock(&task->pi_lock);
+ if (task->pi_blocked_on) {
-+ raw_spin_unlock_irq(&task->pi_lock);
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+ return -EAGAIN;
+ }
+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
-+ raw_spin_unlock_irq(&task->pi_lock);
++ raw_spin_unlock(&task->pi_lock);
+#endif
+
/* We enforce deadlock detection for futexes */
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
new file mode 100644
index 000000000000..948594dc6ae8
--- /dev/null
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -0,0 +1,31 @@
+From 85b7f1606fb707c9da7984e052e47cfa12e85e67 Mon Sep 17 00:00:00 2001
+From: Josh Cartwright <joshc@ni.com>
+Date: Thu, 11 Feb 2016 11:54:00 -0600
+Subject: [PATCH 1/2] genirq: update irq_set_irqchip_state documentation
+
+On -rt kernels, the use of migrate_disable()/migrate_enable() is
+sufficient to guarantee a task isn't moved to another CPU. Update the
+irq_set_irqchip_state() documentation to reflect this.
+
+Signed-off-by: Josh Cartwright <joshc@ni.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/irq/manage.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
+index ba2a42a37025..8e89554aa345 100644
+--- a/kernel/irq/manage.c
++++ b/kernel/irq/manage.c
+@@ -2060,7 +2060,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state);
+ * This call sets the internal irqchip state of an interrupt,
+ * depending on the value of @which.
+ *
+- * This function should be called with preemption disabled if the
++ * This function should be called with migration disabled if the
+ * interrupt controller has per-cpu registers.
+ */
+ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which,
+--
+2.7.0
+
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index b3349045220c..586090ceb7b6 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return desc->status_use_accessors & _IRQ_PER_CPU;
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -576,6 +576,15 @@ void __local_bh_enable(void)
+@@ -578,6 +578,15 @@ void __local_bh_enable(void)
}
EXPORT_SYMBOL(__local_bh_enable);
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
new file mode 100644
index 000000000000..8aa639f9ea89
--- /dev/null
+++ b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
@@ -0,0 +1,33 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Feb 2016 18:18:01 +0100
+Subject: kernel: migrate_disable() do fastpath in atomic &
+ irqs-off
+
+With interrupts off it makes no sense to do the long path since we can't
+leave the CPU anyway. Also we might end up in a recursion with lockdep.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -3078,7 +3078,7 @@ void migrate_disable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic()) {
++ if (in_atomic() || irqs_disabled()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic++;
+ #endif
+@@ -3105,7 +3105,7 @@ void migrate_enable(void)
+ {
+ struct task_struct *p = current;
+
+- if (in_atomic()) {
++ if (in_atomic() || irqs_disabled()) {
+ #ifdef CONFIG_SCHED_DEBUG
+ p->migrate_disable_atomic--;
+ #endif
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 5658b8772de8..03daa83a38f1 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -9,13 +9,12 @@ Cc: stable-rt@vger.kernel.org
Reported-by: Yang Shi <yang.shi@linaro.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/events/core.c | 1 +
+ kernel/events/core.c | 1 +
1 file changed, 1 insertion(+)
-diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+@@ -802,6 +802,7 @@ static void __perf_mux_hrtimer_init(stru
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/kernel-softirq-unlock-with-irqs-on.patch b/patches/kernel-softirq-unlock-with-irqs-on.patch
new file mode 100644
index 000000000000..a54b5cb01efc
--- /dev/null
+++ b/patches/kernel-softirq-unlock-with-irqs-on.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 9 Feb 2016 18:17:18 +0100
+Subject: kernel: softirq: unlock with irqs on
+
+We unlock the lock while the interrupts are off. This isn't a problem
+now but will become one because the migrate_disable() + enable are not
+symmetrical with regard to the status of interrupts.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -549,8 +549,10 @@ static void do_current_softirqs(void)
+ do_single_softirq(i);
+ }
+ softirq_clr_runner(i);
+- unlock_softirq(i);
+ WARN_ON(current->softirq_nestcnt != 1);
++ local_irq_enable();
++ unlock_softirq(i);
++ local_irq_disable();
+ }
+ }
+
diff --git a/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch b/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
new file mode 100644
index 000000000000..71d204ddd5d0
--- /dev/null
+++ b/patches/kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
@@ -0,0 +1,152 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Feb 2016 18:25:16 +0100
+Subject: kernel/stop_machine: partly revert "stop_machine: Use raw
+ spinlocks"
+
+With completions now using swait, and thus raw locks, we don't need this anymore.
+Further, bisect thinks this patch is responsible for:
+
+|BUG: unable to handle kernel NULL pointer dereference at (null)
+|IP: [<ffffffff81082123>] sched_cpu_active+0x53/0x70
+|PGD 0
+|Oops: 0000 [#1] PREEMPT SMP DEBUG_PAGEALLOC
+|Dumping ftrace buffer:
+| (ftrace buffer empty)
+|Modules linked in:
+|CPU: 1 PID: 0 Comm: swapper/1 Not tainted 4.4.1+ #330
+|Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS Debian-1.8.2-1 04/01/2014
+|task: ffff88013ae64b00 ti: ffff88013ae74000 task.ti: ffff88013ae74000
+|RIP: 0010:[<ffffffff81082123>] [<ffffffff81082123>] sched_cpu_active+0x53/0x70
+|RSP: 0000:ffff88013ae77eb8 EFLAGS: 00010082
+|RAX: 0000000000000001 RBX: ffffffff81c2cf20 RCX: 0000001050fb52fb
+|RDX: 0000001050fb52fb RSI: 000000105117ca1e RDI: 00000000001c7723
+|RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000001
+|R10: 0000000000000000 R11: 0000000000000001 R12: 00000000ffffffff
+|R13: ffffffff81c2cee0 R14: 0000000000000000 R15: 0000000000000001
+|FS: 0000000000000000(0000) GS:ffff88013b200000(0000) knlGS:0000000000000000
+|CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+|CR2: 0000000000000000 CR3: 0000000001c09000 CR4: 00000000000006e0
+|Stack:
+| ffffffff810c446d ffff88013ae77f00 ffffffff8107d8dd 000000000000000a
+| 0000000000000001 0000000000000000 0000000000000000 0000000000000000
+| 0000000000000000 ffff88013ae77f10 ffffffff8107d90e ffff88013ae77f20
+|Call Trace:
+| [<ffffffff810c446d>] ? debug_lockdep_rcu_enabled+0x1d/0x20
+| [<ffffffff8107d8dd>] ? notifier_call_chain+0x5d/0x80
+| [<ffffffff8107d90e>] ? __raw_notifier_call_chain+0xe/0x10
+| [<ffffffff810598a3>] ? cpu_notify+0x23/0x40
+| [<ffffffff8105a7b8>] ? notify_cpu_starting+0x28/0x30
+
+during hotplug. The rawlocks need to remain however.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/stop_machine.c | 40 ++++++++--------------------------------
+ 1 file changed, 8 insertions(+), 32 deletions(-)
+
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -30,7 +30,7 @@ struct cpu_stop_done {
+ atomic_t nr_todo; /* nr left to execute */
+ bool executed; /* actually executed? */
+ int ret; /* collected return value */
+- struct task_struct *waiter; /* woken when nr_todo reaches 0 */
++ struct completion completion; /* fired if nr_todo reaches 0 */
+ };
+
+ /* the actual stopper, one per every possible cpu, enabled on online cpus */
+@@ -59,7 +59,7 @@ static void cpu_stop_init_done(struct cp
+ {
+ memset(done, 0, sizeof(*done));
+ atomic_set(&done->nr_todo, nr_todo);
+- done->waiter = current;
++ init_completion(&done->completion);
+ }
+
+ /* signal completion unless @done is NULL */
+@@ -68,10 +68,8 @@ static void cpu_stop_signal_done(struct
+ if (done) {
+ if (executed)
+ done->executed = true;
+- if (atomic_dec_and_test(&done->nr_todo)) {
+- wake_up_process(done->waiter);
+- done->waiter = NULL;
+- }
++ if (atomic_dec_and_test(&done->nr_todo))
++ complete(&done->completion);
+ }
+ }
+
+@@ -96,22 +94,6 @@ static void cpu_stop_queue_work(unsigned
+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
+ }
+
+-static void wait_for_stop_done(struct cpu_stop_done *done)
+-{
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- while (atomic_read(&done->nr_todo)) {
+- schedule();
+- set_current_state(TASK_UNINTERRUPTIBLE);
+- }
+- /*
+- * We need to wait until cpu_stop_signal_done() has cleared
+- * done->waiter.
+- */
+- while (done->waiter)
+- cpu_relax();
+- set_current_state(TASK_RUNNING);
+-}
+-
+ /**
+ * stop_one_cpu - stop a cpu
+ * @cpu: cpu to stop
+@@ -143,7 +125,7 @@ int stop_one_cpu(unsigned int cpu, cpu_s
+
+ cpu_stop_init_done(&done, 1);
+ cpu_stop_queue_work(cpu, &work);
+- wait_for_stop_done(&done);
++ wait_for_completion(&done.completion);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -302,7 +284,7 @@ int stop_two_cpus(unsigned int cpu1, uns
+
+ preempt_enable_nort();
+
+- wait_for_stop_done(&done);
++ wait_for_completion(&done.completion);
+
+ return done.executed ? done.ret : -ENOENT;
+ }
+@@ -364,7 +346,7 @@ static int __stop_cpus(const struct cpum
+
+ cpu_stop_init_done(&done, cpumask_weight(cpumask));
+ queue_stop_cpus_work(cpumask, fn, arg, &done, false);
+- wait_for_stop_done(&done);
++ wait_for_completion(&done.completion);
+ return done.executed ? done.ret : -ENOENT;
+ }
+
+@@ -495,13 +477,7 @@ static void cpu_stopper_thread(unsigned
+ kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL,
+ ksym_buf), arg);
+
+- /*
+- * Make sure that the wakeup and setting done->waiter
+- * to NULL is atomic.
+- */
+- local_irq_disable();
+ cpu_stop_signal_done(done, true);
+- local_irq_enable();
+ goto repeat;
+ }
+ }
+@@ -663,7 +639,7 @@ int stop_machine_from_inactive_cpu(cpu_s
+ ret = multi_cpu_stop(&msdata);
+
+ /* Busy wait for completion. */
+- while (atomic_read(&done.nr_todo))
++ while (!completion_done(&done.completion))
+ cpu_relax();
+
+ mutex_unlock(&stop_cpus_mutex);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 5443ec26c1a5..109437949aa9 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -1,4 +1,4 @@
-Subject: v4.4.1-rt5
+Subject: v4.4.1-rt6
From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 08 Jul 2011 20:25:16 +0200
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
new file mode 100644
index 000000000000..3c210118ea66
--- /dev/null
+++ b/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -0,0 +1,41 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 5 Feb 2016 12:17:14 +0100
+Subject: mm: backing-dev: don't disable IRQs in wb_congested_put()
+
+it triggers:
+|BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:930
+|in_atomic(): 0, irqs_disabled(): 1, pid: 12, name: rcuc/0
+|1 lock held by rcuc/0/12:
+| #0: (rcu_callback){......}, at: [<ffffffff810ce1a6>] rcu_cpu_kthread+0x376/0xb10
+|irq event stamp: 23636
+|hardirqs last enabled at (23635): [<ffffffff8173524c>] _raw_spin_unlock_irqrestore+0x6c/0x80
+|hardirqs last disabled at (23636): [<ffffffff81173918>] wb_congested_put+0x18/0x90
+| [<ffffffff81735434>] rt_spin_lock+0x24/0x60
+| [<ffffffff810afed2>] atomic_dec_and_spin_lock+0x52/0x90
+| [<ffffffff81173928>] wb_congested_put+0x28/0x90
+| [<ffffffff813b833e>] __blkg_release_rcu+0x5e/0x1e0
+| [<ffffffff813b8367>] ? __blkg_release_rcu+0x87/0x1e0
+| [<ffffffff813b82e0>] ? blkg_conf_finish+0x90/0x90
+| [<ffffffff810ce1e7>] rcu_cpu_kthread+0x3b7/0xb10
+
+due to cgwb_lock being taken with spin_lock_irqsave() usually.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/backing-dev.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/mm/backing-dev.c
++++ b/mm/backing-dev.c
+@@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeba
+ {
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_irq_save_nort(flags);
+ if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
+- local_irq_restore(flags);
++ local_irq_restore_nort(flags);
+ return;
+ }
+
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 0bfee01cd8cb..4fdad34ddfe2 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void raise_softirq(unsigned int nr);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -673,6 +673,27 @@ void __raise_softirq_irqoff(unsigned int
+@@ -675,6 +675,27 @@ void __raise_softirq_irqoff(unsigned int
}
/*
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index fc1c537d4576..73cf44e16818 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2956,18 +2961,17 @@ static void
+@@ -2960,18 +2965,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2979,18 +2983,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2983,18 +2987,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4609,7 +4700,6 @@ void __init rcu_init(void)
+@@ -4615,7 +4706,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
@@ -178,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -563,12 +563,10 @@ extern struct rcu_state rcu_bh_state;
+@@ -565,12 +565,10 @@ extern struct rcu_state rcu_bh_state;
extern struct rcu_state rcu_preempt_state;
#endif /* #ifdef CONFIG_PREEMPT_RCU */
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -588,10 +586,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -590,10 +588,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
diff --git a/patches/rcu-disable-more-spots-of-rcu_bh.patch b/patches/rcu-disable-more-spots-of-rcu_bh.patch
new file mode 100644
index 000000000000..3697ffc546f5
--- /dev/null
+++ b/patches/rcu-disable-more-spots-of-rcu_bh.patch
@@ -0,0 +1,63 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Feb 2016 18:30:56 +0100
+Subject: rcu: disable more spots of rcu_bh
+
+We don't use rcu_bh on -RT but we still fork a thread for it and keep it
+as a flavour. No more.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/tree.c | 6 ++++++
+ kernel/rcu/tree.h | 2 ++
+ 2 files changed, 8 insertions(+)
+
+--- a/kernel/rcu/tree.c
++++ b/kernel/rcu/tree.c
+@@ -449,11 +449,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+ /*
+ * Return the number of RCU BH batches started thus far for debug & stats.
+ */
++#ifndef CONFIG_PREEMPT_RT_FULL
+ unsigned long rcu_batches_started_bh(void)
+ {
+ return rcu_bh_state.gpnum;
+ }
+ EXPORT_SYMBOL_GPL(rcu_batches_started_bh);
++#endif
+
+ /*
+ * Return the number of RCU batches completed thus far for debug & stats.
+@@ -558,9 +560,11 @@ void rcutorture_get_gp_data(enum rcutort
+ case RCU_FLAVOR:
+ rsp = rcu_state_p;
+ break;
++#ifndef CONFIG_PREEMPT_RT_FULL
+ case RCU_BH_FLAVOR:
+ rsp = &rcu_bh_state;
+ break;
++#endif
+ case RCU_SCHED_FLAVOR:
+ rsp = &rcu_sched_state;
+ break;
+@@ -4604,7 +4608,9 @@ void __init rcu_init(void)
+
+ rcu_bootup_announce();
+ rcu_init_geometry();
++#ifndef CONFIG_PREEMPT_RT_FULL
+ rcu_init_one(&rcu_bh_state, &rcu_bh_data);
++#endif
+ rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+ if (dump_tree)
+ rcu_dump_rcu_node_tree(&rcu_sched_state);
+--- a/kernel/rcu/tree.h
++++ b/kernel/rcu/tree.h
+@@ -556,7 +556,9 @@ extern struct list_head rcu_struct_flavo
+ */
+ extern struct rcu_state rcu_sched_state;
+
++#ifndef CONFIG_PREEMPT_RT_FULL
+ extern struct rcu_state rcu_bh_state;
++#endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+ extern struct rcu_state rcu_preempt_state;
diff --git a/patches/rcu-more-swait-conversions.patch b/patches/rcu-more-swait-conversions.patch
index 1b7e5657972b..779a02922c54 100644
--- a/patches/rcu-more-swait-conversions.patch
+++ b/patches/rcu-more-swait-conversions.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -1633,7 +1633,7 @@ static void rcu_gp_kthread_wake(struct r
+@@ -1637,7 +1637,7 @@ static void rcu_gp_kthread_wake(struct r
!READ_ONCE(rsp->gp_flags) ||
!rsp->gp_kthread)
return;
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2098,7 +2098,7 @@ static int __noreturn rcu_gp_kthread(voi
+@@ -2102,7 +2102,7 @@ static int __noreturn rcu_gp_kthread(voi
READ_ONCE(rsp->gpnum),
TPS("reqwait"));
rsp->gp_state = RCU_GP_WAIT_GPS;
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
READ_ONCE(rsp->gp_flags) &
RCU_GP_FLAG_INIT);
rsp->gp_state = RCU_GP_DONE_GPS;
-@@ -2128,7 +2128,7 @@ static int __noreturn rcu_gp_kthread(voi
+@@ -2132,7 +2132,7 @@ static int __noreturn rcu_gp_kthread(voi
READ_ONCE(rsp->gpnum),
TPS("fqswait"));
rsp->gp_state = RCU_GP_WAIT_FQS;
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_gp_fqs_check_wake(rsp, &gf), j);
rsp->gp_state = RCU_GP_DOING_FQS;
/* Locking provides needed memory barriers. */
-@@ -3550,7 +3550,7 @@ static void __rcu_report_exp_rnp(struct
+@@ -3554,7 +3554,7 @@ static void __rcu_report_exp_rnp(struct
raw_spin_unlock_irqrestore(&rnp->lock, flags);
if (wake) {
smp_mb(); /* EGP done before wake_up(). */
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
break;
}
-@@ -3807,7 +3807,7 @@ static void synchronize_sched_expedited_
+@@ -3811,7 +3811,7 @@ static void synchronize_sched_expedited_
jiffies_start = jiffies;
for (;;) {
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rsp->expedited_wq,
sync_rcu_preempt_exp_done(rnp_root),
jiffies_stall);
-@@ -3815,7 +3815,7 @@ static void synchronize_sched_expedited_
+@@ -3819,7 +3819,7 @@ static void synchronize_sched_expedited_
return;
if (ret < 0) {
/* Hit a signal, disable CPU stall warnings. */
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sync_rcu_preempt_exp_done(rnp_root));
return;
}
-@@ -4483,8 +4483,8 @@ static void __init rcu_init_one(struct r
+@@ -4487,8 +4487,8 @@ static void __init rcu_init_one(struct r
}
}
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index 776d49b31294..2d61aa2ac708 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -27,12 +27,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/futex.c | 10
kernel/locking/Makefile | 9
kernel/locking/rt.c | 476 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 423 +++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex.c | 422 +++++++++++++++++++++++++++++++--
kernel/locking/rtmutex_common.h | 14 +
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
kernel/sched/core.c | 7
- 23 files changed, 1598 insertions(+), 56 deletions(-)
+ 23 files changed, 1597 insertions(+), 56 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -924,7 +924,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -1268,7 +1269,8 @@ static int wake_futex_pi(u32 __user *uad
- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
@@ -1477,7 +1477,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -354,6 +359,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -356,6 +361,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -1492,7 +1492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -661,13 +674,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -662,13 +675,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -1508,10 +1508,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ lock_top_waiter = rt_mutex_top_waiter(lock);
+ if (prerequeue_top_waiter != lock_top_waiter)
+ rt_mutex_wake_waiter(lock_top_waiter);
- raw_spin_unlock(&lock->wait_lock);
+ raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -760,6 +776,25 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -761,6 +777,25 @@ static int rt_mutex_adjust_prio_chain(st
return ret;
}
@@ -1537,7 +1537,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Try to take an rt-mutex
*
-@@ -770,8 +805,9 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -771,8 +806,9 @@ static int rt_mutex_adjust_prio_chain(st
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
@@ -1547,9 +1547,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct task_struct *task,
+ struct rt_mutex_waiter *waiter, int mode)
{
- unsigned long flags;
-
-@@ -810,8 +846,10 @@ static int try_to_take_rt_mutex(struct r
+ /*
+ * Before testing whether we can acquire @lock, we set the
+@@ -809,8 +845,10 @@ static int try_to_take_rt_mutex(struct r
* If waiter is not the highest priority waiter of
* @lock, give up.
*/
@@ -1561,7 +1561,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
-@@ -829,14 +867,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -828,14 +866,10 @@ static int try_to_take_rt_mutex(struct r
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
@@ -1579,7 +1579,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -885,6 +919,315 @@ static int try_to_take_rt_mutex(struct r
+@@ -884,6 +918,314 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -1642,9 +1642,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+#endif
+
-+# define pi_lock(lock) raw_spin_lock_irq(lock)
-+# define pi_unlock(lock) raw_spin_unlock_irq(lock)
-+
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+ struct task_struct *task,
@@ -1660,14 +1657,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+{
+ struct task_struct *lock_owner, *self = current;
+ struct rt_mutex_waiter waiter, *top_waiter;
++ unsigned long flags;
+ int ret;
+
+ rt_mutex_init_waiter(&waiter, true);
+
-+ raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return;
+ }
+
@@ -1679,10 +1677,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * as well. We are serialized via pi_lock against wakeups. See
+ * try_to_wake_up().
+ */
-+ pi_lock(&self->pi_lock);
++ raw_spin_lock(&self->pi_lock);
+ self->saved_state = self->state;
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-+ pi_unlock(&self->pi_lock);
++ raw_spin_unlock(&self->pi_lock);
+
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ BUG_ON(ret);
@@ -1695,18 +1693,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ top_waiter = rt_mutex_top_waiter(lock);
+ lock_owner = rt_mutex_owner(lock);
+
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+ schedule();
+
-+ raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
-+ pi_lock(&self->pi_lock);
++ raw_spin_lock(&self->pi_lock);
+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
-+ pi_unlock(&self->pi_lock);
++ raw_spin_unlock(&self->pi_lock);
+ }
+
+ /*
@@ -1716,10 +1714,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * happened while we were blocked. Clear saved_state so
+ * try_to_wakeup() does not get confused.
+ */
-+ pi_lock(&self->pi_lock);
++ raw_spin_lock(&self->pi_lock);
+ __set_current_state_no_track(self->saved_state);
+ self->saved_state = TASK_RUNNING;
-+ pi_unlock(&self->pi_lock);
++ raw_spin_unlock(&self->pi_lock);
+
+ /*
+ * try_to_take_rt_mutex() sets the waiter bit
@@ -1730,7 +1728,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
+
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ debug_rt_mutex_free_waiter(&waiter);
+}
@@ -1743,10 +1741,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ */
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
++ unsigned long flags;
+ WAKE_Q(wake_q);
+ WAKE_Q(wake_sleeper_q);
+
-+ raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+ debug_rt_mutex_unlock(lock);
+
@@ -1754,13 +1753,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+ if (!rt_mutex_has_waiters(lock)) {
+ lock->owner = NULL;
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return;
+ }
+
+ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
+
-+ raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ wake_up_q(&wake_q);
+ wake_up_q_sleeper(&wake_sleeper_q);
+
@@ -1895,17 +1894,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -998,6 +1341,7 @@ static int task_blocks_on_rt_mutex(struc
- * Called with lock->wait_lock held.
+@@ -996,6 +1338,7 @@ static int task_blocks_on_rt_mutex(struc
+ * Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q,
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1027,7 +1371,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1024,7 +1367,10 @@ static void mark_wakeup_next_waiter(stru
- raw_spin_unlock_irqrestore(&current->pi_lock, flags);
+ raw_spin_unlock(&current->pi_lock);
- wake_q_add(wake_q, waiter->task);
+ if (waiter->savestate)
@@ -1915,7 +1914,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1109,11 +1456,11 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1105,11 +1451,11 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -1928,8 +1927,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
next_lock, NULL, task);
}
-@@ -1199,9 +1546,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
- struct rt_mutex_waiter waiter;
+@@ -1196,9 +1542,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ unsigned long flags;
int ret = 0;
- debug_rt_mutex_init_waiter(&waiter);
@@ -1937,9 +1936,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- RB_CLEAR_NODE(&waiter.tree_entry);
+ rt_mutex_init_waiter(&waiter, false);
- raw_spin_lock(&lock->wait_lock);
-
-@@ -1286,7 +1631,8 @@ static inline int rt_mutex_slowtrylock(s
+ /*
+ * Technically we could use raw_spin_[un]lock_irq() here, but this can
+@@ -1292,7 +1636,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to undo a potential priority boosting.
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -1947,18 +1946,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q)
{
- raw_spin_lock(&lock->wait_lock);
+ unsigned long flags;
-@@ -1339,7 +1685,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1348,7 +1693,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
- mark_wakeup_next_waiter(wake_q, lock);
+ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock);
- raw_spin_unlock(&lock->wait_lock);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1396,17 +1742,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1405,17 +1750,20 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -1981,7 +1980,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Undo pi boosting if necessary: */
if (deboost)
-@@ -1543,13 +1892,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1552,13 +1900,14 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
* required or not.
*/
bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1998,7 +1997,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1582,13 +1932,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1591,13 +1940,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -2013,7 +2012,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1603,7 +1952,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1612,7 +1960,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -2022,7 +2021,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
rt_mutex_deadlock_account_lock(lock, proxy_owner);
-@@ -1765,3 +2114,25 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1774,3 +2122,25 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
diff --git a/patches/rt-locking-Reenable-migration-accross-schedule.patch b/patches/rt-locking-Reenable-migration-accross-schedule.patch
new file mode 100644
index 000000000000..f552a2d58bb1
--- /dev/null
+++ b/patches/rt-locking-Reenable-migration-accross-schedule.patch
@@ -0,0 +1,111 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 8 Feb 2016 16:15:28 +0100
+Subject: rt/locking: Reenable migration across schedule
+
+We currently disable migration across lock acquisition. That includes the part
+where we block on the lock and schedule out. We cannot disable migration after
+taking the lock as that would cause a possible lock inversion.
+
+But we can be smart and enable migration when we block and schedule out. That
+allows the scheduler to place the task freely at least if this is the first
+migrate disable level. For nested locking this does not help at all.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/locking/rtmutex.c | 32 ++++++++++++++++++++------------
+ 1 file changed, 20 insertions(+), 12 deletions(-)
+
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -924,14 +924,19 @@ static int __try_to_take_rt_mutex(struct
+ * preemptible spin_lock functions:
+ */
+ static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+- void (*slowfn)(struct rt_mutex *lock))
++ void (*slowfn)(struct rt_mutex *lock,
++ bool mg_off),
++ bool do_mig_dis)
+ {
+ might_sleep_no_state_check();
+
++ if (do_mig_dis)
++ migrate_disable();
++
+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
+ rt_mutex_deadlock_account_lock(lock, current);
+ else
+- slowfn(lock);
++ slowfn(lock, do_mig_dis);
+ }
+
+ static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+@@ -989,7 +994,8 @@ static int task_blocks_on_rt_mutex(struc
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+-static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock,
++ bool mg_off)
+ {
+ struct task_struct *lock_owner, *self = current;
+ struct rt_mutex_waiter waiter, *top_waiter;
+@@ -1033,8 +1039,13 @@ static void noinline __sched rt_spin_lo
+
+ debug_rt_mutex_print_deadlock(&waiter);
+
+- if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) {
++ if (mg_off)
++ migrate_enable();
+ schedule();
++ if (mg_off)
++ migrate_disable();
++ }
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+@@ -1105,38 +1116,35 @@ static void noinline __sched rt_spin_lo
+
+ void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
+ {
+- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ }
+ EXPORT_SYMBOL(rt_spin_lock__no_mg);
+
+ void __lockfunc rt_spin_lock(spinlock_t *lock)
+ {
+- migrate_disable();
+- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ }
+ EXPORT_SYMBOL(rt_spin_lock);
+
+ void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+ {
+- migrate_disable();
+- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true);
+ }
+ EXPORT_SYMBOL(__rt_spin_lock);
+
+ void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
+ {
+- rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false);
+ }
+ EXPORT_SYMBOL(__rt_spin_lock__no_mg);
+
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+ {
+- migrate_disable();
+- rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true);
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nested);
+ #endif
diff --git a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
index 4da5c7acc354..7223153e73cf 100644
--- a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
+++ b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully
@@ -21,7 +21,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1617,7 +1617,7 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1626,7 +1626,7 @@ int rt_mutex_start_proxy_lock(struct rt_
ret = 0;
}
@@ -29,4 +29,4 @@ Cc: stable-rt@vger.kernel.org
+ if (ret && rt_mutex_has_waiters(lock))
remove_waiter(lock, waiter);
- raw_spin_unlock(&lock->wait_lock);
+ raw_spin_unlock_irq(&lock->wait_lock);
diff --git a/patches/rtmutex-Make-wait_lock-irq-safe.patch b/patches/rtmutex-Make-wait_lock-irq-safe.patch
new file mode 100644
index 000000000000..7befb3baf979
--- /dev/null
+++ b/patches/rtmutex-Make-wait_lock-irq-safe.patch
@@ -0,0 +1,597 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 13 Jan 2016 11:25:38 +0100
+Subject: rtmutex: Make wait_lock irq safe
+
+Sasha reported a lockdep splat about a potential deadlock between RCU boosting
+rtmutex and the posix timer it_lock.
+
+CPU0                                    CPU1
+
+rtmutex_lock(&rcu->rt_mutex)
+  spin_lock(&rcu->rt_mutex.wait_lock)
+                                        local_irq_disable()
+                                        spin_lock(&timer->it_lock)
+                                        spin_lock(&rcu->mutex.wait_lock)
+--> Interrupt
+    spin_lock(&timer->it_lock)
+
+This is caused by the following code sequence on CPU1
+
+ rcu_read_lock()
+ x = lookup();
+ if (x)
+ spin_lock_irqsave(&x->it_lock);
+ rcu_read_unlock();
+ return x;
+
+We could fix that in the posix timer code by keeping rcu read locked across
+the spinlocked and irq disabled section, but the above sequence is common and
+there is no reason not to support it.
+
+Taking rt_mutex.wait_lock irq safe prevents the deadlock.
+
+Reported-by: Sasha Levin <sasha.levin@oracle.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Paul McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/futex.c | 18 +++---
+ kernel/locking/rtmutex.c | 135 +++++++++++++++++++++++++----------------------
+ 2 files changed, 81 insertions(+), 72 deletions(-)
+
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -1223,7 +1223,7 @@ static int wake_futex_pi(u32 __user *uad
+ if (pi_state->owner != current)
+ return -EINVAL;
+
+- raw_spin_lock(&pi_state->pi_mutex.wait_lock);
++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
+ new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
+
+ /*
+@@ -1249,22 +1249,22 @@ static int wake_futex_pi(u32 __user *uad
+ else if (curval != uval)
+ ret = -EINVAL;
+ if (ret) {
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+ return ret;
+ }
+
+- raw_spin_lock_irq(&pi_state->owner->pi_lock);
++ raw_spin_lock(&pi_state->owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
++ raw_spin_unlock(&pi_state->owner->pi_lock);
+
+- raw_spin_lock_irq(&new_owner->pi_lock);
++ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+- raw_spin_unlock_irq(&new_owner->pi_lock);
++ raw_spin_unlock(&new_owner->pi_lock);
+
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+
+@@ -2129,11 +2129,11 @@ static int fixup_owner(u32 __user *uaddr
+ * we returned due to timeout or signal without taking the
+ * rt_mutex. Too late.
+ */
+- raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
++ raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+ owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+ if (!owner)
+ owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+- raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
++ raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+ ret = fixup_pi_state_owner(uaddr, q, owner);
+ goto out;
+ }
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -99,13 +99,14 @@ static inline void mark_rt_mutex_waiters
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
++ unsigned long flags)
+ __releases(lock->wait_lock)
+ {
+ struct task_struct *owner = rt_mutex_owner(lock);
+
+ clear_rt_mutex_waiters(lock);
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ /*
+ * If a new waiter comes in between the unlock and the cmpxchg
+ * we have two situations:
+@@ -147,11 +148,12 @@ static inline void mark_rt_mutex_waiters
+ /*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
++ unsigned long flags)
+ __releases(lock->wait_lock)
+ {
+ lock->owner = NULL;
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return true;
+ }
+ #endif
+@@ -433,7 +435,6 @@ static int rt_mutex_adjust_prio_chain(st
+ int ret = 0, depth = 0;
+ struct rt_mutex *lock;
+ bool detect_deadlock;
+- unsigned long flags;
+ bool requeue = true;
+
+ detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);
+@@ -476,7 +477,7 @@ static int rt_mutex_adjust_prio_chain(st
+ /*
+ * [1] Task cannot go away as we did a get_task() before !
+ */
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
++ raw_spin_lock_irq(&task->pi_lock);
+
+ /*
+ * [2] Get the waiter on which @task is blocked on.
+@@ -560,7 +561,7 @@ static int rt_mutex_adjust_prio_chain(st
+ * operations.
+ */
+ if (!raw_spin_trylock(&lock->wait_lock)) {
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock_irq(&task->pi_lock);
+ cpu_relax();
+ goto retry;
+ }
+@@ -591,7 +592,7 @@ static int rt_mutex_adjust_prio_chain(st
+ /*
+ * No requeue[7] here. Just release @task [8]
+ */
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock(&task->pi_lock);
+ put_task_struct(task);
+
+ /*
+@@ -599,14 +600,14 @@ static int rt_mutex_adjust_prio_chain(st
+ * If there is no owner of the lock, end of chain.
+ */
+ if (!rt_mutex_owner(lock)) {
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+ return 0;
+ }
+
+ /* [10] Grab the next task, i.e. owner of @lock */
+ task = rt_mutex_owner(lock);
+ get_task_struct(task);
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
++ raw_spin_lock(&task->pi_lock);
+
+ /*
+ * No requeue [11] here. We just do deadlock detection.
+@@ -621,8 +622,8 @@ static int rt_mutex_adjust_prio_chain(st
+ top_waiter = rt_mutex_top_waiter(lock);
+
+ /* [13] Drop locks */
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ /* If owner is not blocked, end of chain. */
+ if (!next_lock)
+@@ -643,7 +644,7 @@ static int rt_mutex_adjust_prio_chain(st
+ rt_mutex_enqueue(lock, waiter);
+
+ /* [8] Release the task */
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock(&task->pi_lock);
+ put_task_struct(task);
+
+ /*
+@@ -661,14 +662,14 @@ static int rt_mutex_adjust_prio_chain(st
+ */
+ if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
+ wake_up_process(rt_mutex_top_waiter(lock)->task);
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+ return 0;
+ }
+
+ /* [10] Grab the next task, i.e. the owner of @lock */
+ task = rt_mutex_owner(lock);
+ get_task_struct(task);
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
++ raw_spin_lock(&task->pi_lock);
+
+ /* [11] requeue the pi waiters if necessary */
+ if (waiter == rt_mutex_top_waiter(lock)) {
+@@ -722,8 +723,8 @@ static int rt_mutex_adjust_prio_chain(st
+ top_waiter = rt_mutex_top_waiter(lock);
+
+ /* [13] Drop the locks */
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock(&task->pi_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ /*
+ * Make the actual exit decisions [12], based on the stored
+@@ -746,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(st
+ goto again;
+
+ out_unlock_pi:
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock_irq(&task->pi_lock);
+ out_put_task:
+ put_task_struct(task);
+
+@@ -756,7 +757,7 @@ static int rt_mutex_adjust_prio_chain(st
+ /*
+ * Try to take an rt-mutex
+ *
+- * Must be called with lock->wait_lock held.
++ * Must be called with lock->wait_lock held and interrupts disabled
+ *
+ * @lock: The lock to be acquired.
+ * @task: The task which wants to acquire the lock
+@@ -766,8 +767,6 @@ static int rt_mutex_adjust_prio_chain(st
+ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+ struct rt_mutex_waiter *waiter)
+ {
+- unsigned long flags;
+-
+ /*
+ * Before testing whether we can acquire @lock, we set the
+ * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
+@@ -852,7 +851,7 @@ static int try_to_take_rt_mutex(struct r
+ * case, but conditionals are more expensive than a redundant
+ * store.
+ */
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
++ raw_spin_lock(&task->pi_lock);
+ task->pi_blocked_on = NULL;
+ /*
+ * Finish the lock acquisition. @task is the new owner. If
+@@ -861,7 +860,7 @@ static int try_to_take_rt_mutex(struct r
+ */
+ if (rt_mutex_has_waiters(lock))
+ rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock(&task->pi_lock);
+
+ takeit:
+ /* We got the lock. */
+@@ -883,7 +882,7 @@ static int try_to_take_rt_mutex(struct r
+ *
+ * Prepare waiter and propagate pi chain
+ *
+- * This must be called with lock->wait_lock held.
++ * This must be called with lock->wait_lock held and interrupts disabled
+ */
+ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+ struct rt_mutex_waiter *waiter,
+@@ -894,7 +893,6 @@ static int task_blocks_on_rt_mutex(struc
+ struct rt_mutex_waiter *top_waiter = waiter;
+ struct rt_mutex *next_lock;
+ int chain_walk = 0, res;
+- unsigned long flags;
+
+ /*
+ * Early deadlock detection. We really don't want the task to
+@@ -908,7 +906,7 @@ static int task_blocks_on_rt_mutex(struc
+ if (owner == task)
+ return -EDEADLK;
+
+- raw_spin_lock_irqsave(&task->pi_lock, flags);
++ raw_spin_lock(&task->pi_lock);
+ __rt_mutex_adjust_prio(task);
+ waiter->task = task;
+ waiter->lock = lock;
+@@ -921,12 +919,12 @@ static int task_blocks_on_rt_mutex(struc
+
+ task->pi_blocked_on = waiter;
+
+- raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock(&task->pi_lock);
+
+ if (!owner)
+ return 0;
+
+- raw_spin_lock_irqsave(&owner->pi_lock, flags);
++ raw_spin_lock(&owner->pi_lock);
+ if (waiter == rt_mutex_top_waiter(lock)) {
+ rt_mutex_dequeue_pi(owner, top_waiter);
+ rt_mutex_enqueue_pi(owner, waiter);
+@@ -941,7 +939,7 @@ static int task_blocks_on_rt_mutex(struc
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
+
+- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
++ raw_spin_unlock(&owner->pi_lock);
+ /*
+ * Even if full deadlock detection is on, if the owner is not
+ * blocked itself, we can avoid finding this out in the chain
+@@ -957,12 +955,12 @@ static int task_blocks_on_rt_mutex(struc
+ */
+ get_task_struct(owner);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
+ next_lock, waiter, task);
+
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irq(&lock->wait_lock);
+
+ return res;
+ }
+@@ -971,15 +969,14 @@ static int task_blocks_on_rt_mutex(struc
+ * Remove the top waiter from the current tasks pi waiter tree and
+ * queue it up.
+ *
+- * Called with lock->wait_lock held.
++ * Called with lock->wait_lock held and interrupts disabled.
+ */
+ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+ struct rt_mutex *lock)
+ {
+ struct rt_mutex_waiter *waiter;
+- unsigned long flags;
+
+- raw_spin_lock_irqsave(&current->pi_lock, flags);
++ raw_spin_lock(&current->pi_lock);
+
+ waiter = rt_mutex_top_waiter(lock);
+
+@@ -1001,7 +998,7 @@ static void mark_wakeup_next_waiter(stru
+ */
+ lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
+
+- raw_spin_unlock_irqrestore(&current->pi_lock, flags);
++ raw_spin_unlock(&current->pi_lock);
+
+ wake_q_add(wake_q, waiter->task);
+ }
+@@ -1009,7 +1006,7 @@ static void mark_wakeup_next_waiter(stru
+ /*
+ * Remove a waiter from a lock and give up
+ *
+- * Must be called with lock->wait_lock held and
++ * Must be called with lock->wait_lock held and interrupts disabled. It must
+ * have just failed to try_to_take_rt_mutex().
+ */
+ static void remove_waiter(struct rt_mutex *lock,
+@@ -1018,12 +1015,11 @@ static void remove_waiter(struct rt_mute
+ bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
+ struct task_struct *owner = rt_mutex_owner(lock);
+ struct rt_mutex *next_lock;
+- unsigned long flags;
+
+- raw_spin_lock_irqsave(&current->pi_lock, flags);
++ raw_spin_lock(&current->pi_lock);
+ rt_mutex_dequeue(lock, waiter);
+ current->pi_blocked_on = NULL;
+- raw_spin_unlock_irqrestore(&current->pi_lock, flags);
++ raw_spin_unlock(&current->pi_lock);
+
+ /*
+ * Only update priority if the waiter was the highest priority
+@@ -1032,7 +1028,7 @@ static void remove_waiter(struct rt_mute
+ if (!owner || !is_top_waiter)
+ return;
+
+- raw_spin_lock_irqsave(&owner->pi_lock, flags);
++ raw_spin_lock(&owner->pi_lock);
+
+ rt_mutex_dequeue_pi(owner, waiter);
+
+@@ -1044,7 +1040,7 @@ static void remove_waiter(struct rt_mute
+ /* Store the lock on which owner is blocked or NULL */
+ next_lock = task_blocked_on_lock(owner);
+
+- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
++ raw_spin_unlock(&owner->pi_lock);
+
+ /*
+ * Don't walk the chain, if the owner task is not blocked
+@@ -1056,12 +1052,12 @@ static void remove_waiter(struct rt_mute
+ /* gets dropped in rt_mutex_adjust_prio_chain()! */
+ get_task_struct(owner);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
+ next_lock, NULL, current);
+
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irq(&lock->wait_lock);
+ }
+
+ /*
+@@ -1097,11 +1093,11 @@ void rt_mutex_adjust_pi(struct task_stru
+ * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
+ * @lock: the rt_mutex to take
+ * @state: the state the task should block in (TASK_INTERRUPTIBLE
+- * or TASK_UNINTERRUPTIBLE)
++ * or TASK_UNINTERRUPTIBLE)
+ * @timeout: the pre-initialized and started timer, or NULL for none
+ * @waiter: the pre-initialized rt_mutex_waiter
+ *
+- * lock->wait_lock must be held by the caller.
++ * Must be called with lock->wait_lock held and interrupts disabled
+ */
+ static int __sched
+ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
+@@ -1129,13 +1125,13 @@ static int __sched
+ break;
+ }
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(waiter);
+
+ schedule();
+
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irq(&lock->wait_lock);
+ set_current_state(state);
+ }
+
+@@ -1172,17 +1168,26 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ enum rtmutex_chainwalk chwalk)
+ {
+ struct rt_mutex_waiter waiter;
++ unsigned long flags;
+ int ret = 0;
+
+ debug_rt_mutex_init_waiter(&waiter);
+ RB_CLEAR_NODE(&waiter.pi_tree_entry);
+ RB_CLEAR_NODE(&waiter.tree_entry);
+
+- raw_spin_lock(&lock->wait_lock);
++ /*
++ * Technically we could use raw_spin_[un]lock_irq() here, but this can
++ * be called in early boot if the cmpxchg() fast path is disabled
++ * (debug, no architecture support). In this case we will acquire the
++ * rtmutex with lock->wait_lock held. But we cannot unconditionally
++ * enable interrupts in that early boot case. So we need to use the
++ * irqsave/restore variants.
++ */
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+ /* Try to acquire the lock again: */
+ if (try_to_take_rt_mutex(lock, current, NULL)) {
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+ return 0;
+ }
+
+@@ -1211,7 +1216,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ */
+ fixup_rt_mutex_waiters(lock);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ /* Remove pending timer: */
+ if (unlikely(timeout))
+@@ -1227,6 +1232,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ */
+ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
+ {
++ unsigned long flags;
+ int ret;
+
+ /*
+@@ -1238,10 +1244,10 @@ static inline int rt_mutex_slowtrylock(s
+ return 0;
+
+ /*
+- * The mutex has currently no owner. Lock the wait lock and
+- * try to acquire the lock.
++ * The mutex has currently no owner. Lock the wait lock and try to
++ * acquire the lock. We use irqsave here to support early boot calls.
+ */
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+ ret = try_to_take_rt_mutex(lock, current, NULL);
+
+@@ -1251,7 +1257,7 @@ static inline int rt_mutex_slowtrylock(s
+ */
+ fixup_rt_mutex_waiters(lock);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ return ret;
+ }
+@@ -1263,7 +1269,10 @@ static inline int rt_mutex_slowtrylock(s
+ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+ struct wake_q_head *wake_q)
+ {
+- raw_spin_lock(&lock->wait_lock);
++ unsigned long flags;
++
++ /* irqsave required to support early boot calls */
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+
+ debug_rt_mutex_unlock(lock);
+
+@@ -1302,10 +1311,10 @@ static bool __sched rt_mutex_slowunlock(
+ */
+ while (!rt_mutex_has_waiters(lock)) {
+ /* Drops lock->wait_lock ! */
+- if (unlock_rt_mutex_safe(lock) == true)
++ if (unlock_rt_mutex_safe(lock, flags) == true)
+ return false;
+ /* Relock the rtmutex and try again */
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ }
+
+ /*
+@@ -1316,7 +1325,7 @@ static bool __sched rt_mutex_slowunlock(
+ */
+ mark_wakeup_next_waiter(wake_q, lock);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
+
+ /* check PI boosting */
+ return true;
+@@ -1596,10 +1605,10 @@ int rt_mutex_start_proxy_lock(struct rt_
+ {
+ int ret;
+
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irq(&lock->wait_lock);
+
+ if (try_to_take_rt_mutex(lock, task, NULL)) {
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+ return 1;
+ }
+
+@@ -1620,7 +1629,7 @@ int rt_mutex_start_proxy_lock(struct rt_
+ if (unlikely(ret))
+ remove_waiter(lock, waiter);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ debug_rt_mutex_print_deadlock(waiter);
+
+@@ -1668,7 +1677,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+ {
+ int ret;
+
+- raw_spin_lock(&lock->wait_lock);
++ raw_spin_lock_irq(&lock->wait_lock);
+
+ set_current_state(TASK_INTERRUPTIBLE);
+
+@@ -1684,7 +1693,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+ */
+ fixup_rt_mutex_waiters(lock);
+
+- raw_spin_unlock(&lock->wait_lock);
++ raw_spin_unlock_irq(&lock->wait_lock);
+
+ return ret;
+ }
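
For orientation while reading the hunks above: the new comment in rt_mutex_slowlock()
explains why the slow paths switch to the irqsave/irqrestore variants instead of the
plain _irq ones. The following stand-alone sketch (illustration only, not part of the
patch; the demo_* names are made up) shows the difference that matters in the early
boot case, where the caller may already run with interrupts disabled:

	#include <linux/spinlock.h>

	static DEFINE_RAW_SPINLOCK(demo_lock);

	static void demo_irqsave(void)
	{
		unsigned long flags;

		/* Saves whatever IRQ state the caller had ... */
		raw_spin_lock_irqsave(&demo_lock, flags);
		/* ... critical section ... */
		/* ... and restores exactly that state, enabled or not. */
		raw_spin_unlock_irqrestore(&demo_lock, flags);
	}

	static void demo_irq(void)
	{
		/* Assumes interrupts were enabled on entry ... */
		raw_spin_lock_irq(&demo_lock);
		/* ... critical section ... */
		/* ... and unconditionally re-enables them on unlock. */
		raw_spin_unlock_irq(&demo_lock);
	}

If the cmpxchg fast path is unavailable and the slow path runs before interrupts are
enabled, the _irq form would turn interrupts on behind the caller's back, which is why
rt_mutex_slowlock(), slowtrylock() and slowunlock() use the irqsave form.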
diff --git a/patches/rtmutex-Use-chainwalking-control-enum.patch b/patches/rtmutex-Use-chainwalking-control-enum.patch
index 322db42d54b0..9d45aa2dd1b0 100644
--- a/patches/rtmutex-Use-chainwalking-control-enum.patch
+++ b/patches/rtmutex-Use-chainwalking-control-enum.patch
@@ -16,9 +16,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1020,7 +1020,7 @@ static void noinline __sched rt_spin_lo
+@@ -1017,7 +1017,7 @@ static void noinline __sched rt_spin_lo
__set_current_state_no_track(TASK_UNINTERRUPTIBLE);
- pi_unlock(&self->pi_lock);
+ raw_spin_unlock(&self->pi_lock);
- ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK);
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index b279c7249395..70852937256a 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
#include "rtmutex_common.h"
-@@ -1221,6 +1222,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1219,6 +1220,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1478,7 +1513,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1473,7 +1508,8 @@ void rt_mutex_adjust_pi(struct task_stru
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1501,6 +1537,12 @@ static int __sched
+@@ -1496,6 +1532,12 @@ static int __sched
break;
}
@@ -96,10 +96,10 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+ break;
+ }
+
- raw_spin_unlock(&lock->wait_lock);
+ raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1535,13 +1577,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1530,13 +1572,90 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -190,17 +190,17 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+ struct ww_acquire_ctx *ww_ctx)
{
struct rt_mutex_waiter waiter;
- int ret = 0;
-@@ -1552,6 +1671,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ unsigned long flags;
+@@ -1556,6 +1675,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
+ if (ww_ctx)
+ ww_mutex_account_lock(lock, ww_ctx);
- raw_spin_unlock(&lock->wait_lock);
+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return 0;
}
-@@ -1566,13 +1687,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1570,13 +1691,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -226,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1701,31 +1832,36 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1709,31 +1840,36 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1772,7 +1908,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1780,7 +1916,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1789,7 +1925,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1797,7 +1933,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -285,7 +285,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1802,7 +1938,7 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1810,7 +1946,7 @@ int rt_mutex_timed_futex_lock(struct rt_
might_sleep();
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
@@ -294,7 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
-@@ -1821,7 +1957,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1829,7 +1965,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1845,6 +1981,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1853,6 +1989,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -311,7 +311,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2099,7 +2236,7 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2107,7 +2244,7 @@ int rt_mutex_finish_proxy_lock(struct rt
set_current_state(TASK_INTERRUPTIBLE);
/* sleep on the mutex */
@@ -320,7 +320,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
if (unlikely(ret))
remove_waiter(lock, waiter);
-@@ -2115,24 +2252,88 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -2123,24 +2260,88 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
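
The __mutex_lock_check_stamp() break added to the slowlock loop above implements the
wait/wound back-off for ww_mutex on -rt. The helper itself is not shown in this diff;
the sketch below is an assumption about its core test (simplified names, debug hooks
omitted): a younger acquire context gives up with -EDEADLK so the older one can make
progress, and the caller retries after releasing its other locks.

	#include <linux/ww_mutex.h>

	/* Hypothetical condensation of the stamp check, for illustration only. */
	static int ww_check_stamp_sketch(struct ww_acquire_ctx *mine,
					 struct ww_acquire_ctx *holder)
	{
		if (!holder)
			return 0;		/* no ww context holds the lock */
		if ((long)(mine->stamp - holder->stamp) > 0)
			return -EDEADLK;	/* we are younger: back off */
		return 0;
	}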
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 01c724efcfd2..8907f8ccbe93 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -355,7 +360,8 @@ int max_lock_depth = 1024;
+@@ -357,7 +362,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -155,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -492,7 +498,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -493,7 +499,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -164,10 +164,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -909,6 +915,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -907,6 +913,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
- raw_spin_lock_irqsave(&task->pi_lock, flags);
+ raw_spin_lock(&task->pi_lock);
+
+ /*
+ * In the case of futex requeue PI, this will be a proxy
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ * the task if PI_WAKEUP_INPROGRESS is set.
+ */
+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
-+ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++ raw_spin_unlock(&task->pi_lock);
+ return -EAGAIN;
+ }
+
@@ -188,7 +188,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -932,7 +955,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -930,7 +953,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
__rt_mutex_adjust_prio(owner);
@@ -197,16 +197,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1017,7 +1040,7 @@ static void remove_waiter(struct rt_mute
+@@ -1014,7 +1037,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
- struct rt_mutex *next_lock;
+ struct rt_mutex *next_lock = NULL;
- unsigned long flags;
- raw_spin_lock_irqsave(&current->pi_lock, flags);
-@@ -1042,7 +1065,8 @@ static void remove_waiter(struct rt_mute
+ raw_spin_lock(&current->pi_lock);
+ rt_mutex_dequeue(lock, waiter);
+@@ -1038,7 +1061,8 @@ static void remove_waiter(struct rt_mute
__rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -214,9 +214,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (rt_mutex_real_waiter(owner->pi_blocked_on))
+ next_lock = task_blocked_on_lock(owner);
- raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+ raw_spin_unlock(&owner->pi_lock);
-@@ -1078,7 +1102,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1074,7 +1098,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
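
The refreshed hunks above keep relying on the -rt specific PI_WAKEUP_INPROGRESS marker
and the rt_mutex_real_waiter() filter. Neither is part of this diff; as an assumption
about the definitions in the -rt tree's kernel/locking/rtmutex_common.h, they amount to
roughly the following sketch:

	#include <linux/types.h>

	struct rt_mutex_waiter;

	/* Marker stored in task->pi_blocked_on while a requeue-PI wakeup is in flight. */
	#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)

	/* True only for a genuine waiter, so the chain walk never follows the marker. */
	static inline bool rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
	{
		return waiter && waiter != PI_WAKEUP_INPROGRESS;
	}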
diff --git a/patches/rtmutex-lock-killable.patch b/patches/rtmutex-lock-killable.patch
index 7edd08a80e30..aa9a5956e191 100644
--- a/patches/rtmutex-lock-killable.patch
+++ b/patches/rtmutex-lock-killable.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1458,6 +1458,25 @@ int rt_mutex_timed_futex_lock(struct rt_
+@@ -1467,6 +1467,25 @@ int rt_mutex_timed_futex_lock(struct rt_
}
/**
diff --git a/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch b/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
new file mode 100644
index 000000000000..9bb1840d48d2
--- /dev/null
+++ b/patches/rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
@@ -0,0 +1,271 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 5 Feb 2016 18:26:11 +0100
+Subject: rtmutex: push down migrate_disable() into rt_spin_lock()
+
+No point in having the migrate disable/enable invocations in all the
+macro/inlines. That's just more code for no win as we do a function
+call anyway. Move it to the core code and save quite some text size.
+
+ text data bss dec filename
+11034127 3676912 14901248 29612287 vmlinux.before
+10990437 3676848 14901248 29568533 vmlinux.after
+
+~-40KiB
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/locallock.h | 6 +++---
+ include/linux/spinlock_rt.h | 25 +++++++------------------
+ kernel/cpu.c | 4 ++--
+ kernel/locking/lglock.c | 2 +-
+ kernel/locking/rt.c | 2 --
+ kernel/locking/rtmutex.c | 44 +++++++++++++++++++++++++++++++++++++++++---
+ 6 files changed, 54 insertions(+), 29 deletions(-)
+
+--- a/include/linux/locallock.h
++++ b/include/linux/locallock.h
+@@ -43,9 +43,9 @@ struct local_irq_lock {
+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+ */
+ #ifdef CONFIG_PREEMPT_RT_FULL
+-# define spin_lock_local(lock) rt_spin_lock(lock)
+-# define spin_trylock_local(lock) rt_spin_trylock(lock)
+-# define spin_unlock_local(lock) rt_spin_unlock(lock)
++# define spin_lock_local(lock) rt_spin_lock__no_mg(lock)
++# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock)
++# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock)
+ #else
+ # define spin_lock_local(lock) spin_lock(lock)
+ # define spin_trylock_local(lock) spin_trylock(lock)
+--- a/include/linux/spinlock_rt.h
++++ b/include/linux/spinlock_rt.h
+@@ -18,6 +18,10 @@ do { \
+ __rt_spin_lock_init(slock, #slock, &__key); \
+ } while (0)
+
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock);
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock);
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock);
++
+ extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+ extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+ extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+@@ -32,20 +36,16 @@ extern int atomic_dec_and_spin_lock(atom
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
+ */
++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock);
+ extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+ extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+ extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
+
+-#define spin_lock(lock) \
+- do { \
+- migrate_disable(); \
+- rt_spin_lock(lock); \
+- } while (0)
++#define spin_lock(lock) rt_spin_lock(lock)
+
+ #define spin_lock_bh(lock) \
+ do { \
+ local_bh_disable(); \
+- migrate_disable(); \
+ rt_spin_lock(lock); \
+ } while (0)
+
+@@ -56,24 +56,19 @@ extern int __lockfunc __rt_spin_trylock(
+ #define spin_trylock(lock) \
+ ({ \
+ int __locked; \
+- migrate_disable(); \
+ __locked = spin_do_trylock(lock); \
+- if (!__locked) \
+- migrate_enable(); \
+ __locked; \
+ })
+
+ #ifdef CONFIG_LOCKDEP
+ # define spin_lock_nested(lock, subclass) \
+ do { \
+- migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+ #define spin_lock_bh_nested(lock, subclass) \
+ do { \
+ local_bh_disable(); \
+- migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+
+@@ -81,7 +76,6 @@ extern int __lockfunc __rt_spin_trylock(
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = 0; \
+- migrate_disable(); \
+ rt_spin_lock_nested(lock, subclass); \
+ } while (0)
+ #else
+@@ -117,16 +111,11 @@ static inline unsigned long spin_lock_tr
+ /* FIXME: we need rt_spin_lock_nest_lock */
+ #define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+-#define spin_unlock(lock) \
+- do { \
+- rt_spin_unlock(lock); \
+- migrate_enable(); \
+- } while (0)
++#define spin_unlock(lock) rt_spin_unlock(lock)
+
+ #define spin_unlock_bh(lock) \
+ do { \
+ rt_spin_unlock(lock); \
+- migrate_enable(); \
+ local_bh_enable(); \
+ } while (0)
+
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -127,8 +127,8 @@ struct hotplug_pcp {
+ };
+
+ #ifdef CONFIG_PREEMPT_RT_FULL
+-# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+-# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock)
++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock)
+ #else
+ # define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+ # define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+--- a/kernel/locking/lglock.c
++++ b/kernel/locking/lglock.c
+@@ -10,7 +10,7 @@
+ # define lg_do_unlock(l) arch_spin_unlock(l)
+ #else
+ # define lg_lock_ptr struct rt_mutex
+-# define lg_do_lock(l) __rt_spin_lock(l)
++# define lg_do_lock(l) __rt_spin_lock__no_mg(l)
+ # define lg_do_unlock(l) __rt_spin_unlock(l)
+ #endif
+ /*
+--- a/kernel/locking/rt.c
++++ b/kernel/locking/rt.c
+@@ -235,7 +235,6 @@ EXPORT_SYMBOL(rt_read_trylock);
+ void __lockfunc rt_write_lock(rwlock_t *rwlock)
+ {
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+- migrate_disable();
+ __rt_spin_lock(&rwlock->lock);
+ }
+ EXPORT_SYMBOL(rt_write_lock);
+@@ -249,7 +248,6 @@ void __lockfunc rt_read_lock(rwlock_t *r
+ * recursive read locks succeed when current owns the lock
+ */
+ if (rt_mutex_owner(lock) != current) {
+- migrate_disable();
+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+ __rt_spin_lock(lock);
+ }
+--- a/kernel/locking/rtmutex.c
++++ b/kernel/locking/rtmutex.c
+@@ -1103,8 +1103,16 @@ static void noinline __sched rt_spin_lo
+ rt_mutex_adjust_prio(current);
+ }
+
++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
++{
++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
++}
++EXPORT_SYMBOL(rt_spin_lock__no_mg);
++
+ void __lockfunc rt_spin_lock(spinlock_t *lock)
+ {
++ migrate_disable();
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+ }
+@@ -1112,24 +1120,41 @@ EXPORT_SYMBOL(rt_spin_lock);
+
+ void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+ {
++ migrate_disable();
+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+ }
+ EXPORT_SYMBOL(__rt_spin_lock);
+
++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock)
++{
++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
++}
++EXPORT_SYMBOL(__rt_spin_lock__no_mg);
++
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+ {
++ migrate_disable();
+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+ }
+ EXPORT_SYMBOL(rt_spin_lock_nested);
+ #endif
+
++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock)
++{
++ /* NOTE: we always pass in '1' for nested, for simplicity */
++ spin_release(&lock->dep_map, 1, _RET_IP_);
++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++}
++EXPORT_SYMBOL(rt_spin_unlock__no_mg);
++
+ void __lockfunc rt_spin_unlock(spinlock_t *lock)
+ {
+ /* NOTE: we always pass in '1' for nested, for simplicity */
+ spin_release(&lock->dep_map, 1, _RET_IP_);
+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(rt_spin_unlock);
+
+@@ -1156,12 +1181,27 @@ int __lockfunc __rt_spin_trylock(struct
+ return rt_mutex_trylock(lock);
+ }
+
++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock)
++{
++ int ret;
++
++ ret = rt_mutex_trylock(&lock->lock);
++ if (ret)
++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ return ret;
++}
++EXPORT_SYMBOL(rt_spin_trylock__no_mg);
++
+ int __lockfunc rt_spin_trylock(spinlock_t *lock)
+ {
+- int ret = rt_mutex_trylock(&lock->lock);
++ int ret;
+
++ migrate_disable();
++ ret = rt_mutex_trylock(&lock->lock);
+ if (ret)
+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
++ else
++ migrate_enable();
+ return ret;
+ }
+ EXPORT_SYMBOL(rt_spin_trylock);
+@@ -1200,12 +1240,10 @@ int atomic_dec_and_spin_lock(atomic_t *a
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+- migrate_disable();
+ rt_spin_lock(lock);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ rt_spin_unlock(lock);
+- migrate_enable();
+ return 0;
+ }
+ EXPORT_SYMBOL(atomic_dec_and_spin_lock);
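
Condensed from the hunks above, purely as an illustration of the idea in the commit
message (the spin_lock_before name is invented to avoid clashing with the real macro):
the migrate_disable()/migrate_enable() pair moves out of every call-site macro and into
the out-of-line primitive, with trylock as the asymmetric case that must undo the pin
itself when the lock is not taken.

	/* before: every expansion pinned the task inline */
	#define spin_lock_before(lock)			\
		do {					\
			migrate_disable();		\
			rt_spin_lock(lock);		\
		} while (0)

	/* after: the pin lives once, inside the primitive */
	void __lockfunc rt_spin_lock(spinlock_t *lock)
	{
		migrate_disable();
		rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	}

	int __lockfunc rt_spin_trylock(spinlock_t *lock)
	{
		int ret;

		migrate_disable();
		ret = rt_mutex_trylock(&lock->lock);
		if (ret)
			spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
		else
			migrate_enable();	/* lock not taken: no unlock will follow */
		return ret;
	}

Callers that manage migration themselves (locallock, hotplug, lglock above) switch to
the new __no_mg variants so the pin is not taken twice.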
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/rtmutex-trylock-is-okay-on-RT.patch
index 55d58870f389..7f5cce537744 100644
--- a/patches/rtmutex-trylock-is-okay-on-RT.patch
+++ b/patches/rtmutex-trylock-is-okay-on-RT.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1469,7 +1469,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1478,7 +1478,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
diff --git a/patches/series b/patches/series
index 389bac0609f9..d74c34bd5902 100644
--- a/patches/series
+++ b/patches/series
@@ -5,6 +5,8 @@
############################################################
# UPSTREAM changes queued
############################################################
+rtmutex-Make-wait_lock-irq-safe.patch
+arm64-replace-read_lock-to-rcu-lock-in-call_step_hoo.patch
############################################################
# UPSTREAM FIXES, patches pending
@@ -234,6 +236,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch
mm-page-alloc-use-local-lock-on-target-cpu.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
+mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
# RADIX TREE
radix-tree-rt-aware.patch
@@ -299,6 +302,8 @@ tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
softirq-preempt-fix-3-re.patch
softirq-disable-softirq-stacks-for-rt.patch
softirq-split-locks.patch
+kernel-softirq-unlock-with-irqs-on.patch
+kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
softirq-split-timer-softirqs-out-of-ksoftirqd.patch
rtmutex-trylock-is-okay-on-RT.patch
@@ -334,6 +339,7 @@ peter_zijlstra-frob-rcu.patch
rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+rcu-disable-more-spots-of-rcu_bh.patch
# LGLOCKS - lovely
lglocks-rt.patch
@@ -341,6 +347,7 @@ lglocks-rt.patch
# STOP machine (depend on lglock & rtmutex)
stomp-machine-create-lg_global_trylock_relax-primiti.patch
stomp-machine-use-lg_global_trylock_relax-to-dead-wi.patch
+kernel-stop_machine-partly-revert-stop_machine-Use-r.patch
# DRIVERS SERIAL
drivers-tty-fix-omap-lock-crap.patch
@@ -445,6 +452,8 @@ powerpc-ps3-device-init.c-adapt-to-completions-using.patch
arm-at91-tclib-default-to-tclib-timer-for-rt.patch
arm-unwind-use_raw_lock.patch
ARM-enable-irq-in-translation-section-permission-fau.patch
+genirq-update-irq_set_irqchip_state-documentation.patch
+KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
# ARM64
arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -506,6 +515,9 @@ kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
cpu_down_move_migrate_enable_back.patch
hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+rtmutex-push-down-migrate_disable-into-rt_spin_lock.patch
+rt-locking-Reenable-migration-accross-schedule.patch
+
# SCSCI QLA2xxx
scsi-qla2xxx-fix-bug-sleeping-function-called-from-invalid-context.patch
diff --git a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
index 213b6e0b4a57..449b78f2505d 100644
--- a/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+++ b/patches/softirq-split-timer-softirqs-out-of-ksoftirqd.patch
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* !PREEMPT_RT_FULL */
-@@ -618,8 +632,12 @@ void thread_do_softirq(void)
+@@ -620,8 +634,12 @@ void thread_do_softirq(void)
static void do_raise_softirq_irqoff(unsigned int nr)
{
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are not in a hard interrupt and inside a bh disabled
-@@ -628,16 +646,30 @@ static void do_raise_softirq_irqoff(unsi
+@@ -630,16 +648,30 @@ static void do_raise_softirq_irqoff(unsi
* delegate it to ksoftirqd.
*/
if (!in_irq() && current->softirq_nestcnt)
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -663,7 +695,7 @@ void raise_softirq_irqoff(unsigned int n
+@@ -665,7 +697,7 @@ void raise_softirq_irqoff(unsigned int n
* raise a WARN() if the condition is met.
*/
if (!current->softirq_nestcnt)
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int ksoftirqd_softirq_pending(void)
-@@ -676,22 +708,37 @@ static inline void _local_bh_enable_nort
+@@ -678,22 +710,37 @@ static inline void _local_bh_enable_nort
static inline void ksoftirqd_set_sched_params(unsigned int cpu)
{
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* PREEMPT_RT_FULL */
/*
* Enter an interrupt context.
-@@ -741,6 +788,9 @@ static inline void invoke_softirq(void)
+@@ -743,6 +790,9 @@ static inline void invoke_softirq(void)
if (__this_cpu_read(ksoftirqd) &&
__this_cpu_read(ksoftirqd)->softirqs_raised)
wakeup_softirqd();
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_irq_restore(flags);
#endif
}
-@@ -1173,17 +1223,30 @@ static struct notifier_block cpu_nfb = {
+@@ -1175,17 +1225,30 @@ static struct notifier_block cpu_nfb = {
static struct smp_hotplug_thread softirq_threads = {
.store = &ksoftirqd,
.setup = ksoftirqd_set_sched_params,
diff --git a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
index 29d7622f5aed..e6f243fc0a29 100644
--- a/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
+++ b/patches/stomp-machine-create-lg_global_trylock_relax-primiti.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1153,6 +1153,11 @@ void __lockfunc rt_spin_unlock_wait(spin
+@@ -1151,6 +1151,11 @@ void __lockfunc rt_spin_unlock_wait(spin
}
EXPORT_SYMBOL(rt_spin_unlock_wait);