author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2019-08-16 17:19:29 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2019-08-16 17:19:29 +0200
commit     1b6748883c1a533f9dba8f64248f678cf8ec2f8c (patch)
tree       acfc6a8d536e465c184c9723083b5028251154a6
parent     ef6ada466428041b65040e04320f29ec88b6db2b (diff)
download   linux-rt-1b6748883c1a533f9dba8f64248f678cf8ec2f8c.tar.gz
[ANNOUNCE] v5.2.9-rt3 (tag: v5.2.9-rt3-patches)
Dear RT folks!

I'm pleased to announce the v5.2.9-rt3 patch set.

Changes since v5.2.9-rt2:

  - The exynos5 i2c controller disabled IRQ threading, as reported by
    Benjamin Rouxel. The hix5hd2 i2c controller did the same.

  - A timer related to the deadline scheduler now fires in hard-irq
    context. Patch by Juri Lelli.

  - The lock used by x86's thermal exception handling is now a
    raw_spinlock_t. Patch by Clark Williams.

  - The DMA-reservation code now uses a sequence lock instead of a
    sequence counter. Yann Collette reported warnings from that area
    with an AMD GPU.

  - Two KVM-related timers on arm64 now expire in hard-irq context.
    Reported by Julien Grall, patched by Thomas Gleixner.

  - Lazy preemption was broken in one case on arm64, reported by Paul
    Thomas. While investigating, another lazy-preempt bug was fixed on
    arm64 and on x86.

Known issues
  - rcutorture is currently broken on -RT. Reported by Juri Lelli.

The delta patch against v5.2.9-rt2 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/incr/patch-5.2.9-rt2-rt3.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.2.9-rt3

The RT patch against v5.2.9 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patch-5.2.9-rt3.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.2/older/patches-5.2.9-rt3.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
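A recurring theme in this release is letting short, non-sleeping timer
callbacks expire in hard-IRQ context on -RT by selecting one of the
HRTIMER_MODE_*_HARD modes (see the KVM arm/arm64 and SCHED_DEADLINE
patches below). A minimal sketch of the pattern -- the demo_* names are
made up for illustration, only the hrtimer API and the _HARD mode come
from the patches:

    /* Sketch: a callback short enough to be safe in hard-IRQ context. */
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer demo_timer;

    static enum hrtimer_restart demo_expire(struct hrtimer *t)
    {
            /* Must not sleep: on -RT this runs in hard-IRQ context. */
            return HRTIMER_NORESTART;
    }

    static void demo_setup(u64 ns)
    {
            hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
            demo_timer.function = demo_expire;
            hrtimer_start(&demo_timer, ktime_add_ns(ktime_get(), ns),
                          HRTIMER_MODE_ABS_HARD);
    }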
-rw-r--r--  patches/KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch | 46
-rw-r--r--  patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 6
-rw-r--r--  patches/arm64-preempt-Check-preemption-level-before-looking-.patch | 24
-rw-r--r--  patches/arm64-preempt-Fixup-lazy-preempt.patch | 26
-rw-r--r--  patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch | 6
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 2
-rw-r--r--  patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch | 268
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 8
-rw-r--r--  patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch | 6
-rw-r--r--  patches/genirq-update-irq_set_irqchip_state-documentation.patch | 2
-rw-r--r--  patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 4
-rw-r--r--  patches/i2c-exynos5-Remove-IRQF_ONESHOT.patch | 38
-rw-r--r--  patches/i2c-hix5hd2-Remove-IRQF_ONESHOT.patch | 32
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 34
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/lockdep-no-softirq-accounting-on-rt.patch | 4
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch | 4
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 14
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch | 2
-rw-r--r--  patches/mm-scatterlist-dont-disable-irqs-on-RT.patch | 2
-rw-r--r--  patches/mm-vmalloc-use-get-cpu-light.patch | 10
-rw-r--r--  patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch | 4
-rw-r--r--  patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch | 2
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 4
-rw-r--r--  patches/power-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/powerpc-preempt-lazy-support.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 18
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 2
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 2
-rw-r--r--  patches/rtmutex-annotate-sleeping-lock-context.patch | 2
-rw-r--r--  patches/sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch | 40
-rw-r--r--  patches/sched-delay-put-task.patch | 2
-rw-r--r--  patches/sched-fair-Make-the-hrtimers-non-hard-again.patch | 2
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 2
-rw-r--r--  patches/series | 9
-rw-r--r--  patches/signal-revert-ptrace-preempt-magic.patch | 2
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/skbufhead-raw-lock.patch | 14
-rw-r--r--  patches/softirq-disable-softirq-stacks-for-rt.patch | 4
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch | 6
-rw-r--r--  patches/thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch | 109
-rw-r--r--  patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch | 4
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r--  patches/x86-preempt-Check-preemption-level-before-looking-at.patch | 24
-rw-r--r--  patches/x86-preempt-lazy.patch | 2

46 files changed, 711 insertions(+), 95 deletions(-)
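The largest change below, the dma-buf conversion, repeats one pattern
throughout: the write side drops preempt_disable() +
write_seqcount_begin()/write_seqcount_end() in favour of
write_seqlock()/write_sequnlock(), and readers move from
read_seqcount_begin()/read_seqcount_retry() to
read_seqbegin()/read_seqretry(). A minimal sketch of that pattern --
the demo_* names are made up for illustration:

    #include <linux/seqlock.h>

    static DEFINE_SEQLOCK(demo_seq);  /* was: seqcount_t + preempt_disable() */
    static unsigned long demo_val;

    static void demo_write(unsigned long v)
    {
            /*
             * The spinlock_t embedded in the seqlock_t serializes
             * writers, so disabling preemption is no longer needed.
             */
            write_seqlock(&demo_seq);
            demo_val = v;
            write_sequnlock(&demo_seq);
    }

    static unsigned long demo_read(void)
    {
            unsigned int seq;
            unsigned long v;

            do {
                    seq = read_seqbegin(&demo_seq);
                    v = demo_val;
            } while (read_seqretry(&demo_seq, seq));

            return v;
    }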
diff --git a/patches/KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch b/patches/KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch
new file mode 100644
index 000000000000..bc84f1c3d20b
--- /dev/null
+++ b/patches/KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch
@@ -0,0 +1,46 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 13 Aug 2019 14:29:41 +0200
+Subject: [PATCH] KVM: arm/arm64: Let the timer expire in hardirq context
+ on RT
+
+The timers are canceled from a preempt-notifier which is invoked with
+preemption disabled, which is not allowed on PREEMPT_RT.
+The timer callback is short, so it can be invoked in hard-IRQ context
+on -RT.
+
+Let the timer expire on hard-IRQ context even on -RT.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Marc Zyngier <maz@kernel.org>
+Tested-by: Julien Grall <julien.grall@arm.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ virt/kvm/arm/arch_timer.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/virt/kvm/arm/arch_timer.c
++++ b/virt/kvm/arm/arch_timer.c
+@@ -80,7 +80,7 @@ static inline bool userspace_irqchip(str
+ static void soft_timer_start(struct hrtimer *hrt, u64 ns)
+ {
+ hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns),
+- HRTIMER_MODE_ABS);
++ HRTIMER_MODE_ABS_HARD);
+ }
+
+ static void soft_timer_cancel(struct hrtimer *hrt)
+@@ -697,11 +697,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu
+ update_vtimer_cntvoff(vcpu, kvm_phys_timer_read());
+ ptimer->cntvoff = 0;
+
+- hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+ timer->bg_timer.function = kvm_bg_timer_expire;
+
+- hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+- hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
++ hrtimer_init(&vtimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
++ hrtimer_init(&ptimer->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
+ vtimer->hrtimer.function = kvm_hrtimer_expire;
+ ptimer->hrtimer.function = kvm_hrtimer_expire;
+
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 9572530dfd0d..50708ff16c22 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -691,7 +691,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -702,7 +702,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -740,7 +740,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -751,7 +751,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -818,7 +818,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -829,7 +829,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);
diff --git a/patches/arm64-preempt-Check-preemption-level-before-looking-.patch b/patches/arm64-preempt-Check-preemption-level-before-looking-.patch
new file mode 100644
index 000000000000..4bc416e23cb8
--- /dev/null
+++ b/patches/arm64-preempt-Check-preemption-level-before-looking-.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Aug 2019 17:08:58 +0200
+Subject: [PATCH] arm64: preempt: Check preemption level before looking at
+ lazy-preempt
+
+Before evaluating the lazy-preempt state it must be ensured that the
+preempt-count is zero.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/include/asm/preempt.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm64/include/asm/preempt.h
++++ b/arch/arm64/include/asm/preempt.h
+@@ -73,6 +73,8 @@ static inline bool __preempt_count_dec_a
+ if (!pc || !READ_ONCE(ti->preempt_count))
+ return true;
+ #ifdef CONFIG_PREEMPT_LAZY
++ if ((pc & ~PREEMPT_NEED_RESCHED))
++ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
diff --git a/patches/arm64-preempt-Fixup-lazy-preempt.patch b/patches/arm64-preempt-Fixup-lazy-preempt.patch
new file mode 100644
index 000000000000..5e8d3fe9726e
--- /dev/null
+++ b/patches/arm64-preempt-Fixup-lazy-preempt.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 13 Aug 2019 11:53:23 +0200
+Subject: [PATCH] arm64: preempt: Fixup lazy preempt
+
+The irq-exit assembly checks the preempt count and the need-resched flag,
+followed by the lazy-preempt status. This is a bug: it should schedule if
+the preempt-count is zero _and_ need_resched is set, _or_ if the
+preempt-count is zero and the lazy-preempt status requires a reschedule.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/kernel/entry.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/entry.S
++++ b/arch/arm64/kernel/entry.S
+@@ -680,7 +680,8 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
+ orr x24, x24, x0
+ alternative_else_nop_endif
+
+- cbnz x24, 2f // preempt count != 0
++ cbz x24, 1f // (need_resched + count) == 0
++ cbnz w24, 2f // count != 0
+
+ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
+ cbnz w24, 2f // preempt lazy count != 0
diff --git a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
index 47325387f4fa..3c9b152fdb8b 100644
--- a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
+++ b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -403,12 +403,21 @@ void blk_queue_exit(struct request_queue
+@@ -404,12 +404,21 @@ void blk_queue_exit(struct request_queue
percpu_ref_put(&q->q_usage_counter);
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(struct timer_list *t)
-@@ -479,6 +488,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -480,6 +489,7 @@ struct request_queue *blk_alloc_queue_no
spin_lock_init(&q->queue_lock);
init_waitqueue_head(&q->mq_freeze_wq);
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/pagemap.h>
#include <linux/backing-dev-defs.h>
#include <linux/wait.h>
-@@ -554,6 +555,7 @@ struct request_queue {
+@@ -559,6 +560,7 @@ struct request_queue {
#endif
struct rcu_head rcu_head;
wait_queue_head_t mq_freeze_wq;
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 350baf287a7a..50d193fe8299 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default:
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1705,7 +1705,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1708,7 +1708,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
diff --git a/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch b/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
new file mode 100644
index 000000000000..de5701a4622e
--- /dev/null
+++ b/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
@@ -0,0 +1,268 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Aug 2019 16:38:43 +0200
+Subject: [PATCH] dma-buf: Use seqlock_t instead of disabling preemption
+
+"dma reservation" disables preemption while acquiring the write access
+for "seqcount" and then may acquire a spinlock_t.
+
+Replace the seqcount with a seqlock_t which provides seqcount like
+semantic and lock for writer.
+
+Link: https://lkml.kernel.org/r/f410b429-db86-f81c-7c67-f563fa808b62@free.fr
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/dma-buf/dma-buf.c | 8 ++--
+ drivers/dma-buf/reservation.c | 40 ++++++++---------------
+ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 +--
+ drivers/gpu/drm/i915/i915_gem.c | 10 ++---
+ include/linux/reservation.h | 4 +-
+ 5 files changed, 28 insertions(+), 40 deletions(-)
+
+--- a/drivers/dma-buf/dma-buf.c
++++ b/drivers/dma-buf/dma-buf.c
+@@ -168,7 +168,7 @@ static __poll_t dma_buf_poll(struct file
+ return 0;
+
+ retry:
+- seq = read_seqcount_begin(&resv->seq);
++ seq = read_seqbegin(&resv->seq);
+ rcu_read_lock();
+
+ fobj = rcu_dereference(resv->fence);
+@@ -177,7 +177,7 @@ static __poll_t dma_buf_poll(struct file
+ else
+ shared_count = 0;
+ fence_excl = rcu_dereference(resv->fence_excl);
+- if (read_seqcount_retry(&resv->seq, seq)) {
++ if (read_seqretry(&resv->seq, seq)) {
+ rcu_read_unlock();
+ goto retry;
+ }
+@@ -1034,12 +1034,12 @@ static int dma_buf_debug_show(struct seq
+
+ robj = buf_obj->resv;
+ while (true) {
+- seq = read_seqcount_begin(&robj->seq);
++ seq = read_seqbegin(&robj->seq);
+ rcu_read_lock();
+ fobj = rcu_dereference(robj->fence);
+ shared_count = fobj ? fobj->shared_count : 0;
+ fence = rcu_dereference(robj->fence_excl);
+- if (!read_seqcount_retry(&robj->seq, seq))
++ if (!read_seqretry(&robj->seq, seq))
+ break;
+ rcu_read_unlock();
+ }
+--- a/drivers/dma-buf/reservation.c
++++ b/drivers/dma-buf/reservation.c
+@@ -110,15 +110,13 @@ int reservation_object_reserve_shared(st
+ new->shared_count = j;
+ new->shared_max = max;
+
+- preempt_disable();
+- write_seqcount_begin(&obj->seq);
++ write_seqlock(&obj->seq);
+ /*
+ * RCU_INIT_POINTER can be used here,
+ * seqcount provides the necessary barriers
+ */
+ RCU_INIT_POINTER(obj->fence, new);
+- write_seqcount_end(&obj->seq);
+- preempt_enable();
++ write_sequnlock(&obj->seq);
+
+ if (!old)
+ return 0;
+@@ -158,8 +156,7 @@ void reservation_object_add_shared_fence
+ fobj = reservation_object_get_list(obj);
+ count = fobj->shared_count;
+
+- preempt_disable();
+- write_seqcount_begin(&obj->seq);
++ write_seqlock(&obj->seq);
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *old_fence;
+@@ -181,8 +178,7 @@ void reservation_object_add_shared_fence
+ /* pointer update must be visible before we extend the shared_count */
+ smp_store_mb(fobj->shared_count, count);
+
+- write_seqcount_end(&obj->seq);
+- preempt_enable();
++ write_sequnlock(&obj->seq);
+ }
+ EXPORT_SYMBOL(reservation_object_add_shared_fence);
+
+@@ -209,14 +205,11 @@ void reservation_object_add_excl_fence(s
+ if (fence)
+ dma_fence_get(fence);
+
+- preempt_disable();
+- write_seqcount_begin(&obj->seq);
+- /* write_seqcount_begin provides the necessary memory barrier */
++ write_seqlock(&obj->seq);
+ RCU_INIT_POINTER(obj->fence_excl, fence);
+ if (old)
+ old->shared_count = 0;
+- write_seqcount_end(&obj->seq);
+- preempt_enable();
++ write_sequnlock(&obj->seq);
+
+ /* inplace update, no shared fences */
+ while (i--)
+@@ -298,13 +291,10 @@ int reservation_object_copy_fences(struc
+ src_list = reservation_object_get_list(dst);
+ old = reservation_object_get_excl(dst);
+
+- preempt_disable();
+- write_seqcount_begin(&dst->seq);
+- /* write_seqcount_begin provides the necessary memory barrier */
++ write_seqlock(&dst->seq);
+ RCU_INIT_POINTER(dst->fence_excl, new);
+ RCU_INIT_POINTER(dst->fence, dst_list);
+- write_seqcount_end(&dst->seq);
+- preempt_enable();
++ write_sequnlock(&dst->seq);
+
+ if (src_list)
+ kfree_rcu(src_list, rcu);
+@@ -345,7 +335,7 @@ int reservation_object_get_fences_rcu(st
+ shared_count = i = 0;
+
+ rcu_read_lock();
+- seq = read_seqcount_begin(&obj->seq);
++ seq = read_seqbegin(&obj->seq);
+
+ fence_excl = rcu_dereference(obj->fence_excl);
+ if (fence_excl && !dma_fence_get_rcu(fence_excl))
+@@ -394,7 +384,7 @@ int reservation_object_get_fences_rcu(st
+ }
+ }
+
+- if (i != shared_count || read_seqcount_retry(&obj->seq, seq)) {
++ if (i != shared_count || read_seqretry(&obj->seq, seq)) {
+ while (i--)
+ dma_fence_put(shared[i]);
+ dma_fence_put(fence_excl);
+@@ -443,7 +433,7 @@ long reservation_object_wait_timeout_rcu
+
+ retry:
+ shared_count = 0;
+- seq = read_seqcount_begin(&obj->seq);
++ seq = read_seqbegin(&obj->seq);
+ rcu_read_lock();
+ i = -1;
+
+@@ -490,7 +480,7 @@ long reservation_object_wait_timeout_rcu
+
+ rcu_read_unlock();
+ if (fence) {
+- if (read_seqcount_retry(&obj->seq, seq)) {
++ if (read_seqretry(&obj->seq, seq)) {
+ dma_fence_put(fence);
+ goto retry;
+ }
+@@ -546,7 +536,7 @@ bool reservation_object_test_signaled_rc
+ retry:
+ ret = true;
+ shared_count = 0;
+- seq = read_seqcount_begin(&obj->seq);
++ seq = read_seqbegin(&obj->seq);
+
+ if (test_all) {
+ unsigned i;
+@@ -567,7 +557,7 @@ bool reservation_object_test_signaled_rc
+ break;
+ }
+
+- if (read_seqcount_retry(&obj->seq, seq))
++ if (read_seqretry(&obj->seq, seq))
+ goto retry;
+ }
+
+@@ -580,7 +570,7 @@ bool reservation_object_test_signaled_rc
+ if (ret < 0)
+ goto retry;
+
+- if (read_seqcount_retry(&obj->seq, seq))
++ if (read_seqretry(&obj->seq, seq))
+ goto retry;
+ }
+ }
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+@@ -250,11 +250,9 @@ static int amdgpu_amdkfd_remove_eviction
+ new->shared_count = k;
+
+ /* Install the new fence list, seqcount provides the barriers */
+- preempt_disable();
+- write_seqcount_begin(&resv->seq);
++ write_seqlock(&resv->seq);
+ RCU_INIT_POINTER(resv->fence, new);
+- write_seqcount_end(&resv->seq);
+- preempt_enable();
++ write_sequnlock(&resv->seq);
+
+ /* Drop the references to the removed fences or move them to ef_list */
+ for (i = j, k = 0; i < old->shared_count; ++i) {
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -449,7 +449,7 @@ i915_gem_object_wait_reservation(struct
+ unsigned int flags,
+ long timeout)
+ {
+- unsigned int seq = __read_seqcount_begin(&resv->seq);
++ unsigned int seq = read_seqbegin(&resv->seq);
+ struct dma_fence *excl;
+ bool prune_fences = false;
+
+@@ -500,9 +500,9 @@ i915_gem_object_wait_reservation(struct
+ * signaled and that the reservation object has not been changed (i.e.
+ * no new fences have been added).
+ */
+- if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
++ if (prune_fences && !read_seqretry(&resv->seq, seq)) {
+ if (reservation_object_trylock(resv)) {
+- if (!__read_seqcount_retry(&resv->seq, seq))
++ if (!read_seqretry(&resv->seq, seq))
+ reservation_object_add_excl_fence(resv, NULL);
+ reservation_object_unlock(resv);
+ }
+@@ -3943,7 +3943,7 @@ i915_gem_busy_ioctl(struct drm_device *d
+ *
+ */
+ retry:
+- seq = raw_read_seqcount(&obj->resv->seq);
++ seq = read_seqbegin(&obj->resv->seq);
+
+ /* Translate the exclusive fence to the READ *and* WRITE engine */
+ args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
+@@ -3961,7 +3961,7 @@ i915_gem_busy_ioctl(struct drm_device *d
+ }
+ }
+
+- if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
++ if (args->busy && read_seqretry(&obj->resv->seq, seq))
+ goto retry;
+
+ err = 0;
+--- a/include/linux/reservation.h
++++ b/include/linux/reservation.h
+@@ -71,7 +71,7 @@ struct reservation_object_list {
+ */
+ struct reservation_object {
+ struct ww_mutex lock;
+- seqcount_t seq;
++ seqlock_t seq;
+
+ struct dma_fence __rcu *fence_excl;
+ struct reservation_object_list __rcu *fence;
+@@ -90,7 +90,7 @@ reservation_object_init(struct reservati
+ {
+ ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+- __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class);
++ seqlock_init(&obj->seq);
+ RCU_INIT_POINTER(obj->fence, NULL);
+ RCU_INIT_POINTER(obj->fence_excl, NULL);
+ }
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index a54794d9804e..70b9c49068d1 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -ENOENT;
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -448,7 +448,7 @@ static
+@@ -441,7 +441,7 @@ static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1570,7 +1570,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1492,7 +1492,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode)
{
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1874,7 +1874,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1892,7 +1892,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -174,7 +174,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -694,7 +694,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -698,7 +698,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index c4d7a486e151..09b81b3bd884 100644
--- a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -1890,7 +1890,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1812,7 +1812,11 @@ int nfs_rmdir(struct inode *dir, struct
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1900,7 +1904,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1822,7 +1826,11 @@ int nfs_rmdir(struct inode *dir, struct
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_nfs_rmdir_exit(dir, dentry, error);
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
-@@ -2089,7 +2089,11 @@ static void init_once(void *foo)
+@@ -2090,7 +2090,11 @@ static void init_once(void *foo)
atomic_long_set(&nfsi->nrequests, 0);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 4b37da9c5c02..830012d645ea 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2636,7 +2636,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2684,7 +2684,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index cb3dc5bbeefe..d63857c7a228 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
timer->function = perf_mux_hrtimer_handler;
}
-@@ -9488,7 +9488,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -9511,7 +9511,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4945,9 +4945,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4999,9 +4999,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/i2c-exynos5-Remove-IRQF_ONESHOT.patch b/patches/i2c-exynos5-Remove-IRQF_ONESHOT.patch
new file mode 100644
index 000000000000..7a90a947dcf4
--- /dev/null
+++ b/patches/i2c-exynos5-Remove-IRQF_ONESHOT.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 13 Aug 2019 12:30:12 +0200
+Subject: [PATCH] i2c: exynos5: Remove IRQF_ONESHOT
+
+The driver sets IRQF_ONESHOT and passes only a primary handler. The IRQ
+is masked while the primary handler is invoked, independently of
+IRQF_ONESHOT.
+With IRQF_ONESHOT the core code will not force-thread the interrupt, and
+this is probably not intended. I *assume* that the original author copied
+the IRQ registration from another driver which passed a primary and a
+secondary handler, removed the secondary handler, but kept the
+ONESHOT flag.
+
+Remove IRQF_ONESHOT.
+
+Reported-by: Benjamin Rouxel <benjamin.rouxel@uva.nl>
+Tested-by: Benjamin Rouxel <benjamin.rouxel@uva.nl>
+Cc: Kukjin Kim <kgene@kernel.org>
+Cc: Krzysztof Kozlowski <krzk@kernel.org>
+Cc: linux-samsung-soc@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/i2c/busses/i2c-exynos5.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-exynos5.c
++++ b/drivers/i2c/busses/i2c-exynos5.c
+@@ -791,9 +791,7 @@ static int exynos5_i2c_probe(struct plat
+ }
+
+ ret = devm_request_irq(&pdev->dev, i2c->irq, exynos5_i2c_irq,
+- IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- dev_name(&pdev->dev), i2c);
+-
++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), i2c);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", i2c->irq);
+ goto err_clk;
diff --git a/patches/i2c-hix5hd2-Remove-IRQF_ONESHOT.patch b/patches/i2c-hix5hd2-Remove-IRQF_ONESHOT.patch
new file mode 100644
index 000000000000..671f9b0cb3cb
--- /dev/null
+++ b/patches/i2c-hix5hd2-Remove-IRQF_ONESHOT.patch
@@ -0,0 +1,32 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 13 Aug 2019 12:30:37 +0200
+Subject: [PATCH] i2c: hix5hd2: Remove IRQF_ONESHOT
+
+The driver sets IRQF_ONESHOT and passes only a primary handler. The IRQ
+is masked while the primary handler is invoked, independently of
+IRQF_ONESHOT.
+With IRQF_ONESHOT the core code will not force-thread the interrupt, and
+this is probably not intended. I *assume* that the original author copied
+the IRQ registration from another driver which passed a primary and a
+secondary handler, removed the secondary handler, but kept the
+ONESHOT flag.
+
+Remove IRQF_ONESHOT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/i2c/busses/i2c-hix5hd2.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/i2c/busses/i2c-hix5hd2.c
++++ b/drivers/i2c/busses/i2c-hix5hd2.c
+@@ -445,8 +445,7 @@ static int hix5hd2_i2c_probe(struct plat
+ hix5hd2_i2c_init(priv);
+
+ ret = devm_request_irq(&pdev->dev, irq, hix5hd2_i2c_irq,
+- IRQF_NO_SUSPEND | IRQF_ONESHOT,
+- dev_name(&pdev->dev), priv);
++ IRQF_NO_SUSPEND, dev_name(&pdev->dev), priv);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot request HS-I2C IRQ %d\n", irq);
+ goto err_clk;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index aec536727813..b5a7a32cb92d 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -259,7 +259,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1402,7 +1403,7 @@ extern struct pid *cad_pid;
+@@ -1410,7 +1411,7 @@ extern struct pid *cad_pid;
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
@@ -535,7 +535,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!task_on_rq_queued(task))) {
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -1621,7 +1621,7 @@ static void task_numa_compare(struct tas
+@@ -1653,7 +1653,7 @@ static void task_numa_compare(struct tas
* be incurred if the tasks were swapped.
*/
/* Skip this swap candidate if cannot move to the source cpu */
@@ -544,7 +544,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
/*
-@@ -1718,7 +1718,7 @@ static void task_numa_find_cpu(struct ta
+@@ -1751,7 +1751,7 @@ static void task_numa_find_cpu(struct ta
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
@@ -553,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
env->dst_cpu = cpu;
-@@ -5831,7 +5831,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5885,7 +5885,7 @@ find_idlest_group(struct sched_domain *s
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@@ -562,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5963,7 +5963,7 @@ find_idlest_group_cpu(struct sched_group
+@@ -6017,7 +6017,7 @@ find_idlest_group_cpu(struct sched_group
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@@ -571,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (available_idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -6003,7 +6003,7 @@ static inline int find_idlest_cpu(struct
+@@ -6057,7 +6057,7 @@ static inline int find_idlest_cpu(struct
{
int new_cpu = cpu;
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prev_cpu;
/*
-@@ -6120,7 +6120,7 @@ static int select_idle_core(struct task_
+@@ -6174,7 +6174,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -6154,7 +6154,7 @@ static int select_idle_smt(struct task_s
+@@ -6208,7 +6208,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
return cpu;
-@@ -6217,7 +6217,7 @@ static int select_idle_cpu(struct task_s
+@@ -6271,7 +6271,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
break;
-@@ -6254,7 +6254,7 @@ static int select_idle_sibling(struct ta
+@@ -6308,7 +6308,7 @@ static int select_idle_sibling(struct ta
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6600,7 +6600,7 @@ static int find_energy_efficient_cpu(str
+@@ -6654,7 +6654,7 @@ static int find_energy_efficient_cpu(str
int max_spare_cap_cpu = -1;
for_each_cpu_and(cpu, perf_domain_span(pd), sched_domain_span(sd)) {
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
/* Skip CPUs that will be overutilized. */
-@@ -6689,7 +6689,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6743,7 +6743,7 @@ select_task_rq_fair(struct task_struct *
}
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
@@ -634,7 +634,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7445,14 +7445,14 @@ int can_migrate_task(struct task_struct
+@@ -7499,14 +7499,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7472,7 +7472,7 @@ int can_migrate_task(struct task_struct
+@@ -7526,7 +7526,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -8099,7 +8099,7 @@ static inline int check_misfit_status(st
+@@ -8153,7 +8153,7 @@ static inline int check_misfit_status(st
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8768,7 +8768,7 @@ static struct sched_group *find_busiest_
+@@ -8822,7 +8822,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -678,7 +678,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -9210,7 +9210,7 @@ static int load_balance(int this_cpu, st
+@@ -9264,7 +9264,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 279489a1d145..e36eb4b6666a 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt2
++-rt3
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 50c00347284a..603e0b7fc07b 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
defined(CONFIG_PREEMPT_TRACER)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -4227,6 +4227,7 @@ static void check_flags(unsigned long fl
+@@ -4244,6 +4244,7 @@ static void check_flags(unsigned long fl
}
}
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -4241,6 +4242,7 @@ static void check_flags(unsigned long fl
+@@ -4258,6 +4259,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 580c6e5c6579..d8b2b7bf0fdf 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <asm/page.h>
struct notifier_block;
-@@ -328,6 +329,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -329,6 +330,7 @@ extern unsigned long nr_free_pagecache_p
/* linux/mm/swap.c */
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -2226,10 +2226,12 @@ compact_zone(struct compact_control *cc,
+@@ -2229,10 +2229,12 @@ compact_zone(struct compact_control *cc,
block_start_pfn(cc->migrate_pfn, cc->order);
if (last_migrated_pfn < current_block_start) {
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 4a392c2ffdd8..2fe4f8cceca3 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2159,7 +2159,7 @@ static void drain_all_stock(struct mem_c
+@@ -2167,7 +2167,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2179,7 +2179,7 @@ static void drain_all_stock(struct mem_c
+@@ -2187,7 +2187,7 @@ static void drain_all_stock(struct mem_c
}
css_put(&memcg->css);
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index acb584a56d32..c30521722571 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4967,12 +4970,12 @@ static int mem_cgroup_move_account(struc
+@@ -4976,12 +4979,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6086,10 +6089,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -6095,10 +6098,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6158,7 +6161,7 @@ static void uncharge_batch(const struct
+@@ -6167,7 +6170,7 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6166,7 +6169,7 @@ static void uncharge_batch(const struct
+@@ -6175,7 +6178,7 @@ static void uncharge_batch(const struct
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -6329,10 +6332,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6338,10 +6341,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6524,6 +6527,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6533,6 +6536,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6569,13 +6573,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6578,13 +6582,17 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index d676f56df3f2..1accad2f5df1 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -247,7 +247,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1209,6 +1210,12 @@ struct task_struct {
+@@ -1217,6 +1218,12 @@ struct task_struct {
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
#endif
diff --git a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 6c0ec893b66d..e7599998418c 100644
--- a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
-@@ -800,7 +800,7 @@ void sg_miter_stop(struct sg_mapping_ite
+@@ -801,7 +801,7 @@ void sg_miter_stop(struct sg_mapping_ite
flush_kernel_dcache_page(miter->page);
if (miter->__flags & SG_MITER_ATOMIC) {
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index 08048720523b..ef84397d10a2 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -1400,7 +1400,7 @@ static void *new_vmap_block(unsigned int
+@@ -1406,7 +1406,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -1443,11 +1443,12 @@ static void *new_vmap_block(unsigned int
+@@ -1449,11 +1449,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -1516,6 +1517,7 @@ static void *vb_alloc(unsigned long size
+@@ -1522,6 +1523,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -1530,7 +1532,8 @@ static void *vb_alloc(unsigned long size
+@@ -1536,7 +1538,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1553,7 +1556,7 @@ static void *vb_alloc(unsigned long size
+@@ -1559,7 +1562,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index fdf50095cf53..894638c9c377 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6026,6 +6026,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -6031,6 +6031,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -6037,6 +6038,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -6042,6 +6043,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
index f13a3aa73ff3..1f4eb7833e4c 100644
--- a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
+++ b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4562,11 +4562,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4567,11 +4567,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 7c39f44838a3..cca9d25c9d5c 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned int sas_ss_flags;
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1274,8 +1274,8 @@ int do_send_sig_info(int sig, struct ker
+@@ -1287,8 +1287,8 @@ int do_send_sig_info(int sig, struct ker
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1304,6 +1304,39 @@ force_sig_info(int sig, struct kernel_si
+@@ -1317,6 +1317,39 @@ force_sig_info(int sig, struct kernel_si
return ret;
}
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 986b4db28159..0660485f136b 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -386,7 +386,7 @@ menu "Kernel options"
+@@ -387,7 +387,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index a211cb965717..5e1587b08695 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -213,6 +213,7 @@ config PPC
+@@ -214,6 +214,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 57f0e090517b..cc8b9eeeefc5 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1736,6 +1736,44 @@ static inline int test_tsk_need_resched(
+@@ -1744,6 +1744,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -404,7 +404,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4050,7 +4050,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4104,7 +4104,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -413,7 +413,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -4074,7 +4074,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -4128,7 +4128,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -422,7 +422,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -4216,7 +4216,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4270,7 +4270,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -431,7 +431,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4400,7 +4400,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4454,7 +4454,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -440,7 +440,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -5110,7 +5110,7 @@ static void hrtick_start_fair(struct rq
+@@ -5164,7 +5164,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -449,7 +449,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6950,7 +6950,7 @@ static void check_preempt_wakeup(struct
+@@ -7004,7 +7004,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -458,7 +458,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -10222,7 +10222,7 @@ static void task_fork_fair(struct task_s
+@@ -10276,7 +10276,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -467,7 +467,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -10246,7 +10246,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -10300,7 +10300,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 4321d1452b23..6149dd5b14e0 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1723,6 +1719,51 @@ static inline int test_tsk_need_resched(
+@@ -1731,6 +1727,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 9adfca4caeba..91983820b4c0 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1075,6 +1075,12 @@ static int irq_thread(void *data)
+@@ -1101,6 +1101,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index b63094bcaf87..6b8a8f4d4c24 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
#ifdef CONFIG_PREEMPT_RCU
-@@ -1816,6 +1825,23 @@ static __always_inline bool need_resched
+@@ -1824,6 +1833,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
diff --git a/patches/sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch b/patches/sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch
new file mode 100644
index 000000000000..9c96acd3651e
--- /dev/null
+++ b/patches/sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch
@@ -0,0 +1,40 @@
+From: Juri Lelli <juri.lelli@redhat.com>
+Date: Wed, 31 Jul 2019 12:37:15 +0200
+Subject: [PATCH] sched/deadline: Ensure inactive_timer runs in hardirq
+ context
+
+The SCHED_DEADLINE inactive timer needs to run in hardirq context (as
+dl_task_timer already does) on PREEMPT_RT.
+
+Change the mode to HRTIMER_MODE_REL_HARD.
+
+[ tglx: Fixed up the start site, so mode debugging works ]
+
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20190731103715.4047-1-juri.lelli@redhat.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/sched/deadline.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -287,7 +287,7 @@ static void task_non_contending(struct t
+
+ dl_se->dl_non_contending = 1;
+ get_task_struct(p);
+- hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL);
++ hrtimer_start(timer, ns_to_ktime(zerolag_time), HRTIMER_MODE_REL_HARD);
+ }
+
+ static void task_contending(struct sched_dl_entity *dl_se, int flags)
+@@ -1292,7 +1292,7 @@ void init_dl_inactive_task_timer(struct
+ {
+ struct hrtimer *timer = &dl_se->inactive_timer;
+
+- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
+ timer->function = inactive_task_timer;
+ }
+
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 12021d8578b6..d5c48f28a331 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1191,6 +1191,9 @@ struct task_struct {
+@@ -1199,6 +1199,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
diff --git a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
index 212b75b85825..25d85f505354 100644
--- a/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
+++ b/patches/sched-fair-Make-the-hrtimers-non-hard-again.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -4945,9 +4945,9 @@ void init_cfs_bandwidth(struct cfs_bandw
+@@ -4999,9 +4999,9 @@ void init_cfs_bandwidth(struct cfs_bandw
cfs_b->period = ns_to_ktime(default_cfs_period());
INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index fdb5b15c58b4..3fa34db0314d 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1630,6 +1632,7 @@ extern struct task_struct *find_get_task
+@@ -1638,6 +1640,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
diff --git a/patches/series b/patches/series
index b7f09d5e9f21..e37bc72e4497 100644
--- a/patches/series
+++ b/patches/series
@@ -62,11 +62,16 @@ drm-i915-Don-t-disable-interrupts-independently-of-t.patch
locking-lockdep-Don-t-complain-about-incorrect-name-.patch
arm-imx6-cpuidle-Use-raw_spinlock_t.patch
x86-ldt-Initialize-the-context-lock-for-init_mm.patch
+i2c-exynos5-Remove-IRQF_ONESHOT.patch
+i2c-hix5hd2-Remove-IRQF_ONESHOT.patch
+sched-deadline-Ensure-inactive_timer-runs-in-hardirq.patch
+thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
############################################################
# Ready for posting
############################################################
efi-Allow-efi-runtime.patch
+dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
softirq-Add-preemptible-softirq.patch
sched-swait-Add-swait_event_lock_irq.patch
@@ -218,6 +223,7 @@ hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
hrtimer-by-timers-by-default-into-the-softirq-context.patch
sched-fair-Make-the-hrtimers-non-hard-again.patch
hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
+KVM-arm-arm64-Let-the-timer-expire-in-hardirq-contex.patch
# POSIX-CPU-TIMERS
posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -383,9 +389,12 @@ lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
preempt-lazy-support.patch
ftrace-Fix-trace-header-alignment.patch
x86-preempt-lazy.patch
+x86-preempt-Check-preemption-level-before-looking-at.patch
arm-preempt-lazy-support.patch
powerpc-preempt-lazy-support.patch
arch-arm64-Add-lazy-preempt-support.patch
+arm64-preempt-Fixup-lazy-preempt.patch
+arm64-preempt-Check-preemption-level-before-looking-.patch
# DRIVERS
connector-cn_proc-Protect-send_msg-with-a-local-lock.patch
diff --git a/patches/signal-revert-ptrace-preempt-magic.patch b/patches/signal-revert-ptrace-preempt-magic.patch
index b7e1379b8fc6..9361ab52c124 100644
--- a/patches/signal-revert-ptrace-preempt-magic.patch
+++ b/patches/signal-revert-ptrace-preempt-magic.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2103,15 +2103,7 @@ static void ptrace_stop(int exit_code, i
+@@ -2147,15 +2147,7 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 795e45f055d0..d42483b0ad44 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1756,7 +1816,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1800,7 +1860,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index b91297e78eec..0b920853a839 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -5318,7 +5318,7 @@ static void flush_backlog(struct work_st
+@@ -5323,7 +5323,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -5328,11 +5328,14 @@ static void flush_backlog(struct work_st
+@@ -5333,11 +5333,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -5932,7 +5935,9 @@ static int process_backlog(struct napi_s
+@@ -5937,7 +5940,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5940,9 +5945,9 @@ static int process_backlog(struct napi_s
+@@ -5945,9 +5950,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -6407,13 +6412,21 @@ static __latent_entropy void net_rx_acti
+@@ -6412,13 +6417,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -9524,10 +9537,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -9529,10 +9542,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -9836,8 +9852,9 @@ static int __init net_dev_init(void)
+@@ -9843,8 +9859,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index 741084fce284..c95d954f8f6a 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -106,7 +106,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -1032,6 +1032,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -1056,6 +1056,7 @@ EXPORT_SYMBOL(native_load_gs_index)
jmp 2b
.previous
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -1042,6 +1043,7 @@ ENTRY(do_softirq_own_stack)
+@@ -1066,6 +1067,7 @@ ENTRY(do_softirq_own_stack)
leaveq
ret
ENDPROC(do_softirq_own_stack)
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index f85f81c02641..0c5a0196f9ad 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5894,12 +5897,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5899,12 +5902,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5977,6 +5982,7 @@ void __napi_schedule(struct napi_struct
+@@ -5982,6 +5987,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -9506,6 +9512,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -9511,6 +9517,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch b/patches/thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
new file mode 100644
index 000000000000..32f803f16e71
--- /dev/null
+++ b/patches/thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
@@ -0,0 +1,109 @@
+From: Clark Williams <williams@redhat.com>
+Date: Mon, 15 Jul 2019 15:25:00 -0500
+Subject: [PATCH] thermal/x86_pkg_temp: make pkg_temp_lock a raw spinlock
+
+The spinlock pkg_temp_lock can be taken in atomic context on
+v5.2-rt PREEMPT_RT. It is static and of limited scope, so go ahead
+and make it a raw spinlock.
+
+Signed-off-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/thermal/intel/x86_pkg_temp_thermal.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/drivers/thermal/intel/x86_pkg_temp_thermal.c
++++ b/drivers/thermal/intel/x86_pkg_temp_thermal.c
+@@ -63,7 +63,7 @@ static int max_packages __read_mostly;
+ /* Array of package pointers */
+ static struct pkg_device **packages;
+ /* Serializes interrupt notification, work and hotplug */
+-static DEFINE_SPINLOCK(pkg_temp_lock);
++static DEFINE_RAW_SPINLOCK(pkg_temp_lock);
+ /* Protects zone operation in the work function against hotplug removal */
+ static DEFINE_MUTEX(thermal_zone_mutex);
+
+@@ -279,12 +279,12 @@ static void pkg_temp_thermal_threshold_w
+ u64 msr_val, wr_val;
+
+ mutex_lock(&thermal_zone_mutex);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ ++pkg_work_cnt;
+
+ pkgdev = pkg_temp_thermal_get_dev(cpu);
+ if (!pkgdev) {
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ mutex_unlock(&thermal_zone_mutex);
+ return;
+ }
+@@ -298,7 +298,7 @@ static void pkg_temp_thermal_threshold_w
+ }
+
+ enable_pkg_thres_interrupt();
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /*
+ * If tzone is not NULL, then thermal_zone_mutex will prevent the
+@@ -323,7 +323,7 @@ static int pkg_thermal_notify(u64 msr_va
+ struct pkg_device *pkgdev;
+ unsigned long flags;
+
+- spin_lock_irqsave(&pkg_temp_lock, flags);
++ raw_spin_lock_irqsave(&pkg_temp_lock, flags);
+ ++pkg_interrupt_cnt;
+
+ disable_pkg_thres_interrupt();
+@@ -335,7 +335,7 @@ static int pkg_thermal_notify(u64 msr_va
+ pkg_thermal_schedule_work(pkgdev->cpu, &pkgdev->work);
+ }
+
+- spin_unlock_irqrestore(&pkg_temp_lock, flags);
++ raw_spin_unlock_irqrestore(&pkg_temp_lock, flags);
+ return 0;
+ }
+
+@@ -381,9 +381,9 @@ static int pkg_temp_thermal_device_add(u
+ pkgdev->msr_pkg_therm_high);
+
+ cpumask_set_cpu(cpu, &pkgdev->cpumask);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ packages[pkgid] = pkgdev;
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ return 0;
+ }
+
+@@ -420,7 +420,7 @@ static int pkg_thermal_cpu_offline(unsig
+ }
+
+ /* Protect against work and interrupts */
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+
+ /*
+ * Check whether this cpu was the current target and store the new
+@@ -452,9 +452,9 @@ static int pkg_thermal_cpu_offline(unsig
+ * To cancel the work we need to drop the lock, otherwise
+ * we might deadlock if the work needs to be flushed.
+ */
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+ cancel_delayed_work_sync(&pkgdev->work);
+- spin_lock_irq(&pkg_temp_lock);
++ raw_spin_lock_irq(&pkg_temp_lock);
+ /*
+ * If this is not the last cpu in the package and the work
+ * did not run after we dropped the lock above, then we
+@@ -465,7 +465,7 @@ static int pkg_thermal_cpu_offline(unsig
+ pkg_thermal_schedule_work(target, &pkgdev->work);
+ }
+
+- spin_unlock_irq(&pkg_temp_lock);
++ raw_spin_unlock_irq(&pkg_temp_lock);
+
+ /* Final cleanup if this is the last cpu */
+ if (lastcpu)
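
On PREEMPT_RT, spinlock_t is backed by an rtmutex and may sleep, so it must
not be taken from genuinely atomic context such as the thermal interrupt
notifier above; raw_spinlock_t keeps the classic non-sleeping behaviour. A
minimal sketch of the distinction (my_atomic_lock and the two functions are
illustrative names):

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(my_atomic_lock); /* never sleeps, even on RT */
    static DEFINE_SPINLOCK(my_normal_lock);     /* rtmutex-backed on RT */

    /* Called from a context that must not sleep on PREEMPT_RT, e.g. a
     * hard-irq path like pkg_thermal_notify() in the hunk above. */
    static void my_notifier_path(void)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&my_atomic_lock, flags);
            /* short, bounded critical section */
            raw_spin_unlock_irqrestore(&my_atomic_lock, flags);
    }

    /* Called from preemptible task context, where blocking is fine. */
    static void my_task_path(void)
    {
            spin_lock(&my_normal_lock); /* may sleep on PREEMPT_RT */
            spin_unlock(&my_normal_lock);
    }
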
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index e070896843c8..e9cf029458f5 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4510,7 +4510,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4515,7 +4515,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4520,14 +4520,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4525,14 +4525,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 7d32bc78064a..47c6d6e0ce42 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -7013,6 +7013,14 @@ int kvm_arch_init(void *opaque)
+@@ -7017,6 +7017,14 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-preempt-Check-preemption-level-before-looking-at.patch b/patches/x86-preempt-Check-preemption-level-before-looking-at.patch
new file mode 100644
index 000000000000..ac47d1a95a7d
--- /dev/null
+++ b/patches/x86-preempt-Check-preemption-level-before-looking-at.patch
@@ -0,0 +1,24 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Aug 2019 17:08:58 +0200
+Subject: [PATCH] x86: preempt: Check preemption level before looking at
+ lazy-preempt
+
+Before evaluating the lazy-preempt state it must be ensured that the
+preempt count is zero.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/preempt.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -99,6 +99,8 @@ static __always_inline bool __preempt_co
+ if (____preempt_count_dec_and_test())
+ return true;
+ #ifdef CONFIG_PREEMPT_LAZY
++ if (preempt_count())
++ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
+ return test_thread_flag(TIF_NEED_RESCHED_LAZY);
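
The point of the fix: the lazy-preempt state is only meaningful once the
regular preempt count has dropped to zero; without the added check the
helper could report "preempt now" while preemption is still disabled. A
simplified restatement of the resulting decision order (a sketch mirroring
the hunk above, not the actual inline helper):

    /* Sketch: same logic as the patched __preempt_count_dec_and_test(). */
    static inline bool preempt_dec_and_test_sketch(void)
    {
            if (____preempt_count_dec_and_test())
                    return true;    /* the full preempt count hit zero */
    #ifdef CONFIG_PREEMPT_LAZY
            if (preempt_count())
                    return false;   /* still non-preemptible, do not look
                                     * at the lazy state yet */
            if (current_thread_info()->preempt_lazy_count)
                    return false;   /* lazy preemption disabled */
            return test_thread_flag(TIF_NEED_RESCHED_LAZY);
    #else
            return false;
    #endif
    }
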
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index fca6ad9adc9f..d888892ffbb3 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -646,7 +646,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
+@@ -670,7 +670,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
cmpl $0, PER_CPU_VAR(__preempt_count)