author:    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2018-12-13 21:53:24 +0100
committer: Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2018-12-13 21:53:24 +0100
commit:    ea6b54ce3e869fbfb051049fc5ebcb00fadf1441 (patch)
tree:      4c41d2e4a2b3723fb37e5b7ba33c787a1cc3ceef
parent:    b841de08a011b44d94f0829ea5a7a37bba0d17ca (diff)
download:  linux-rt-ea6b54ce3e869fbfb051049fc5ebcb00fadf1441.tar.gz

[ANNOUNCE] v4.19.8-rt6 (tag: v4.19.8-rt6-patches)

Dear RT folks!

I'm pleased to announce the v4.19.8-rt6 patch set.

Changes since v4.19.8-rt5:

  - Move a state change in do_nanosleep() to avoid a warning in
    hrtimer_cancel().

  - Disable tracing events on i915. The tracing events use spin_locks(),
    which do not work on -RT. Reported by Luca Abeni.

  - A recently added preemption check on x86 in __flush_tlb_all()
    triggers on RT. Preemption is now disabled to get an environment
    similar to !RT.

  - A bugfix in __fpu__restore_sig() on x86 requires extra care on RT.

Known issues
  - A warning triggered in "rcu_note_context_switch" originated from
    SyS_timer_gettime(). The issue was always there, it is now visible.
    Reported by Grygorii Strashko and Daniel Wagner.

The delta patch against v4.19.8-rt5 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/incr/patch-4.19.8-rt5-rt6.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.19.8-rt6

The RT patch against v4.19.8 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patch-4.19.8-rt6.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.8-rt6.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--  patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 10
-rw-r--r--  patches/HACK-printk-drop-the-logbuf_lock-more-often.patch | 8
-rw-r--r--  patches/add_migrate_disable.patch | 2
-rw-r--r--  patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch | 6
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 4
-rw-r--r--  patches/cpu-hotplug--Implement-CPU-pinning.patch | 12
-rw-r--r--  patches/cpumask-disable-offstack-on-rt.patch | 2
-rw-r--r--  patches/drm-i915-disable-tracing-on-RT.patch | 40
-rw-r--r--  patches/genirq-update-irq_set_irqchip_state-documentation.patch | 2
-rw-r--r--  patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 16
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 6
-rw-r--r--  patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 2
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 2
-rw-r--r--  patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch | 44
-rw-r--r--  patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch | 4
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 14
-rw-r--r--  patches/kgb-serial-hackaround.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch | 2
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 4
-rw-r--r--  patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch | 4
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch | 4
-rw-r--r--  patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch | 2
-rw-r--r--  patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 18
-rw-r--r--  patches/printk-kill.patch | 4
-rw-r--r--  patches/printk-rt-aware.patch | 14
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 2
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 2
-rw-r--r--  patches/rcu-Eliminate-softirq-processing-from-rcutree.patch | 6
-rw-r--r--  patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch | 14
-rw-r--r--  patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r--  patches/rtmutex-annotate-sleeping-lock-context.patch | 4
-rw-r--r--  patches/sched-delay-put-task.patch | 2
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 4
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 2
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 4
-rw-r--r--  patches/series | 4
-rw-r--r--  patches/skbufhead-raw-lock.patch | 12
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch | 6
-rw-r--r--  patches/softirq-split-locks.patch | 4
-rw-r--r--  patches/srcu-use-cpu_online-instead-custom-check.patch | 4
-rw-r--r--  patches/x86-fpu-Disable-preemption-around-local_bh_disable.patch | 31
-rw-r--r--  patches/x86-mm-pat-disable-preemption-__split_large_page-aft.patch | 54
45 files changed, 284 insertions(+), 111 deletions(-)
diff --git a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index 444e3b7c9698..ffbe53703a21 100644
--- a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-@@ -3887,9 +3887,9 @@ static int __do_tune_cpucache(struct kme
+@@ -3891,9 +3891,9 @@ static int __do_tune_cpucache(struct kme
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
@@ -335,7 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
free_percpu(prev);
-@@ -4014,9 +4014,9 @@ static void drain_array(struct kmem_cach
+@@ -4018,9 +4018,9 @@ static void drain_array(struct kmem_cach
return;
}
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -4100,7 +4100,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4104,7 +4104,7 @@ void get_slabinfo(struct kmem_cache *cac
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -356,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
-@@ -4109,7 +4109,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4113,7 +4113,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
@@ -365,7 +365,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
-@@ -4324,13 +4324,13 @@ static int leaks_show(struct seq_file *m
+@@ -4328,13 +4328,13 @@ static int leaks_show(struct seq_file *m
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 80534af12fa6..0e0148abb1df 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1415,12 +1415,23 @@ static int syslog_print_all(char __user
+@@ -1420,12 +1420,23 @@ static int syslog_print_all(char __user
u64 next_seq;
u64 seq;
u32 idx;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
-@@ -1433,6 +1444,14 @@ static int syslog_print_all(char __user
+@@ -1438,6 +1449,14 @@ static int syslog_print_all(char __user
len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1444,6 +1463,14 @@ static int syslog_print_all(char __user
+@@ -1449,6 +1468,14 @@ static int syslog_print_all(char __user
len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1481,6 +1508,7 @@ static int syslog_print_all(char __user
+@@ -1486,6 +1513,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index d3264f234656..8aa2caf6a1b1 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7053,3 +7082,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7059,3 +7088,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
diff --git a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
index 95c89ea5e088..a23254b1ae6d 100644
--- a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
+++ b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -969,12 +969,21 @@ void blk_queue_exit(struct request_queue
+@@ -968,12 +968,21 @@ void blk_queue_exit(struct request_queue
percpu_ref_put(&q->q_usage_counter);
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(struct timer_list *t)
-@@ -1067,6 +1076,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -1066,6 +1075,7 @@ struct request_queue *blk_alloc_queue_no
queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q);
init_waitqueue_head(&q->mq_freeze_wq);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Init percpu_ref in atomic mode so that it's faster to shutdown.
-@@ -3957,6 +3967,8 @@ int __init blk_dev_init(void)
+@@ -3956,6 +3966,8 @@ int __init blk_dev_init(void)
if (!kblockd_workqueue)
panic("Failed to create kblockd\n");
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index 07f01ab9d42e..a1beb88db1ba 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7102,7 +7102,10 @@ void migrate_disable(void)
+@@ -7108,7 +7108,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7132,7 +7135,10 @@ void migrate_enable(void)
+@@ -7138,7 +7141,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch
index 0d86bf078762..739a6920e4b4 100644
--- a/patches/cpu-hotplug--Implement-CPU-pinning.patch
+++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# endif
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -74,6 +74,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+@@ -75,6 +75,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
.fail = CPUHP_INVALID,
};
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
static struct lockdep_map cpuhp_state_up_map =
STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
-@@ -285,7 +290,28 @@ static int cpu_hotplug_disabled;
+@@ -286,7 +291,28 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -293,6 +319,13 @@ void pin_current_cpu(void)
+@@ -294,6 +320,13 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -846,6 +879,7 @@ static int take_cpu_down(void *_param)
+@@ -853,6 +886,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -858,11 +892,14 @@ static int takedown_cpu(unsigned int cpu
+@@ -865,11 +899,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -881,6 +918,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -888,6 +925,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index 9c5d9049329e..87887d374e97 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -938,7 +938,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
+@@ -934,7 +934,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
diff --git a/patches/drm-i915-disable-tracing-on-RT.patch b/patches/drm-i915-disable-tracing-on-RT.patch
new file mode 100644
index 000000000000..634ce8c06b0d
--- /dev/null
+++ b/patches/drm-i915-disable-tracing-on-RT.patch
@@ -0,0 +1,40 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 6 Dec 2018 09:52:20 +0100
+Subject: [PATCH] drm/i915: disable tracing on -RT
+
+Luca Abeni reported this:
+| BUG: scheduling while atomic: kworker/u8:2/15203/0x00000003
+| CPU: 1 PID: 15203 Comm: kworker/u8:2 Not tainted 4.19.1-rt3 #10
+| Call Trace:
+| rt_spin_lock+0x3f/0x50
+| gen6_read32+0x45/0x1d0 [i915]
+| g4x_get_vblank_counter+0x36/0x40 [i915]
+| trace_event_raw_event_i915_pipe_update_start+0x7d/0xf0 [i915]
+
+The tracing events, trace_i915_pipe_update_start() among others, use
+functions which acquire spin locks. A few trace points use
+intel_get_crtc_scanline(), others use ->get_vblank_counter(), which also
+might acquire a sleeping lock.
+
+Based on this I don't see any other way than to disable the trace points
+on RT.
+
+Cc: stable-rt@vger.kernel.org
+Reported-by: Luca Abeni <lucabe72@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_trace.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_trace.h
++++ b/drivers/gpu/drm/i915/i915_trace.h
+@@ -2,6 +2,10 @@
+ #if !defined(_I915_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+ #define _I915_TRACE_H_
+
++#ifdef CONFIG_PREEMPT_RT_BASE
++#define NOTRACE
++#endif
++
+ #include <linux/stringify.h>
+ #include <linux/types.h>
+ #include <linux/tracepoint.h>
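
For reference, the reason the bare "#define NOTRACE" is sufficient: the
generic tracepoint header only compiles the trace_*() calls in when NOTRACE
is unset. A minimal sketch of that mechanism, based on the guard in
include/linux/tracepoint.h (simplified, shown only as an illustration):

	/* include/linux/tracepoint.h (simplified sketch) */
	#if defined(CONFIG_TRACEPOINTS) && !defined(NOTRACE)
	#define TRACEPOINTS_ENABLED
	#endif

	/*
	 * Without TRACEPOINTS_ENABLED each generated trace_<name>() call
	 * collapses to an empty static inline, so on -RT none of the i915
	 * trace points reach the spin_lock()/->get_vblank_counter() paths
	 * shown in the backtrace above.
	 */
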
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index b76cd28a20e5..a2f12d933b2c 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2274,7 +2274,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
index 243ab1d244f2..fb642b9c8b46 100644
--- a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
+++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -74,7 +74,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+@@ -75,7 +75,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
.fail = CPUHP_INVALID,
};
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
#endif
-@@ -290,6 +290,7 @@ static int cpu_hotplug_disabled;
+@@ -291,6 +291,7 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin;
unsigned int cpu;
int ret;
-@@ -312,6 +313,7 @@ void pin_current_cpu(void)
+@@ -313,6 +314,7 @@ void pin_current_cpu(void)
goto again;
}
current->pinned_on_cpu = cpu;
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -319,6 +321,7 @@ void pin_current_cpu(void)
+@@ -320,6 +322,7 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
-@@ -326,6 +329,7 @@ void unpin_current_cpu(void)
+@@ -327,6 +330,7 @@ void unpin_current_cpu(void)
current->pinned_on_cpu = -1;
__read_rt_unlock(cpuhp_pin);
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -879,7 +883,9 @@ static int take_cpu_down(void *_param)
+@@ -886,7 +890,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -892,14 +898,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -899,14 +905,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -918,7 +928,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -925,7 +935,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 9645f75c9d6d..0a7b6988255f 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Wrappers which go away once all code is converted */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -280,6 +280,21 @@ static int cpu_hotplug_disabled;
+@@ -281,6 +281,21 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7197,6 +7197,7 @@ void migrate_disable(void)
+@@ -7203,6 +7203,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7262,12 +7263,15 @@ void migrate_enable(void)
+@@ -7268,12 +7269,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index 4a1fb25269bb..b25d26fc0cb8 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -2245,7 +2245,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2250,7 +2250,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
apic->vcpu = vcpu;
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 544ce46ce102..5e50d43e2f0e 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -3115,10 +3115,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -3116,10 +3116,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;
diff --git a/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
new file mode 100644
index 000000000000..dbfcc1623814
--- /dev/null
+++ b/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
@@ -0,0 +1,44 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 6 Dec 2018 10:15:13 +0100
+Subject: [PATCH] hrtimer: move state change before hrtimer_cancel in
+ do_nanosleep()
+
+There is a small window between setting t->task to NULL and waking the
+task up (which would set TASK_RUNNING). So the timer can fire, run and
+set ->task to NULL while the other side, do_nanosleep(), has not yet
+entered freezable_schedule(). After all we are preemptible here (in
+do_nanosleep() and on the timer wake-up path) and on KVM/virt the
+virt-CPU might get preempted.
+In that case do_nanosleep() does not enter freezable_schedule() but
+cancels the timer, which is still running, and waits for it via
+hrtimer_wait_for_timer(). Then wait_event()/might_sleep() complains
+that it is invoked with state != TASK_RUNNING.
+This isn't a problem since the state is reset to TASK_RUNNING later
+anyway and we don't rely on the previous state.
+
+Move the state update to TASK_RUNNING before hrtimer_cancel() so there
+are no complaints from might_sleep() about the wrong state.
+
+Cc: stable-rt@vger.kernel.org
+Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1785,12 +1785,12 @@ static int __sched do_nanosleep(struct h
+ if (likely(t->task))
+ freezable_schedule();
+
++ __set_current_state(TASK_RUNNING);
+ hrtimer_cancel(&t->timer);
+ mode = HRTIMER_MODE_ABS;
+
+ } while (t->task && !signal_pending(current));
+
+- __set_current_state(TASK_RUNNING);
+
+ if (!t->task)
+ return 0;
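
The ordering matters because hrtimer_cancel() on -RT can block in
hrtimer_wait_for_timer(). A sketch of the sleep loop after this change,
with the race window annotated; the surrounding lines are reconstructed
from the mainline do_nanosleep() and may differ slightly:

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);

		if (likely(t->task))
			freezable_schedule();

		/*
		 * The timer may have fired and cleared t->task after the
		 * check above but before this task was woken, so we can
		 * arrive here still in TASK_INTERRUPTIBLE. Setting
		 * TASK_RUNNING first keeps might_sleep() quiet when
		 * hrtimer_cancel() has to wait for the running handler.
		 */
		__set_current_state(TASK_RUNNING);
		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));
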
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index 245f3a6ee1cc..8b917b609fbe 100644
--- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1777,6 +1777,11 @@ static void call_console_drivers(const c
+@@ -1782,6 +1782,11 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
-@@ -2535,6 +2540,11 @@ void console_unblank(void)
+@@ -2540,6 +2545,11 @@ void console_unblank(void)
{
struct console *c;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index c27c4c44324d..b409f1ed624a 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -259,7 +259,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1389,7 +1390,7 @@ extern struct pid *cad_pid;
+@@ -1390,7 +1391,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6317,7 +6317,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6351,7 +6351,7 @@ select_task_rq_fair(struct task_struct *
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7056,14 +7056,14 @@ int can_migrate_task(struct task_struct
+@@ -7090,14 +7090,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -642,7 +642,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7083,7 +7083,7 @@ int can_migrate_task(struct task_struct
+@@ -7117,7 +7117,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7704,7 +7704,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7738,7 +7738,7 @@ check_cpu_capacity(struct rq *rq, struct
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8319,7 +8319,7 @@ static struct sched_group *find_busiest_
+@@ -8353,7 +8353,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8715,7 +8715,7 @@ static int load_balance(int this_cpu, st
+@@ -8749,7 +8749,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index cce7b29249f9..6794740eadba 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -62,7 +62,7 @@ Jason.
char *help, short minlen) { return 0; }
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
-@@ -854,9 +854,11 @@ int kdb_printf(const char *fmt, ...)
+@@ -857,9 +857,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 72cdd2b3c760..4c1841b6475d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt6
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 7c29e8d42337..69dc3c2c43fe 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7184,8 +7184,9 @@ void __init free_area_init(unsigned long
+@@ -7182,8 +7182,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 76bdfd8f6f34..7c44cd84644c 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8062,7 +8085,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8068,7 +8091,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8071,7 +8094,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8077,7 +8100,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 43d695ae42c4..b488847bb21c 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5927,6 +5927,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -5931,6 +5931,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -5938,6 +5939,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -5942,6 +5943,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 124ecdec12e8..e50ddb6f1d94 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1208,6 +1208,9 @@ struct task_struct {
+@@ -1209,6 +1209,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
@@ -231,7 +231,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (dev_xmit_complete(rc)) {
HARD_TX_UNLOCK(dev, txq);
goto out;
-@@ -8360,7 +8365,7 @@ static void netdev_init_one_queue(struct
+@@ -8367,7 +8372,7 @@ static void netdev_init_one_queue(struct
/* Initialize queue lock */
spin_lock_init(&queue->_xmit_lock);
netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index e94de9470dc2..5a5b2f4fbd92 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -6354,7 +6354,7 @@ static __latent_entropy void net_rx_acti
+@@ -6361,7 +6361,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
index e40178dc0c0d..5cc67753eb6d 100644
--- a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
+++ b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
-@@ -130,46 +130,52 @@ void of_populate_phandle_cache(void)
+@@ -132,46 +132,52 @@ void of_populate_phandle_cache(void)
u32 cache_entries;
struct device_node *np;
u32 phandles = 0;
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index c399b0aa4718..8d40c9e02220 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1715,6 +1715,44 @@ static inline int test_tsk_need_resched(
+@@ -1725,6 +1725,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -234,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -303,11 +303,13 @@ void pin_current_cpu(void)
+@@ -304,11 +304,13 @@ void pin_current_cpu(void)
return;
}
cpu = smp_processor_id();
@@ -379,7 +379,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7176,6 +7252,7 @@ void migrate_disable(void)
+@@ -7182,6 +7258,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -387,7 +387,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7243,6 +7320,7 @@ void migrate_enable(void)
+@@ -7249,6 +7326,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -395,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7251,6 +7329,7 @@ void migrate_enable(void)
+@@ -7257,6 +7335,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6578,7 +6578,7 @@ static void check_preempt_wakeup(struct
+@@ -6612,7 +6612,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -9689,7 +9689,7 @@ static void task_fork_fair(struct task_s
+@@ -9723,7 +9723,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9713,7 +9713,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9747,7 +9747,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -491,7 +491,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1641,6 +1641,15 @@ extern void reweight_task(struct task_st
+@@ -1639,6 +1639,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index bc904b60a3a9..a9d397ddf4aa 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1892,6 +1944,13 @@ asmlinkage int vprintk_emit(int facility
+@@ -1897,6 +1949,13 @@ asmlinkage int vprintk_emit(int facility
bool in_sched = false;
unsigned long flags;
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -2032,26 +2091,6 @@ static bool suppress_message_printing(in
+@@ -2037,26 +2096,6 @@ static bool suppress_message_printing(in
#endif /* CONFIG_PRINTK */
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index ea3024c28ea1..7293dcf1a4f9 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1612,6 +1612,7 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1617,6 +1617,7 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Special console_lock variants that help to reduce the risk of soft-lockups.
* They allow to pass console_lock to another printk() call using a busy wait.
-@@ -1752,6 +1753,15 @@ static int console_trylock_spinning(void
+@@ -1757,6 +1758,15 @@ static int console_trylock_spinning(void
return 1;
}
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Call the console drivers, asking them to write out
* log_buf[start] to log_buf[end - 1].
-@@ -1767,6 +1777,7 @@ static void call_console_drivers(const c
+@@ -1772,6 +1782,7 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1782,6 +1793,7 @@ static void call_console_drivers(const c
+@@ -1787,6 +1798,7 @@ static void call_console_drivers(const c
else
con->write(con, text, len);
}
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
int printk_delay_msec __read_mostly;
-@@ -1973,20 +1985,30 @@ asmlinkage int vprintk_emit(int facility
+@@ -1978,20 +1990,30 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
wake_up_klogd();
-@@ -2439,6 +2461,10 @@ void console_unlock(void)
+@@ -2444,6 +2466,10 @@ void console_unlock(void)
console_seq++;
raw_spin_unlock(&logbuf_lock);
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* While actively printing out messages, if another printk()
* were to occur on another CPU, it may wait for this one to
-@@ -2457,6 +2483,7 @@ void console_unlock(void)
+@@ -2462,6 +2488,7 @@ void console_unlock(void)
}
printk_safe_exit_irqrestore(flags);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 3eb86b98a5a1..49e33c54e4d9 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1699,6 +1695,51 @@ static inline int test_tsk_need_resched(
+@@ -1709,6 +1705,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index ebab3ee7b681..c498574b87da 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1072,6 +1072,12 @@ static int irq_thread(void *data)
+@@ -1076,6 +1076,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index e186175bf504..68d378818c2d 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2879,18 +2886,17 @@ static void
+@@ -2888,18 +2895,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -2902,18 +2908,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2911,18 +2917,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4179,7 +4272,6 @@ void __init rcu_init(void)
+@@ -4188,7 +4281,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index cf24cb7360c9..c2f60ca7daaa 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -243,7 +243,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -3040,6 +3058,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3049,6 +3067,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -3067,6 +3086,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3076,6 +3095,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -259,7 +259,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3152,6 +3172,7 @@ void synchronize_sched(void)
+@@ -3161,6 +3181,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -267,7 +267,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3178,6 +3199,7 @@ void synchronize_rcu_bh(void)
+@@ -3187,6 +3208,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -275,7 +275,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3485,6 +3507,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3494,6 +3516,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -283,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3493,6 +3516,7 @@ void rcu_barrier_bh(void)
+@@ -3502,6 +3525,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4140,7 +4164,9 @@ void __init rcu_init(void)
+@@ -4149,7 +4173,9 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index 0be872f90c11..68770bc080c9 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7133,6 +7133,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7139,6 +7139,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7156,10 +7197,9 @@ void migrate_disable(void)
+@@ -7162,10 +7203,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7191,9 +7231,8 @@ void migrate_enable(void)
+@@ -7197,9 +7237,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index d3f292def978..9c96bc56e8a6 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
#ifdef CONFIG_PREEMPT_RCU
-@@ -1792,6 +1801,23 @@ static __always_inline bool need_resched
+@@ -1802,6 +1811,23 @@ static __always_inline bool need_resched
return unlikely(tif_need_resched());
}
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7302,4 +7302,49 @@ void migrate_enable(void)
+@@ -7308,4 +7308,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index b416b66f1b14..9dd233ca3771 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1185,6 +1185,9 @@ struct task_struct {
+@@ -1186,6 +1186,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index ae58e0520646..7398ab2a60e8 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6149,7 +6149,7 @@ void __init sched_init(void)
+@@ -6155,7 +6155,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
index 5dbfdae7e323..ae024f27ca81 100644
--- a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
+++ b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
-@@ -7158,7 +7158,7 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7164,7 +7164,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
update_nr_migratory(struct task_struct *p, long delta)
-@@ -7306,45 +7306,44 @@ EXPORT_SYMBOL(migrate_enable);
+@@ -7312,45 +7312,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index f19de139845f..d4b2df873e4f 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5882,6 +5892,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5885,6 +5895,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 1c303e10aaf7..5117425814c5 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1603,6 +1605,7 @@ extern struct task_struct *find_get_task
+@@ -1613,6 +1615,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1446,6 +1446,7 @@ static inline int task_on_rq_migrating(s
+@@ -1444,6 +1444,7 @@ static inline int task_on_rq_migrating(s
#define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* Child wakeup after fork */
#define WF_MIGRATED 0x4 /* Internal use, task got migrated */
diff --git a/patches/series b/patches/series
index 55976c4bf80c..7838ece48b53 100644
--- a/patches/series
+++ b/patches/series
@@ -162,6 +162,7 @@ slub-disable-SLUB_CPU_PARTIAL.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+x86-mm-pat-disable-preemption-__split_large_page-aft.patch
# RADIX TREE
radix-tree-use-local-locks.patch
@@ -187,6 +188,7 @@ hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
hrtimers-prepare-full-preemption.patch
hrtimer-by-timers-by-default-into-the-softirq-context.patch
hrtimer-Move-schedule_work-call-to-helper-thread.patch
+hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
# POSIX-CPU-TIMERS
posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -279,6 +281,7 @@ squashfs-make-use-of-local-lock-in-multi_cpu-decompr.patch
# X86
thermal-Defer-thermal-wakups-to-threads.patch
+x86-fpu-Disable-preemption-around-local_bh_disable.patch
# CPU get light
epoll-use-get-cpu-light.patch
@@ -402,6 +405,7 @@ watchdog-prevent-deferral-of-watchdogd-wakeup-on-RT.patch
# I915
drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+drm-i915-disable-tracing-on-RT.patch
# CGROUPS
cgroups-use-simple-wait-in-css_release.patch
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 98d46e41739f..bc6dc5e3cb8d 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1675,6 +1676,12 @@ static inline void skb_queue_head_init(s
+@@ -1691,6 +1692,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -5828,7 +5831,9 @@ static int process_backlog(struct napi_s
+@@ -5832,7 +5835,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5836,9 +5841,9 @@ static int process_backlog(struct napi_s
+@@ -5840,9 +5845,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -6300,13 +6305,21 @@ static __latent_entropy void net_rx_acti
+@@ -6307,13 +6312,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -9292,10 +9305,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -9299,10 +9312,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -9604,8 +9620,9 @@ static int __init net_dev_init(void)
+@@ -9611,8 +9627,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 95f8d3edcb46..069ad7a16db0 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5790,12 +5793,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5794,12 +5797,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5873,6 +5878,7 @@ void __napi_schedule(struct napi_struct
+@@ -5877,6 +5882,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -9274,6 +9280,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -9281,6 +9287,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index afd0bcff2fcb..fe46eb113464 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1189,6 +1189,8 @@ struct task_struct {
+@@ -1190,6 +1190,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -1386,6 +1388,7 @@ extern struct pid *cad_pid;
+@@ -1387,6 +1389,7 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
diff --git a/patches/srcu-use-cpu_online-instead-custom-check.patch b/patches/srcu-use-cpu_online-instead-custom-check.patch
index af2e8c6ae603..b35ad87f789a 100644
--- a/patches/srcu-use-cpu_online-instead-custom-check.patch
+++ b/patches/srcu-use-cpu_online-instead-custom-check.patch
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -3767,8 +3767,6 @@ int rcutree_online_cpu(unsigned int cpu)
+@@ -3776,8 +3776,6 @@ int rcutree_online_cpu(unsigned int cpu)
rnp->ffmask |= rdp->grpmask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return 0; /* Too early in boot for scheduler work. */
sync_sched_exp_online_cleanup(cpu);
-@@ -3796,8 +3794,6 @@ int rcutree_offline_cpu(unsigned int cpu
+@@ -3805,8 +3803,6 @@ int rcutree_offline_cpu(unsigned int cpu
}
rcutree_affinity_setting(cpu, cpu);
diff --git a/patches/x86-fpu-Disable-preemption-around-local_bh_disable.patch b/patches/x86-fpu-Disable-preemption-around-local_bh_disable.patch
new file mode 100644
index 000000000000..39ea6a63764c
--- /dev/null
+++ b/patches/x86-fpu-Disable-preemption-around-local_bh_disable.patch
@@ -0,0 +1,31 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 11 Dec 2018 15:10:33 +0100
+Subject: [PATCH] x86/fpu: Disable preemption around local_bh_disable()
+
+__fpu__restore_sig() restores the FPU state into the CPU's registers
+and disables BH in order to avoid concurrency. On !RT disabling BH also
+disables preemption, but on RT we can get preempted within the
+BH-disabled section.
+
+Add preempt_disable() while the FPU state is restored.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -344,10 +344,12 @@ static int __fpu__restore_sig(void __use
+ sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+ }
+
++ preempt_disable();
+ local_bh_disable();
+ fpu->initialized = 1;
+ fpu__restore(fpu);
+ local_bh_enable();
++ preempt_enable();
+
+ return err;
+ } else {
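
On -RT, local_bh_disable() is implemented via a per-CPU sleeping lock and
by itself leaves the task preemptible. The fixed sequence again, with the
RT reasoning annotated (the comments here are mine, for illustration):

	preempt_disable();	/* RT: local_bh_disable() alone does not
				 * imply non-preemptibility */
	local_bh_disable();
	fpu->initialized = 1;
	fpu__restore(fpu);	/* loads fpu->state into the CPU registers;
				 * being scheduled out here could clobber
				 * the just-restored register content */
	local_bh_enable();
	preempt_enable();
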
diff --git a/patches/x86-mm-pat-disable-preemption-__split_large_page-aft.patch b/patches/x86-mm-pat-disable-preemption-__split_large_page-aft.patch
new file mode 100644
index 000000000000..ec5c4253353e
--- /dev/null
+++ b/patches/x86-mm-pat-disable-preemption-__split_large_page-aft.patch
@@ -0,0 +1,54 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 11 Dec 2018 21:53:43 +0100
+Subject: [PATCH] x86/mm/pat: disable preemption __split_large_page() after
+ spin_lock()
+
+Commit "x86/mm/pat: Disable preemption around __flush_tlb_all()" added a
+warning if __flush_tlb_all() is invoked in preemptible context. On !RT
+the warning does not trigger because a spin lock is acquired which
+disables preemption. On RT the spin lock does not disable preemption and
+so the warning is seen.
+
+Disable preemption to avoid the warning in __flush_tlb_all().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/mm/pageattr.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/x86/mm/pageattr.c
++++ b/arch/x86/mm/pageattr.c
+@@ -688,11 +688,17 @@ static int
+
+ spin_lock(&pgd_lock);
+ /*
++ * Keep preemption disabled after __flush_tlb_all(), which expects not to be
++ * preempted during the flush of the local TLB.
++ */
++ preempt_disable();
++ /*
+ * Check for races, another CPU might have split this page
+ * up for us already:
+ */
+ tmp = _lookup_address_cpa(cpa, address, &level);
+ if (tmp != kpte) {
++ preempt_enable();
+ spin_unlock(&pgd_lock);
+ return 1;
+ }
+@@ -726,6 +732,7 @@ static int
+ break;
+
+ default:
++ preempt_enable();
+ spin_unlock(&pgd_lock);
+ return 1;
+ }
+@@ -764,6 +771,7 @@ static int
+ * going on.
+ */
+ __flush_tlb_all();
++ preempt_enable();
+ spin_unlock(&pgd_lock);
+
+ return 0;
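
The asymmetry this patch works around, as a sketch (this assumes the -RT
lock substitution used in this tree; simplified, not the actual
implementation):

	/*
	 * !RT: spin_lock(&pgd_lock) is a real spinlock and implicitly
	 *      disables preemption, so __flush_tlb_all() never sees a
	 *      preemptible context and the warning stays silent.
	 *
	 *  RT: spinlock_t is substituted by an rtmutex-based sleeping
	 *      lock; the task stays preemptible under spin_lock(), hence
	 *      the explicit preempt_disable()/preempt_enable() pair
	 *      around the section that ends in __flush_tlb_all().
	 */
	spin_lock(&pgd_lock);
	preempt_disable();		/* added by this patch */

	/* ... split the large page ... */

	__flush_tlb_all();		/* warns if called preemptible */
	preempt_enable();
	spin_unlock(&pgd_lock);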