author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-02-21 21:52:09 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2019-02-21 21:52:09 +0100
commit     1f68dffd808d13372f25ef8a1f4cd930e1a6663f (patch)
tree       94da616748dd648616a009c96ae2147e9e5439cd
parent     3ae89b7e239f564438f52b15cac73e52377c32d7 (diff)
download   linux-rt-1f68dffd808d13372f25ef8a1f4cd930e1a6663f.tar.gz

[ANNOUNCE] v4.19.23-rt14 (v4.19.23-rt14-patches)
Dear RT folks!

I'm pleased to announce the v4.19.23-rt14 patch set.

Changes since v4.19.23-rt13:

  - Use the specified preempt mask in should_resched() on x86. Otherwise
    a scheduling opportunity of non RT tasks could be missed.

  - Preserve the task state in cpu_chill().

  - Add two more cases to consider before warning about pending softirqs.

Known issues
  - A warning triggered in "rcu_note_context_switch" originated from
    SyS_timer_gettime(). The issue was always there, it is now
    visible. Reported by Grygorii Strashko and Daniel Wagner.

The delta patch against v4.19.23-rt13 is appended below and can be found here:

     https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/incr/patch-4.19.23-rt13-rt14.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.19.23-rt14

The RT patch against v4.19.23 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patch-4.19.23-rt14.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19.23-rt14.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
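For anyone who wants to try the release, a minimal sketch of applying it could look
like the commands below. This is illustrative only: the file names come from the URLs
above, while the directory names and the use of xz and patch are assumptions on my
part, not part of the announcement.

    # assumed starting point: a vanilla v4.19.23 source tree
    cd linux-4.19.23
    # apply the full RT patch for this release
    xz -dc ../patch-4.19.23-rt14.patch.xz | patch -p1

    # alternatively, on a tree that already carries v4.19.23-rt13,
    # apply only the incremental delta
    xz -dc ../patch-4.19.23-rt13-rt14.patch.xz | patch -p1
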
-rw-r--r--  patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 14
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 20
-rw-r--r--  patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 66
-rw-r--r--  patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 6
-rw-r--r--  patches/cpu-hotplug--Implement-CPU-pinning.patch | 6
-rw-r--r--  patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch | 6
-rw-r--r--  patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch | 6
-rw-r--r--  patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch | 8
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 4
-rw-r--r--  patches/futex-requeue-pi-fix.patch | 2
-rw-r--r--  patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 6
-rw-r--r--  patches/hrtimer-Don-t-lose-state-in-cpu_chill.patch | 41
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 2
-rw-r--r--  patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch | 14
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 20
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/md-raid5-percpu-handling-rt-aware.patch | 4
-rw-r--r--  patches/mips-disable-highmem-on-rt.patch | 2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch | 4
-rw-r--r--  patches/mm-disable-sloub-rt.patch | 4
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 34
-rw-r--r--  patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch | 6
-rw-r--r--  patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch | 2
-rw-r--r--  patches/oleg-signal-rt-fix.patch | 4
-rw-r--r--  patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch | 2
-rw-r--r--  patches/posix-timers-thread-posix-cpu-timers-on-rt.patch | 4
-rw-r--r--  patches/preempt-lazy-support.patch | 6
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 4
-rw-r--r--  patches/rtmutex-add-sleeping-lock-implementation.patch | 6
-rw-r--r--  patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch | 4
-rw-r--r--  patches/rtmutex-futex-prepare-rt.patch | 10
-rw-r--r--  patches/series | 4
-rw-r--r--  patches/signal-revert-ptrace-preempt-magic.patch | 2
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/skbufhead-raw-lock.patch | 6
-rw-r--r--  patches/slub-disable-SLUB_CPU_PARTIAL.patch | 2
-rw-r--r--  patches/softirq-Avoid-local_softirq_pending-messages-if-ksof.patch | 105
-rw-r--r--  patches/softirq-Avoid-local_softirq_pending-messages-if-task.patch | 34
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch | 2
-rw-r--r--  patches/timekeeping-split-jiffies-lock.patch | 2
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r--  patches/x86-lazy-preempt-properly-check-against-preempt-mask.patch | 25
42 files changed, 357 insertions, 148 deletions
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index acf6c398014a..f0996b587219 100644
--- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1067,7 +1067,7 @@ static inline void prefetch_buddy(struct
+@@ -1095,7 +1095,7 @@ static inline void prefetch_buddy(struct
}
/*
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1078,14 +1078,41 @@ static inline void prefetch_buddy(struct
+@@ -1106,14 +1106,41 @@ static inline void prefetch_buddy(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (count) {
struct list_head *list;
-@@ -1117,7 +1144,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1145,7 +1172,7 @@ static void free_pcppages_bulk(struct zo
if (bulkfree_pcp_prepare(page))
continue;
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are going to put the page back to the global
-@@ -1132,26 +1159,6 @@ static void free_pcppages_bulk(struct zo
+@@ -1160,26 +1187,6 @@ static void free_pcppages_bulk(struct zo
prefetch_buddy(page);
} while (--count && --batch_free && !list_empty(list));
}
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2515,13 +2522,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2536,13 +2543,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2537,14 +2549,21 @@ static void drain_pages_zone(unsigned in
+@@ -2558,14 +2570,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2766,7 +2785,10 @@ static void free_unref_page_commit(struc
+@@ -2787,7 +2806,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 1d584c3feb26..e9b94d119493 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1077,8 +1077,8 @@ static inline void prefetch_buddy(struct
+@@ -1105,8 +1105,8 @@ static inline void prefetch_buddy(struct
* And clear the zone's pages_scanned counter, to hold off the "all pages are
* pinned" detection logic.
*/
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
bool isolated_pageblocks;
struct page *page, *tmp;
-@@ -1093,12 +1093,27 @@ static void free_pcppages_bulk(struct zo
+@@ -1121,12 +1121,27 @@ static void free_pcppages_bulk(struct zo
*/
list_for_each_entry_safe(page, tmp, head, lru) {
int mt = get_pcppage_migratetype(page);
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2533,7 +2548,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2554,7 +2569,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2563,7 +2578,7 @@ static void drain_pages_zone(unsigned in
+@@ -2584,7 +2599,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2756,7 +2771,8 @@ static bool free_unref_page_prepare(stru
+@@ -2777,7 +2792,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -2785,10 +2801,8 @@ static void free_unref_page_commit(struc
+@@ -2806,10 +2822,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2799,13 +2813,17 @@ void free_unref_page(struct page *page)
+@@ -2820,13 +2834,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2816,6 +2834,11 @@ void free_unref_page_list(struct list_he
+@@ -2837,6 +2855,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -2828,10 +2851,12 @@ void free_unref_page_list(struct list_he
+@@ -2849,10 +2872,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -2844,6 +2869,21 @@ void free_unref_page_list(struct list_he
+@@ -2865,6 +2890,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index ffbe53703a21..f0bc5d128c73 100644
--- a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -728,7 +728,7 @@ static void __drain_alien_cache(struct k
+@@ -730,7 +730,7 @@ static void __drain_alien_cache(struct k
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
-@@ -739,7 +739,7 @@ static void __drain_alien_cache(struct k
+@@ -741,7 +741,7 @@ static void __drain_alien_cache(struct k
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -812,9 +812,9 @@ static int __cache_free_alien(struct kme
+@@ -814,9 +814,9 @@ static int __cache_free_alien(struct kme
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
return 1;
-@@ -855,10 +855,10 @@ static int init_cache_node(struct kmem_c
+@@ -857,10 +857,10 @@ static int init_cache_node(struct kmem_c
*/
n = get_node(cachep, node);
if (n) {
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -937,7 +937,7 @@ static int setup_kmem_cache_node(struct
+@@ -939,7 +939,7 @@ static int setup_kmem_cache_node(struct
goto fail;
n = get_node(cachep, node);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
-@@ -955,7 +955,7 @@ static int setup_kmem_cache_node(struct
+@@ -957,7 +957,7 @@ static int setup_kmem_cache_node(struct
new_alien = NULL;
}
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
/*
-@@ -994,7 +994,7 @@ static void cpuup_canceled(long cpu)
+@@ -996,7 +996,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
-@@ -1007,7 +1007,7 @@ static void cpuup_canceled(long cpu)
+@@ -1009,7 +1009,7 @@ static void cpuup_canceled(long cpu)
}
if (!cpumask_empty(mask)) {
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto free_slab;
}
-@@ -1021,7 +1021,7 @@ static void cpuup_canceled(long cpu)
+@@ -1023,7 +1023,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kfree(shared);
if (alien) {
-@@ -1205,7 +1205,7 @@ static void __init init_list(struct kmem
+@@ -1207,7 +1207,7 @@ static void __init init_list(struct kmem
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
-@@ -1376,11 +1376,11 @@ slab_out_of_memory(struct kmem_cache *ca
+@@ -1378,11 +1378,11 @@ slab_out_of_memory(struct kmem_cache *ca
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
-@@ -2173,7 +2173,7 @@ static void check_spinlock_acquired(stru
+@@ -2175,7 +2175,7 @@ static void check_spinlock_acquired(stru
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2181,7 +2181,7 @@ static void check_spinlock_acquired_node
+@@ -2183,7 +2183,7 @@ static void check_spinlock_acquired_node
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2221,9 +2221,9 @@ static void do_drain(void *arg)
+@@ -2223,9 +2223,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail = 0;
}
-@@ -2241,9 +2241,9 @@ static void drain_cpu_caches(struct kmem
+@@ -2243,9 +2243,9 @@ static void drain_cpu_caches(struct kmem
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -2265,10 +2265,10 @@ static int drain_freelist(struct kmem_ca
+@@ -2267,10 +2267,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
-@@ -2281,7 +2281,7 @@ static int drain_freelist(struct kmem_ca
+@@ -2283,7 +2283,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
n->free_objects -= cache->num;
@@ -214,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_destroy(cache, page);
nr_freed++;
}
-@@ -2729,7 +2729,7 @@ static void cache_grow_end(struct kmem_c
+@@ -2731,7 +2731,7 @@ static void cache_grow_end(struct kmem_c
INIT_LIST_HEAD(&page->lru);
n = get_node(cachep, page_to_nid(page));
@@ -223,7 +223,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
n->total_slabs++;
if (!page->active) {
list_add_tail(&page->lru, &(n->slabs_free));
-@@ -2739,7 +2739,7 @@ static void cache_grow_end(struct kmem_c
+@@ -2741,7 +2741,7 @@ static void cache_grow_end(struct kmem_c
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
}
-@@ -2907,7 +2907,7 @@ static struct page *get_first_slab(struc
+@@ -2909,7 +2909,7 @@ static struct page *get_first_slab(struc
{
struct page *page;
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
if (!page) {
n->free_touched = 1;
-@@ -2933,10 +2933,10 @@ static noinline void *cache_alloc_pfmema
+@@ -2935,10 +2935,10 @@ static noinline void *cache_alloc_pfmema
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
@@ -254,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -2945,7 +2945,7 @@ static noinline void *cache_alloc_pfmema
+@@ -2947,7 +2947,7 @@ static noinline void *cache_alloc_pfmema
fixup_slab_list(cachep, n, page, &list);
@@ -263,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
return obj;
-@@ -3004,7 +3004,7 @@ static void *cache_alloc_refill(struct k
+@@ -3006,7 +3006,7 @@ static void *cache_alloc_refill(struct k
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
@@ -272,7 +272,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
-@@ -3028,7 +3028,7 @@ static void *cache_alloc_refill(struct k
+@@ -3030,7 +3030,7 @@ static void *cache_alloc_refill(struct k
must_grow:
n->free_objects -= ac->avail;
alloc_done:
@@ -281,7 +281,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
direct_grow:
-@@ -3253,7 +3253,7 @@ static void *____cache_alloc_node(struct
+@@ -3255,7 +3255,7 @@ static void *____cache_alloc_node(struct
BUG_ON(!n);
check_irq_off();
@@ -290,7 +290,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = get_first_slab(n, false);
if (!page)
goto must_grow;
-@@ -3271,12 +3271,12 @@ static void *____cache_alloc_node(struct
+@@ -3273,12 +3273,12 @@ static void *____cache_alloc_node(struct
fixup_slab_list(cachep, n, page, &list);
@@ -305,7 +305,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (page) {
/* This slab isn't counted yet so don't update free_objects */
-@@ -3452,7 +3452,7 @@ static void cache_flusharray(struct kmem
+@@ -3454,7 +3454,7 @@ static void cache_flusharray(struct kmem
check_irq_off();
n = get_node(cachep, node);
@@ -314,7 +314,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
-@@ -3481,7 +3481,7 @@ static void cache_flusharray(struct kmem
+@@ -3483,7 +3483,7 @@ static void cache_flusharray(struct kmem
STATS_SET_FREEABLE(cachep, i);
}
#endif
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-@@ -3891,9 +3891,9 @@ static int __do_tune_cpucache(struct kme
+@@ -3893,9 +3893,9 @@ static int __do_tune_cpucache(struct kme
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
@@ -335,7 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
free_percpu(prev);
-@@ -4018,9 +4018,9 @@ static void drain_array(struct kmem_cach
+@@ -4020,9 +4020,9 @@ static void drain_array(struct kmem_cach
return;
}
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -4104,7 +4104,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4106,7 +4106,7 @@ void get_slabinfo(struct kmem_cache *cac
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -356,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
-@@ -4113,7 +4113,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4115,7 +4115,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
@@ -365,7 +365,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
-@@ -4328,13 +4328,13 @@ static int leaks_show(struct seq_file *m
+@@ -4330,13 +4330,13 @@ static int leaks_show(struct seq_file *m
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index 818944745bec..5a7896a1f7e3 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -700,7 +700,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -699,7 +699,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -748,7 +748,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
local_irq_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -827,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -826,7 +826,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
/* Exit types that need handling before we can be preempted */
handle_exit_early(vcpu, run, ret);
diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch
index 739a6920e4b4..2841608b7ef4 100644
--- a/patches/cpu-hotplug--Implement-CPU-pinning.patch
+++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -853,6 +886,7 @@ static int take_cpu_down(void *_param)
+@@ -828,6 +861,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -865,11 +899,14 @@ static int takedown_cpu(unsigned int cpu
+@@ -840,11 +874,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -888,6 +925,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -863,6 +900,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index e5617a809cb6..e6ea828d37ab 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -1016,6 +1016,7 @@ static int __zram_bvec_read(struct zram
+@@ -1026,6 +1026,7 @@ static int __zram_bvec_read(struct zram
unsigned long handle;
unsigned int size;
void *src, *dst;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (zram_wb_enabled(zram)) {
zram_slot_lock(zram, index);
-@@ -1050,6 +1051,7 @@ static int __zram_bvec_read(struct zram
+@@ -1060,6 +1061,7 @@ static int __zram_bvec_read(struct zram
size = zram_get_obj_size(zram, index);
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -1057,14 +1059,13 @@ static int __zram_bvec_read(struct zram
+@@ -1067,14 +1069,13 @@ static int __zram_bvec_read(struct zram
kunmap_atomic(dst);
ret = 0;
} else {
diff --git a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
index 9dcb00b80e8d..6c2589977175 100644
--- a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
+++ b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -3062,6 +3062,8 @@ static int __init set_dhash_entries(char
+@@ -3058,6 +3058,8 @@ static int __init set_dhash_entries(char
static void __init dcache_init_early(void)
{
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* If hashes are distributed across NUMA nodes, defer
* hash allocation until vmalloc space is available.
*/
-@@ -3078,11 +3080,16 @@ static void __init dcache_init_early(voi
+@@ -3074,11 +3076,16 @@ static void __init dcache_init_early(voi
NULL,
0,
0);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* A constructor could be added for stable state like the lists,
* but it is probably not worth it because of the cache nature
-@@ -3106,6 +3113,10 @@ static void __init dcache_init(void)
+@@ -3102,6 +3109,10 @@ static void __init dcache_init(void)
NULL,
0,
0);
diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index 8835c69049d0..063fe84a6824 100644
--- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash);
+@@ -2400,9 +2400,10 @@ EXPORT_SYMBOL(d_rehash);
static inline unsigned start_dir_add(struct inode *dir)
{
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return n;
cpu_relax();
}
-@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(str
+@@ -2410,7 +2411,8 @@ static inline unsigned start_dir_add(str
static inline void end_dir_add(struct inode *dir, unsigned n)
{
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void d_wait_lookup(struct dentry *dentry)
-@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2443,7 +2445,7 @@ struct dentry *d_alloc_parallel(struct d
retry:
rcu_read_lock();
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
r_seq = read_seqbegin(&rename_lock);
dentry = __d_lookup_rcu(parent, name, &d_seq);
if (unlikely(dentry)) {
-@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct d
+@@ -2471,7 +2473,7 @@ struct dentry *d_alloc_parallel(struct d
}
hlist_bl_lock(b);
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 41879c4d5c38..c056af3ea935 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct in
+@@ -2417,21 +2417,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2546,7 +2549,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch
index 42b8eefd7a1b..7522ec9ce204 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/futex-requeue-pi-fix.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1763,6 +1764,34 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1784,6 +1785,34 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
index fb642b9c8b46..fa9c3485b5a0 100644
--- a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
+++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -886,7 +890,9 @@ static int take_cpu_down(void *_param)
+@@ -861,7 +865,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -899,14 +905,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -874,14 +880,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -925,7 +935,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -900,7 +910,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/hrtimer-Don-t-lose-state-in-cpu_chill.patch b/patches/hrtimer-Don-t-lose-state-in-cpu_chill.patch
new file mode 100644
index 000000000000..482fe7452930
--- /dev/null
+++ b/patches/hrtimer-Don-t-lose-state-in-cpu_chill.patch
@@ -0,0 +1,41 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 19 Feb 2019 16:59:15 +0100
+Subject: [PATCH] hrtimer: Don't lose state in cpu_chill()
+
+In cpu_chill() the state is set to TASK_UNINTERRUPTIBLE and a timer is
+programmed. On return the state is always TASK_RUNNING which means we
+lose the state if it was something other than RUNNING. Also
+set_current_state() sets ->task_state_change to within cpu_chill() which
+is not expected.
+
+Save the task state on entry and restore it on return. Simply set the
+state in order to avoid updating ->task_state_change.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1902,15 +1902,18 @@ void cpu_chill(void)
+ {
+ ktime_t chill_time;
+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
++ long saved_state;
+
++ saved_state = current->state;
+ chill_time = ktime_set(0, NSEC_PER_MSEC);
+- set_current_state(TASK_UNINTERRUPTIBLE);
++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE);
+ current->flags |= PF_NOFREEZE;
+ sleeping_lock_inc();
+ schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL_HARD);
+ sleeping_lock_dec();
+ if (!freeze_flag)
+ current->flags &= ~PF_NOFREEZE;
++ __set_current_state_no_track(saved_state);
+ }
+ EXPORT_SYMBOL(cpu_chill);
+ #endif
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 4aa3e759d010..54a6a10023e0 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -142,7 +142,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
hrtimer_set_expires(&to->timer, *time);
}
-@@ -3201,10 +3199,9 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3209,10 +3207,9 @@ static int futex_wait_requeue_pi(u32 __u
if (abs_time) {
to = &timeout;
diff --git a/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
index 216cb2763cd1..3041a5b8c222 100644
--- a/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
+++ b/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
-@@ -173,6 +173,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
+@@ -179,6 +179,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock);
static DEFINE_IDA(its_vpeid_ida);
#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
-@@ -1622,7 +1623,7 @@ static void its_free_prop_table(struct p
+@@ -1628,7 +1629,7 @@ static void its_free_prop_table(struct p
get_order(LPI_PROPBASE_SZ));
}
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
phys_addr_t paddr;
-@@ -1945,30 +1946,47 @@ static void its_free_pending_table(struc
+@@ -1951,30 +1952,47 @@ static void its_free_pending_table(struc
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* set PROPBASE */
val = (page_to_phys(gic_rdists->prop_page) |
GICR_PROPBASER_InnerShareable |
-@@ -2020,6 +2038,10 @@ static void its_cpu_init_lpis(void)
+@@ -2026,6 +2044,10 @@ static void its_cpu_init_lpis(void)
/* Make sure the GIC has seen the above */
dsb(sy);
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void its_cpu_init_collection(struct its_node *its)
-@@ -3498,16 +3520,6 @@ static int redist_disable_lpis(void)
+@@ -3521,16 +3543,6 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
-@@ -3517,7 +3529,18 @@ static int redist_disable_lpis(void)
+@@ -3540,7 +3552,18 @@ static int redist_disable_lpis(void)
if (!(val & GICR_CTLR_ENABLE_LPIS))
return 0;
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
smp_processor_id());
add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
-@@ -3773,7 +3796,8 @@ int __init its_init(struct fwnode_handle
+@@ -3796,7 +3819,8 @@ int __init its_init(struct fwnode_handle
}
gic_rdists = rdists;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index c93f50ee9eec..18460630db7d 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -580,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return prev_cpu;
/*
-@@ -6000,7 +6000,7 @@ static int select_idle_core(struct task_
+@@ -6001,7 +6001,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -589,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -6034,7 +6034,7 @@ static int select_idle_smt(struct task_s
+@@ -6035,7 +6035,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -598,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
return cpu;
-@@ -6097,7 +6097,7 @@ static int select_idle_cpu(struct task_s
+@@ -6098,7 +6098,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -607,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (available_idle_cpu(cpu))
break;
-@@ -6134,7 +6134,7 @@ static int select_idle_sibling(struct ta
+@@ -6135,7 +6135,7 @@ static int select_idle_sibling(struct ta
recent_used_cpu != target &&
cpus_share_cache(recent_used_cpu, target) &&
available_idle_cpu(recent_used_cpu) &&
@@ -616,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Replace recent_used_cpu with prev as it is a potential
* candidate for the next wake:
-@@ -6352,7 +6352,7 @@ select_task_rq_fair(struct task_struct *
+@@ -6353,7 +6353,7 @@ select_task_rq_fair(struct task_struct *
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -625,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -7091,14 +7091,14 @@ int can_migrate_task(struct task_struct
+@@ -7092,14 +7092,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -642,7 +642,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -7118,7 +7118,7 @@ int can_migrate_task(struct task_struct
+@@ -7119,7 +7119,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's CPUs: */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -651,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7715,7 +7715,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7716,7 +7716,7 @@ check_cpu_capacity(struct rq *rq, struct
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -660,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a
* cpumask covering 1 CPU of the first group and 3 CPUs of the second group.
-@@ -8330,7 +8330,7 @@ static struct sched_group *find_busiest_
+@@ -8331,7 +8331,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -669,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8726,7 +8726,7 @@ static int load_balance(int this_cpu, st
+@@ -8727,7 +8727,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest CPU can't be
* moved to this_cpu:
*/
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 25e5fadbaae8..e1f3b8d87864 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt13
++-rt14
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index 365dbb0e5d9f..81991cdfa918 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6799,6 +6801,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6803,6 +6805,7 @@ static int raid456_cpu_up_prepare(unsign
__func__, cpu);
return -ENOMEM;
}
@@ -49,7 +49,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
return 0;
}
-@@ -6809,7 +6812,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6813,7 +6816,6 @@ static int raid5_alloc_percpu(struct r5c
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index b1ee61377334..e9c0bedfa891 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2514,7 +2514,7 @@ config MIPS_CRC_SUPPORT
+@@ -2517,7 +2517,7 @@ config MIPS_CRC_SUPPORT
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index a5d3f5929da6..5e8468f94179 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <asm/page.h>
struct notifier_block;
-@@ -322,6 +323,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -331,6 +332,7 @@ extern unsigned long nr_free_pagecache_p
/* linux/mm/swap.c */
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -7196,8 +7196,9 @@ void __init free_area_init(unsigned long
+@@ -7205,8 +7205,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index 41dc6c2737e2..b15a7471a571 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1633,6 +1633,7 @@ choice
+@@ -1634,6 +1634,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1653,6 +1654,7 @@ config SLUB
+@@ -1654,6 +1655,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 91bf513c25f3..612dafd9e58e 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1268,10 +1281,10 @@ static void __free_pages_ok(struct page
+@@ -1296,10 +1309,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2539,13 +2552,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2560,13 +2573,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2567,7 +2580,7 @@ static void drain_pages_zone(unsigned in
+@@ -2588,7 +2601,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2575,7 +2588,7 @@ static void drain_pages_zone(unsigned in
+@@ -2596,7 +2609,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -2613,6 +2626,7 @@ void drain_local_pages(struct zone *zone
+@@ -2634,6 +2647,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
/*
-@@ -2626,6 +2640,7 @@ static void drain_local_pages_wq(struct
+@@ -2647,6 +2661,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(NULL);
preempt_enable();
}
@@ -107,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2692,7 +2707,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2713,7 +2728,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
-@@ -2700,6 +2722,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2721,6 +2743,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2819,9 +2842,9 @@ void free_unref_page(struct page *page)
+@@ -2840,9 +2863,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -2848,7 +2871,7 @@ void free_unref_page_list(struct list_he
+@@ -2869,7 +2892,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -2863,12 +2886,12 @@ void free_unref_page_list(struct list_he
+@@ -2884,12 +2907,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3017,7 +3040,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3038,7 +3061,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, pcp, list);
-@@ -3025,7 +3048,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3046,7 +3069,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@@ -186,7 +186,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3052,7 +3075,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3073,7 +3096,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3072,14 +3095,14 @@ struct page *rmqueue(struct zone *prefer
+@@ -3093,14 +3116,14 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8085,7 +8108,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8094,7 +8117,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8094,7 +8117,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8103,7 +8126,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 02dcfb6b89d9..4198f839956e 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Time (in jiffies) of last Tx
*/
-@@ -2605,14 +2609,53 @@ void netdev_freemem(struct net_device *d
+@@ -2608,14 +2612,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *dev_get_by_index(struct net *net, int ifindex);
struct net_device *__dev_get_by_index(struct net *net, int ifindex);
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
-@@ -3788,10 +3831,48 @@ static inline u32 netif_msg_init(int deb
+@@ -3791,10 +3834,48 @@ static inline u32 netif_msg_init(int deb
return (1 << debug_value) - 1;
}
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline bool __netif_tx_acquire(struct netdev_queue *txq)
-@@ -3808,32 +3889,32 @@ static inline void __netif_tx_release(st
+@@ -3811,32 +3892,32 @@ static inline void __netif_tx_release(st
static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
{
spin_lock_bh(&txq->_xmit_lock);
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 6d187c3444c3..2a3057c424e0 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void raise_softirq(unsigned int nr);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -694,6 +694,27 @@ void __raise_softirq_irqoff(unsigned int
+@@ -722,6 +722,27 @@ void __raise_softirq_irqoff(unsigned int
}
/*
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 4b7f07e0bc87..60ee65ebdbb3 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned int sas_ss_flags;
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1226,8 +1226,8 @@ int do_send_sig_info(int sig, struct sig
+@@ -1268,8 +1268,8 @@ int do_send_sig_info(int sig, struct sig
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1256,6 +1256,39 @@ force_sig_info(int sig, struct siginfo *
+@@ -1298,6 +1298,39 @@ force_sig_info(int sig, struct siginfo *
return ret;
}
diff --git a/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch b/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
index 15db55c5590f..006169578a09 100644
--- a/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
+++ b/patches/pci-switchtec-Don-t-use-completion-s-wait-queue.patch
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret |= EPOLLIN | EPOLLRDNORM;
if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
-@@ -1038,7 +1041,8 @@ static void stdev_kill(struct switchtec_
+@@ -1041,7 +1044,8 @@ static void stdev_kill(struct switchtec_
/* Wake up and kill any users waiting on an MRPC request */
list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 05e6dc552631..a516023d76eb 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "posix-timers.h"
-@@ -1135,14 +1138,12 @@ static inline int fastpath_timer_check(s
+@@ -1136,14 +1139,12 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
* group timers. If that's so, just return.
-@@ -1195,6 +1196,153 @@ void run_posix_cpu_timers(struct task_st
+@@ -1196,6 +1197,153 @@ void run_posix_cpu_timers(struct task_st
}
}
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 40fdb38c871c..a8353ec2fc93 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -450,7 +450,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6613,7 +6613,7 @@ static void check_preempt_wakeup(struct
+@@ -6614,7 +6614,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -459,7 +459,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -9700,7 +9700,7 @@ static void task_fork_fair(struct task_s
+@@ -9701,7 +9701,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -468,7 +468,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9724,7 +9724,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9725,7 +9725,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 68badf0da2d1..5ccf43fdda91 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int hv_ce_set_next_event(unsigned long delta,
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
-@@ -1011,6 +1011,8 @@ static void vmbus_isr(void)
+@@ -1042,6 +1042,8 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool handled = false;
if (unlikely(page_addr == NULL))
-@@ -1054,7 +1056,7 @@ static void vmbus_isr(void)
+@@ -1085,7 +1087,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index b2930643f896..915dc6e9964d 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -425,7 +425,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3222,7 +3223,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3230,7 +3231,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -1041,7 +1041,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1923,6 +2268,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1949,6 +2294,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -1049,7 +1049,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
raw_spin_lock_irq(&lock->wait_lock);
-@@ -1934,6 +2280,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1960,6 +2306,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
diff --git a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index 382dd176710a..f06eb9031061 100644
--- a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -294,7 +294,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
-@@ -2274,7 +2423,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2300,7 +2449,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2358,3 +2507,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2385,3 +2534,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 9dd192089049..7a68754de759 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -3183,7 +3193,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3191,7 +3201,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3241,20 +3251,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3249,20 +3259,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3263,7 +3308,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3271,7 +3316,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3274,7 +3320,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3282,7 +3328,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3288,7 +3334,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3296,7 +3342,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
diff --git a/patches/series b/patches/series
index dab78135f40a..38e3fc22b34f 100644
--- a/patches/series
+++ b/patches/series
@@ -223,6 +223,8 @@ softirq-split-locks.patch
net-core-use-local_bh_disable-in-netif_rx_ni.patch
irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
softirq-split-timer-softirqs-out-of-ksoftirqd.patch
+softirq-Avoid-local_softirq_pending-messages-if-ksof.patch
+softirq-Avoid-local_softirq_pending-messages-if-task.patch
rtmutex-trylock-is-okay-on-RT.patch
# compile fix due to rtmutex locks
@@ -298,6 +300,7 @@ md-raid5-percpu-handling-rt-aware.patch
# CPU CHILL
rt-introduce-cpu-chill.patch
+hrtimer-Don-t-lose-state-in-cpu_chill.patch
# block
block-blk-mq-move-blk_queue_usage_counter_release-in.patch
@@ -395,6 +398,7 @@ lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
preempt-lazy-support.patch
ftrace-Fix-trace-header-alignment.patch
x86-preempt-lazy.patch
+x86-lazy-preempt-properly-check-against-preempt-mask.patch
arm-preempt-lazy-support.patch
powerpc-preempt-lazy-support.patch
arch-arm64-Add-lazy-preempt-support.patch
diff --git a/patches/signal-revert-ptrace-preempt-magic.patch b/patches/signal-revert-ptrace-preempt-magic.patch
index 2dd895a1c0b0..ea0adee2f75e 100644
--- a/patches/signal-revert-ptrace-preempt-magic.patch
+++ b/patches/signal-revert-ptrace-preempt-magic.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -2052,15 +2052,7 @@ static void ptrace_stop(int exit_code, i
+@@ -2094,15 +2094,7 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 35422669f1b7..dd828e3c6ce7 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1705,7 +1765,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1747,7 +1807,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index ffbaa0f6277d..6b4332cc2314 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2967,6 +2967,7 @@ struct softnet_data {
+@@ -2970,6 +2970,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -9304,10 +9317,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -9307,10 +9320,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -9616,8 +9632,9 @@ static int __init net_dev_init(void)
+@@ -9619,8 +9635,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 60248ae03d5c..998b4533a8d2 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1697,7 +1697,7 @@ config SLAB_FREELIST_HARDENED
+@@ -1698,7 +1698,7 @@ config SLAB_FREELIST_HARDENED
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/softirq-Avoid-local_softirq_pending-messages-if-ksof.patch b/patches/softirq-Avoid-local_softirq_pending-messages-if-ksof.patch
new file mode 100644
index 000000000000..81bf7516549f
--- /dev/null
+++ b/patches/softirq-Avoid-local_softirq_pending-messages-if-ksof.patch
@@ -0,0 +1,105 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 18 Feb 2019 13:19:59 +0100
+Subject: [PATCH] softirq: Avoid "local_softirq_pending" messages if
+ ksoftirqd is blocked
+
+If the ksoftirqd thread has a softirq pending and is blocked on the
+`local_softirq_locks' lock then softirq_check_pending_idle() won't
+complain because the "lock owner" will mask away this softirq from the
+mask of pending softirqs.
+If ksoftirqd has an additional softirq pending then it won't be masked
+out because we never look at ksoftirqd's mask.
+
+If there are still pending softirqs while going to idle, check
+ksoftirqd's and ktimersoftd's masks before complaining about unhandled
+softirqs.
+
+Cc: stable-rt@vger.kernel.org
+Tested-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 57 +++++++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 41 insertions(+), 16 deletions(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -92,6 +92,31 @@ static inline void softirq_clr_runner(un
+ sr->runner[sirq] = NULL;
+ }
+
++static bool softirq_check_runner_tsk(struct task_struct *tsk,
++ unsigned int *pending)
++{
++ bool ret = false;
++
++ if (!tsk)
++ return ret;
++
++ /*
++ * The wakeup code in rtmutex.c wakes up the task
++ * _before_ it sets pi_blocked_on to NULL under
++ * tsk->pi_lock. So we need to check for both: state
++ * and pi_blocked_on.
++ */
++ raw_spin_lock(&tsk->pi_lock);
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
++ /* Clear all bits pending in that task */
++ *pending &= ~(tsk->softirqs_raised);
++ ret = true;
++ }
++ raw_spin_unlock(&tsk->pi_lock);
++
++ return ret;
++}
++
+ /*
+ * On preempt-rt a softirq running context might be blocked on a
+ * lock. There might be no other runnable task on this CPU because the
+@@ -104,6 +129,7 @@ static inline void softirq_clr_runner(un
+ */
+ void softirq_check_pending_idle(void)
+ {
++ struct task_struct *tsk;
+ static int rate_limit;
+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners);
+ u32 warnpending;
+@@ -113,24 +139,23 @@ void softirq_check_pending_idle(void)
+ return;
+
+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK;
++ if (!warnpending)
++ return;
+ for (i = 0; i < NR_SOFTIRQS; i++) {
+- struct task_struct *tsk = sr->runner[i];
++ tsk = sr->runner[i];
++
++ if (softirq_check_runner_tsk(tsk, &warnpending))
++ warnpending &= ~(1 << i);
++ }
+
+- /*
+- * The wakeup code in rtmutex.c wakes up the task
+- * _before_ it sets pi_blocked_on to NULL under
+- * tsk->pi_lock. So we need to check for both: state
+- * and pi_blocked_on.
+- */
+- if (tsk) {
+- raw_spin_lock(&tsk->pi_lock);
+- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
+- /* Clear all bits pending in that task */
+- warnpending &= ~(tsk->softirqs_raised);
+- warnpending &= ~(1 << i);
+- }
+- raw_spin_unlock(&tsk->pi_lock);
+- }
++ if (warnpending) {
++ tsk = __this_cpu_read(ksoftirqd);
++ softirq_check_runner_tsk(tsk, &warnpending);
++ }
++
++ if (warnpending) {
++ tsk = __this_cpu_read(ktimer_softirqd);
++ softirq_check_runner_tsk(tsk, &warnpending);
+ }
+
+ if (warnpending) {
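
The change above boils down to this: a softirq still pending when entering
idle is only worth a "local_softirq_pending" warning if neither the
per-softirq runner nor ksoftirqd/ktimersoftd already has it in its
->softirqs_raised mask while runnable or blocked on a lock. A rough,
self-contained user-space model of that masking step follows; struct task,
check_runner() and the bit values are simplified stand-ins for the kernel's
types, not actual kernel code:

#include <stdbool.h>
#include <stdio.h>

#define TIMER_SOFTIRQ	(1u << 1)
#define NET_RX_SOFTIRQ	(1u << 3)

struct task {
	bool pi_blocked_on;		/* blocked on an rtmutex */
	bool running;			/* TASK_RUNNING */
	unsigned int softirqs_raised;
};

/* Clear the bits a runnable or lock-blocked handler will process itself. */
static bool check_runner(const struct task *tsk, unsigned int *pending)
{
	if (!tsk)
		return false;
	if (tsk->pi_blocked_on || tsk->running) {
		*pending &= ~tsk->softirqs_raised;
		return true;
	}
	return false;
}

int main(void)
{
	/* ksoftirqd blocked on a lock with two softirqs raised. */
	struct task ksoftirqd = {
		.pi_blocked_on = true,
		.softirqs_raised = TIMER_SOFTIRQ | NET_RX_SOFTIRQ,
	};
	unsigned int pending = TIMER_SOFTIRQ | NET_RX_SOFTIRQ;

	check_runner(&ksoftirqd, &pending);
	printf("unexplained pending: %#x\n", pending);	/* 0 -> no warning */
	return 0;
}
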
diff --git a/patches/softirq-Avoid-local_softirq_pending-messages-if-task.patch b/patches/softirq-Avoid-local_softirq_pending-messages-if-task.patch
new file mode 100644
index 000000000000..2dc1897825d7
--- /dev/null
+++ b/patches/softirq-Avoid-local_softirq_pending-messages-if-task.patch
@@ -0,0 +1,34 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 19 Feb 2019 16:49:29 +0100
+Subject: [PATCH] softirq: Avoid "local_softirq_pending" messages if task
+ is in cpu_chill()
+
+If the softirq thread enters cpu_chill() then its ->state is UNINTERRUPTIBLE,
+->pi_blocked_on is not set and so its mask is not taken into account.
+
+->sleeping_lock is incremented by cpu_chill() since it is also required to
+avoid a splat by RCU in case cpu_chill() is used while an RCU read lock
+is held. Use the same mechanism for the softirq-pending check.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/softirq.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -105,9 +105,12 @@ static bool softirq_check_runner_tsk(str
+ * _before_ it sets pi_blocked_on to NULL under
+ * tsk->pi_lock. So we need to check for both: state
+ * and pi_blocked_on.
++ * The test against UNINTERRUPTIBLE + ->sleeping_lock is in case the
++ * task does cpu_chill().
+ */
+ raw_spin_lock(&tsk->pi_lock);
+- if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) {
++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING ||
++ (tsk->state == TASK_UNINTERRUPTIBLE && tsk->sleeping_lock)) {
+ /* Clear all bits pending in that task */
+ *pending &= ~(tsk->softirqs_raised);
+ ret = true;
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 01b8b21eac54..c860215d5dfb 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -9286,6 +9292,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -9289,6 +9295,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index 6ab104ae7598..f03f9d045f5b 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2415,8 +2415,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2417,8 +2417,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 5ec302edb421..b04b7841c864 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6692,6 +6692,13 @@ int kvm_arch_init(void *opaque)
+@@ -6698,6 +6698,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-lazy-preempt-properly-check-against-preempt-mask.patch b/patches/x86-lazy-preempt-properly-check-against-preempt-mask.patch
new file mode 100644
index 000000000000..0151e39436fd
--- /dev/null
+++ b/patches/x86-lazy-preempt-properly-check-against-preempt-mask.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 18 Feb 2019 16:57:09 +0100
+Subject: [PATCH] x86: lazy-preempt: properly check against preempt-mask
+
+should_resched() should compare the preempt count against preempt_offset
+after unmasking the need-resched bit. Otherwise should_resched() won't work
+when preempt_offset != 0 and lazy-preempt is set.
+
+Cc: stable-rt@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/preempt.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/include/asm/preempt.h
++++ b/arch/x86/include/asm/preempt.h
+@@ -118,7 +118,7 @@ static __always_inline bool should_resch
+
+ /* preempt count == 0 ? */
+ tmp &= ~PREEMPT_NEED_RESCHED;
+- if (tmp)
++ if (tmp != preempt_offset)
+ return false;
+ if (current_thread_info()->preempt_lazy_count)
+ return false;
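
To illustrate why the comparison matters, here is a minimal user-space model
of the lazy-preempt flavour of should_resched(). It is a sketch under
simplifying assumptions: PREEMPT_NEED_RESCHED stands in for the inverted
need-resched bit folded into the per-CPU __preempt_count, and
preempt_lazy_count/need_resched_lazy stand in for the thread-info fields;
none of this is the kernel's actual definition.

#include <stdbool.h>
#include <stdio.h>

#define PREEMPT_NEED_RESCHED	0x80000000u	/* inverted: set = no resched */
#define PREEMPT_LOCK_OFFSET	1u		/* offset cond_resched_lock() passes */

static unsigned int preempt_count;	/* stand-in for per-CPU __preempt_count */
static unsigned int preempt_lazy_count;
static bool need_resched_lazy;		/* stand-in for TIF_NEED_RESCHED_LAZY */

static bool should_resched(unsigned int preempt_offset)
{
	unsigned int tmp = preempt_count;

	if (tmp == preempt_offset)		/* regular need-resched set */
		return true;

	tmp &= ~PREEMPT_NEED_RESCHED;
	if (tmp != preempt_offset)		/* rt14 fix; rt13 tested "if (tmp)" */
		return false;
	if (preempt_lazy_count)
		return false;
	return need_resched_lazy;
}

int main(void)
{
	/* Caller holds one lock-level offset, only the lazy bit is set. */
	preempt_count = PREEMPT_LOCK_OFFSET | PREEMPT_NEED_RESCHED;
	need_resched_lazy = true;

	/* Prints 1 with the fixed comparison; the old "if (tmp)" made it 0. */
	printf("%d\n", should_resched(PREEMPT_LOCK_OFFSET));
	return 0;
}

With the old test, any caller passing a non-zero offset bailed out before the
lazy checks; with the fix the lazy path is reached whenever the preempt count
matches the caller's offset.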