author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2018-10-29 18:16:51 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2018-10-29 18:16:51 +0100
commit     8393e03324b68d4cce593decf54e2cfd301d69a2 (patch)
tree       0ad92bf48ac301a6b06881c259773ef6f5a49aee
parent     d70bef33e60c3b5d6272343bd7aa4276ea13509e (diff)
download   linux-rt-8393e03324b68d4cce593decf54e2cfd301d69a2.tar.gz
[ANNOUNCE] v4.19-rt1 (tags: v4.19-rt1, v4.19-rt1-patches)
Dear RT folks!
I'm pleased to announce the v4.19-rt1 patch set.
Changes since v4.18.16-rt9:
- rebase to v4.19
Known issues
- A warning triggered in rcu_note_context_switch(), originating from
  SyS_timer_gettime(). The issue was always present; it is only now
  visible. Reported by Grygorii Strashko and Daniel Wagner.
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.19-rt1
The RT patch against v4.19 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patch-4.19-rt1.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.19/older/patches-4.19-rt1.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
157 files changed, 825 insertions(+), 2386 deletions(-)
diff --git a/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch b/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
deleted file mode 100644
index 637875992255..000000000000
--- a/patches/0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch
+++ /dev/null
@@ -1,102 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 7 May 2018 16:51:09 +0200
-Subject: [PATCH] bdi: use refcount_t for reference counting instead atomic_t
-
-refcount_t type and corresponding API should be used instead of atomic_t when
-the variable is used as a reference counter. This allows to avoid accidental
-refcounter overflows that might lead to use-after-free situations.
-
-Suggested-by: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/backing-dev-defs.h | 3 ++-
- include/linux/backing-dev.h | 4 ++--
- mm/backing-dev.c | 12 ++++++------
- 3 files changed, 10 insertions(+), 9 deletions(-)
-
---- a/include/linux/backing-dev-defs.h
-+++ b/include/linux/backing-dev-defs.h
-@@ -12,6 +12,7 @@
- #include <linux/timer.h>
- #include <linux/workqueue.h>
- #include <linux/kref.h>
-+#include <linux/refcount.h>
-
- struct page;
- struct device;
-@@ -75,7 +76,7 @@ enum wb_reason {
-  */
- struct bdi_writeback_congested {
- 	unsigned long state;		/* WB_[a]sync_congested flags */
--	atomic_t refcnt;		/* nr of attached wb's and blkg */
-+	refcount_t refcnt;		/* nr of attached wb's and blkg */
-
- #ifdef CONFIG_CGROUP_WRITEBACK
- 	struct backing_dev_info *__bdi;	/* the associated bdi, set to NULL
---- a/include/linux/backing-dev.h
-+++ b/include/linux/backing-dev.h
-@@ -404,13 +404,13 @@ static inline bool inode_cgwb_enabled(st
- static inline struct bdi_writeback_congested *
- wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
- {
--	atomic_inc(&bdi->wb_congested->refcnt);
-+	refcount_inc(&bdi->wb_congested->refcnt);
- 	return bdi->wb_congested;
- }
-
- static inline void wb_congested_put(struct bdi_writeback_congested *congested)
- {
--	if (atomic_dec_and_test(&congested->refcnt))
-+	if (refcount_dec_and_test(&congested->refcnt))
- 		kfree(congested);
- }
-
---- a/mm/backing-dev.c
-+++ b/mm/backing-dev.c
-@@ -438,10 +438,10 @@ wb_congested_get_create(struct backing_d
- 	if (new_congested) {
- 		/* !found and storage for new one already allocated, insert */
- 		congested = new_congested;
--		new_congested = NULL;
- 		rb_link_node(&congested->rb_node, parent, node);
- 		rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
--		goto found;
-+		spin_unlock_irqrestore(&cgwb_lock, flags);
-+		return congested;
- 	}
-
- 	spin_unlock_irqrestore(&cgwb_lock, flags);
-@@ -451,13 +451,13 @@ wb_congested_get_create(struct backing_d
- 	if (!new_congested)
- 		return NULL;
-
--	atomic_set(&new_congested->refcnt, 0);
-+	refcount_set(&new_congested->refcnt, 1);
- 	new_congested->__bdi = bdi;
- 	new_congested->blkcg_id = blkcg_id;
- 	goto retry;
-
- found:
--	atomic_inc(&congested->refcnt);
-+	refcount_inc(&congested->refcnt);
- 	spin_unlock_irqrestore(&cgwb_lock, flags);
- 	kfree(new_congested);
- 	return congested;
-@@ -474,7 +474,7 @@ void wb_congested_put(struct bdi_writeba
- 	unsigned long flags;
-
- 	local_irq_save(flags);
--	if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
-+	if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
- 		local_irq_restore(flags);
- 		return;
- 	}
-@@ -804,7 +804,7 @@ static int cgwb_bdi_init(struct backing_
- 	if (!bdi->wb_congested)
- 		return -ENOMEM;
-
--	atomic_set(&bdi->wb_congested->refcnt, 1);
-+	refcount_set(&bdi->wb_congested->refcnt, 1);
-
- 	err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
- 	if (err) {
diff --git a/patches/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch b/patches/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch
deleted file mode 100644
index f67eaf202856..000000000000
--- a/patches/0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 3 Jul 2018 12:56:19 +0200
-Subject: [PATCH 1/4] mm/list_lru: use list_lru_walk_one() in
- list_lru_walk_node()
-
-list_lru_walk_node() invokes __list_lru_walk_one() with -1 as the
-memcg_idx parameter. The same can be achieved by list_lru_walk_one() and
-passing NULL as memcg argument which then gets converted into -1. This
-is a preparation step when the spin_lock() function is lifted to the
-caller of __list_lru_walk_one().
-Invoke list_lru_walk_one() instead __list_lru_walk_one() when possible.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/list_lru.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/mm/list_lru.c
-+++ b/mm/list_lru.c
-@@ -272,8 +272,8 @@ unsigned long list_lru_walk_node(struct
- 	long isolated = 0;
- 	int memcg_idx;
-
--	isolated += __list_lru_walk_one(lru, nid, -1, isolate, cb_arg,
--					nr_to_walk);
-+	isolated += list_lru_walk_one(lru, nid, NULL, isolate, cb_arg,
-+				      nr_to_walk);
- 	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
- 		for_each_memcg_cache_index(memcg_idx) {
- 			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
diff --git a/patches/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch b/patches/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch
deleted file mode 100644
index 17ebd8b446ae..000000000000
--- a/patches/0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 22 Jun 2018 10:48:51 +0200
-Subject: [PATCH 1/3] mm: workingset: remove local_irq_disable() from
- count_shadow_nodes()
-
-In commit 0c7c1bed7e13 ("mm: make counting of list_lru_one::nr_items
-lockless") the
-	spin_lock(&nlru->lock);
-
-statement was replaced with
-	rcu_read_lock();
-
-in __list_lru_count_one(). The comment in count_shadow_nodes() says that
-the local_irq_disable() is required because the lock must be acquired
-with disabled interrupts and (spin_lock()) does not do so.
-Since the lock is replaced with rcu_read_lock() the local_irq_disable()
-is no longer needed. The code path is
-	list_lru_shrink_count()
-	  -> list_lru_count_one()
-	    -> __list_lru_count_one()
-	      -> rcu_read_lock()
-	      -> list_lru_from_memcg_idx()
-	      -> rcu_read_unlock()
-
-Remove the local_irq_disable() statement.
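[Editor's note: every atomic_t-to-refcount_t conversion in this queue follows the same shape. Below is a minimal sketch of that pattern with invented names — not code from the series itself, only the documented refcount_t API:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    struct obj {
            refcount_t refcnt;                      /* was: atomic_t */
    };

    static struct obj *obj_alloc(void)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

            if (o)
                    refcount_set(&o->refcnt, 1);    /* was: atomic_set(..., 1) */
            return o;
    }

    static struct obj *obj_get(struct obj *o)
    {
            refcount_inc(&o->refcnt);               /* saturates instead of wrapping */
            return o;
    }

    static void obj_put(struct obj *o)
    {
            if (refcount_dec_and_test(&o->refcnt))  /* was: atomic_dec_and_test() */
                    kfree(o);
    }

Note how the bdi patch above also changes the initial value from 0 to 1: refcount_t treats 0 as "already freed", so the reference held by the allocator must be counted explicitly.]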
-
-Cc: Kirill Tkhai <ktkhai@virtuozzo.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/workingset.c | 3 ---
- 1 file changed, 3 deletions(-)
-
---- a/mm/workingset.c
-+++ b/mm/workingset.c
-@@ -366,10 +366,7 @@ static unsigned long count_shadow_nodes(
- 	unsigned long nodes;
- 	unsigned long cache;
-
--	/* list_lru lock nests inside the IRQ-safe i_pages lock */
--	local_irq_disable();
- 	nodes = list_lru_shrink_count(&shadow_nodes, sc);
--	local_irq_enable();
-
- 	/*
- 	 * Approximate a reasonable limit for the radix tree nodes
diff --git a/patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch b/patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch
deleted file mode 100644
index e95405005c9c..000000000000
--- a/patches/0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch
+++ /dev/null
@@ -1,36 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Fri, 4 May 2018 17:45:32 +0200
-Subject: [PATCH 2/3] drivers/md/raid5: Use irqsave variant of
- atomic_dec_and_lock()
-
-The irqsave variant of atomic_dec_and_lock handles irqsave/restore when
-taking/releasing the spin lock. With this variant the call of
-local_irq_save is no longer required.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/md/raid5.c | 5 ++---
- 1 file changed, 2 insertions(+), 3 deletions(-)
-
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -409,16 +409,15 @@ void raid5_release_stripe(struct stripe_
- 		md_wakeup_thread(conf->mddev->thread);
- 	return;
- slow_path:
--	local_irq_save(flags);
- 	/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
--	if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
-+	if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) {
- 		INIT_LIST_HEAD(&list);
- 		hash = sh->hash_lock_index;
- 		do_release_stripe(conf, sh, &list);
- 		spin_unlock(&conf->device_lock);
- 		release_inactive_stripe_list(conf, &list, hash);
-+		local_irq_restore(flags);
- 	}
--	local_irq_restore(flags);
- }
-
- static inline void remove_hash(struct stripe_head *sh)
diff --git a/patches/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch b/patches/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch
deleted file mode 100644
index adc04a57bede..000000000000
--- a/patches/0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch
+++ /dev/null
@@ -1,64 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 3 Jul 2018 13:06:07 +0200
-Subject: [PATCH 2/4] mm/list_lru: Move locking from __list_lru_walk_one() to
- its caller
-
-Move the locking inside __list_lru_walk_one() to its caller. This is a
-preparation step in order to introduce list_lru_walk_one_irq() which
-does spin_lock_irq() instead of spin_lock() for the locking.
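[Editor's note: the raid5 change above is one instance of a generic conversion. A sketch of the before/after shape, with invented names; atomic_dec_and_lock_irqsave() only takes the lock and disables interrupts when the count actually drops to zero:

    #include <linux/atomic.h>
    #include <linux/spinlock.h>

    struct obj {
            atomic_t count;
    };

    static DEFINE_SPINLOCK(obj_lock);

    static void obj_put(struct obj *o)
    {
            unsigned long flags;

            /*
             * Before:
             *   local_irq_save(flags);
             *   if (atomic_dec_and_lock(&o->count, &obj_lock)) { ... }
             *   local_irq_restore(flags);
             */
            if (atomic_dec_and_lock_irqsave(&o->count, &obj_lock, flags)) {
                    /* last reference: tear down with the lock held, IRQs off */
                    spin_unlock_irqrestore(&obj_lock, flags);
            }
            /* not the last reference: lock never taken, IRQ state untouched */
    }
]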
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/list_lru.c | 18 +++++++++++++-----
- 1 file changed, 13 insertions(+), 5 deletions(-)
-
---- a/mm/list_lru.c
-+++ b/mm/list_lru.c
-@@ -204,7 +204,6 @@ static unsigned long
- 	struct list_head *item, *n;
- 	unsigned long isolated = 0;
-
--	spin_lock(&nlru->lock);
- 	l = list_lru_from_memcg_idx(nlru, memcg_idx);
- restart:
- 	list_for_each_safe(item, n, &l->list) {
-@@ -250,8 +249,6 @@ static unsigned long
- 			BUG();
- 		}
- 	}
--
--	spin_unlock(&nlru->lock);
- 	return isolated;
- }
-
-@@ -260,8 +257,14 @@ list_lru_walk_one(struct list_lru *lru,
- 		  list_lru_walk_cb isolate, void *cb_arg,
- 		  unsigned long *nr_to_walk)
- {
--	return __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
--				   isolate, cb_arg, nr_to_walk);
-+	struct list_lru_node *nlru = &lru->node[nid];
-+	unsigned long ret;
-+
-+	spin_lock(&nlru->lock);
-+	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
-+				  isolate, cb_arg, nr_to_walk);
-+	spin_unlock(&nlru->lock);
-+	return ret;
- }
- EXPORT_SYMBOL_GPL(list_lru_walk_one);
-
-@@ -276,8 +279,13 @@ unsigned long list_lru_walk_node(struct
- 					nr_to_walk);
- 	if (*nr_to_walk > 0 && list_lru_memcg_aware(lru)) {
- 		for_each_memcg_cache_index(memcg_idx) {
-+			struct list_lru_node *nlru = &lru->node[nid];
-+
-+			spin_lock(&nlru->lock);
- 			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
- 							isolate, cb_arg, nr_to_walk);
-+			spin_unlock(&nlru->lock);
-+
- 			if (*nr_to_walk <= 0)
- 				break;
- 		}
diff --git a/patches/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch b/patches/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch
deleted file mode 100644
index 84e845fb9495..000000000000
--- a/patches/0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 22 Jun 2018 11:43:35 +0200
-Subject: [PATCH 2/3] mm: workingset: make shadow_lru_isolate() use locking
- suffix
-
-shadow_lru_isolate() disables interrupts and acquires a lock. It could
-use spin_lock_irq() instead. It also uses local_irq_enable() while it
-could use spin_unlock_irq()/xa_unlock_irq().
-
-Use proper suffix for lock/unlock in order to enable/disable interrupts
-during release/acquire of a lock.
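[Editor's note: the suffix conversion described above is behaviour-preserving on mainline — the two forms below do the same thing — but only the combined primitive maps cleanly onto RT, where the lock may sleep and therefore must own the IRQ handling. Illustrative sketch with an invented lock:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_lock);

    static void critical_open_coded(void)
    {
            local_irq_disable();            /* hand-crafted: IRQ state managed... */
            spin_lock(&demo_lock);          /* ...separately from the lock */
            /* ... */
            spin_unlock(&demo_lock);
            local_irq_enable();
    }

    static void critical_suffixed(void)
    {
            spin_lock_irq(&demo_lock);      /* one primitive does both */
            /* ... */
            spin_unlock_irq(&demo_lock);
    }
]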
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/workingset.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
---- a/mm/workingset.c
-+++ b/mm/workingset.c
-@@ -431,7 +431,7 @@ static enum lru_status shadow_lru_isolat
-
- 	/* Coming from the list, invert the lock order */
- 	if (!xa_trylock(&mapping->i_pages)) {
--		spin_unlock(lru_lock);
-+		spin_unlock_irq(lru_lock);
- 		ret = LRU_RETRY;
- 		goto out;
- 	}
-@@ -469,13 +469,11 @@ static enum lru_status shadow_lru_isolat
- 			workingset_lookup_update(mapping));
-
- out_invalid:
--	xa_unlock(&mapping->i_pages);
-+	xa_unlock_irq(&mapping->i_pages);
- 	ret = LRU_REMOVED_RETRY;
- out:
--	local_irq_enable();
- 	cond_resched();
--	local_irq_disable();
--	spin_lock(lru_lock);
-+	spin_lock_irq(lru_lock);
- 	return ret;
- }
-
diff --git a/patches/0002-userns-use-refcount_t-for-reference-counting-instead.patch b/patches/0002-userns-use-refcount_t-for-reference-counting-instead.patch
deleted file mode 100644
index 938572c6ce44..000000000000
--- a/patches/0002-userns-use-refcount_t-for-reference-counting-instead.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 7 May 2018 17:09:42 +0200
-Subject: [PATCH] userns: use refcount_t for reference counting instead
- atomic_t
-
-refcount_t type and corresponding API should be used instead of atomic_t when
-the variable is used as a reference counter. This allows to avoid accidental
-refcounter overflows that might lead to use-after-free situations.
-
-Suggested-by: Peter Zijlstra <peterz@infradead.org>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/sched/user.h | 5 +++--
- kernel/user.c | 8 ++++----
- 2 files changed, 7 insertions(+), 6 deletions(-)
-
---- a/include/linux/sched/user.h
-+++ b/include/linux/sched/user.h
-@@ -4,6 +4,7 @@
-
- #include <linux/uidgid.h>
- #include <linux/atomic.h>
-+#include <linux/refcount.h>
- #include <linux/ratelimit.h>
-
- struct key;
-@@ -12,7 +13,7 @@ struct key;
-  * Some day this will be a full-fledged user tracking system..
-  */
- struct user_struct {
--	atomic_t __count;	/* reference count */
-+	refcount_t __count;	/* reference count */
- 	atomic_t processes;	/* How many processes does this user have? */
- 	atomic_t sigpending;	/* How many pending signals does this user have? */
- #ifdef CONFIG_FANOTIFY
-@@ -59,7 +60,7 @@ extern struct user_struct root_user;
- extern struct user_struct * alloc_uid(kuid_t);
- static inline struct user_struct *get_uid(struct user_struct *u)
- {
--	atomic_inc(&u->__count);
-+	refcount_inc(&u->__count);
- 	return u;
- }
- extern void free_uid(struct user_struct *);
---- a/kernel/user.c
-+++ b/kernel/user.c
-@@ -96,7 +96,7 @@ static DEFINE_SPINLOCK(uidhash_lock);
-
- /* root_user.__count is 1, for init task cred */
- struct user_struct root_user = {
--	.__count	= ATOMIC_INIT(1),
-+	.__count	= REFCOUNT_INIT(1),
- 	.processes	= ATOMIC_INIT(1),
- 	.sigpending	= ATOMIC_INIT(0),
- 	.locked_shm     = 0,
-@@ -123,7 +123,7 @@ static struct user_struct *uid_hash_find
-
- 	hlist_for_each_entry(user, hashent, uidhash_node) {
- 		if (uid_eq(user->uid, uid)) {
--			atomic_inc(&user->__count);
-+			refcount_inc(&user->__count);
- 			return user;
- 		}
- 	}
-@@ -170,7 +170,7 @@ void free_uid(struct user_struct *up)
- 		return;
-
- 	local_irq_save(flags);
--	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
-+	if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
- 		free_user(up, flags);
- 	else
- 		local_irq_restore(flags);
-@@ -191,7 +191,7 @@ struct user_struct *alloc_uid(kuid_t uid
- 		goto out_unlock;
-
- 	new->uid = uid;
--	atomic_set(&new->__count, 1);
-+	refcount_set(&new->__count, 1);
- 	ratelimit_state_init(&new->ratelimit, HZ, 100);
- 	ratelimit_set_flags(&new->ratelimit, RATELIMIT_MSG_ON_RELEASE);
-
diff --git a/patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch b/patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch
deleted file mode 100644
index dff8bdff023b..000000000000
--- a/patches/0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Fri, 4 May 2018 17:45:33 +0200
-Subject: [PATCH 3/3] drivers/md/raid5: Do not disable irq on
- release_inactive_stripe_list() call
-
-There is no need to invoke release_inactive_stripe_list() with interrupts
-disabled. All call sites, except raid5_release_stripe(), unlock
-->device_lock and enable interrupts before invoking the function.
-
-Make it consistent.
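[Editor's note: free_uid() above is the classic consumer side of dec-and-lock. A condensed, hypothetical version of the pattern this patch leaves in place — and which patch 0006 later replaces with refcount_dec_and_lock_irqsave():

    #include <linux/refcount.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hash_lock);

    struct tracked {
            refcount_t count;
            /* hash-table linkage elided */
    };

    static void tracked_put(struct tracked *t)
    {
            unsigned long flags;

            local_irq_save(flags);
            if (refcount_dec_and_lock(&t->count, &hash_lock)) {
                    /* last ref: unhash and free with hash_lock held, IRQs off */
                    spin_unlock_irqrestore(&hash_lock, flags);
                    kfree(t);
            } else {
                    local_irq_restore(flags);
            }
    }
]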
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/md/raid5.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
---- a/drivers/md/raid5.c
-+++ b/drivers/md/raid5.c
-@@ -414,9 +414,8 @@ void raid5_release_stripe(struct stripe_
- 		INIT_LIST_HEAD(&list);
- 		hash = sh->hash_lock_index;
- 		do_release_stripe(conf, sh, &list);
--		spin_unlock(&conf->device_lock);
-+		spin_unlock_irqrestore(&conf->device_lock, flags);
- 		release_inactive_stripe_list(conf, &list, hash);
--		local_irq_restore(flags);
- 	}
- }
-
diff --git a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index ccdefb2caf88..444e3b7c9698 100644
--- a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -394,7 +394,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	struct list_head slabs_partial;	/* partial list first, better asm code */
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1169,7 +1169,7 @@ static noinline int free_debug_processin
+@@ -1167,7 +1167,7 @@ static noinline int free_debug_processin
 	unsigned long uninitialized_var(flags);
 	int ret = 0;
@@ -403,7 +403,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	slab_lock(page);
 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1204,7 +1204,7 @@ static noinline int free_debug_processin
+@@ -1202,7 +1202,7 @@ static noinline int free_debug_processin
 		bulk_cnt, cnt);
 	slab_unlock(page);
@@ -412,7 +412,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	if (!ret)
 		slab_fix(s, "Object at 0x%p not freed", object);
 	return ret;
-@@ -1804,7 +1804,7 @@ static void *get_partial_node(struct kme
+@@ -1802,7 +1802,7 @@ static void *get_partial_node(struct kme
 	if (!n || !n->nr_partial)
 		return NULL;
@@ -421,7 +421,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
 		void *t;
-@@ -1829,7 +1829,7 @@ static void *get_partial_node(struct kme
+@@ -1827,7 +1827,7 @@ static void *get_partial_node(struct kme
 		break;
 	}
@@ -430,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	return object;
 }
-@@ -2075,7 +2075,7 @@ static void deactivate_slab(struct kmem_
+@@ -2073,7 +2073,7 @@ static void deactivate_slab(struct kmem_
 			 * that acquire_slab() will see a slab page that
 			 * is frozen
 			 */
@@ -439,7 +439,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		}
 	} else {
 		m = M_FULL;
-@@ -2086,7 +2086,7 @@ static void deactivate_slab(struct kmem_
+@@ -2084,7 +2084,7 @@ static void deactivate_slab(struct kmem_
 			 * slabs from diagnostic functions will not see
 			 * any frozen slabs.
 			 */
@@ -448,7 +448,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		}
 	}
-@@ -2121,7 +2121,7 @@ static void deactivate_slab(struct kmem_
+@@ -2119,7 +2119,7 @@ static void deactivate_slab(struct kmem_
 		goto redo;
 	if (lock)
@@ -457,7 +457,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	if (m == M_FREE) {
 		stat(s, DEACTIVATE_EMPTY);
-@@ -2156,10 +2156,10 @@ static void unfreeze_partials(struct kme
+@@ -2154,10 +2154,10 @@ static void unfreeze_partials(struct kme
 		n2 = get_node(s, page_to_nid(page));
 		if (n != n2) {
 			if (n)
@@ -470,7 +470,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		}
 		do {
-@@ -2188,7 +2188,7 @@ static void unfreeze_partials(struct kme
+@@ -2186,7 +2186,7 @@ static void unfreeze_partials(struct kme
 	}
 	if (n)
@@ -479,7 +479,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	while (discard_page) {
 		page = discard_page;
-@@ -2357,10 +2357,10 @@ static unsigned long count_partial(struc
+@@ -2355,10 +2355,10 @@ static unsigned long count_partial(struc
 	unsigned long x = 0;
 	struct page *page;
@@ -492,7 +492,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	return x;
 }
 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2795,7 +2795,7 @@ static void __slab_free(struct kmem_cach
+@@ -2793,7 +2793,7 @@ static void __slab_free(struct kmem_cach
 	do {
 		if (unlikely(n)) {
@@ -501,7 +501,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 			n = NULL;
 		}
 		prior = page->freelist;
-@@ -2827,7 +2827,7 @@ static void __slab_free(struct kmem_cach
+@@ -2825,7 +2825,7 @@ static void __slab_free(struct kmem_cach
 				 * Otherwise the list_lock will synchronize with
 				 * other processors updating the list of slabs.
 				 */
@@ -510,7 +510,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		}
 	}
-@@ -2869,7 +2869,7 @@ static void __slab_free(struct kmem_cach
+@@ -2867,7 +2867,7 @@ static void __slab_free(struct kmem_cach
 		add_partial(n, page, DEACTIVATE_TO_TAIL);
 		stat(s, FREE_ADD_PARTIAL);
 	}
@@ -519,7 +519,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	return;
 slab_empty:
-@@ -2884,7 +2884,7 @@ static void __slab_free(struct kmem_cach
+@@ -2882,7 +2882,7 @@ static void __slab_free(struct kmem_cach
 		remove_full(s, n, page);
 	}
@@ -528,7 +528,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	stat(s, FREE_SLAB);
 	discard_slab(s, page);
 }
-@@ -3271,7 +3271,7 @@ static void
+@@ -3269,7 +3269,7 @@ static void
 init_kmem_cache_node(struct kmem_cache_node *n)
 {
 	n->nr_partial = 0;
@@ -537,7 +537,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	INIT_LIST_HEAD(&n->partial);
 #ifdef CONFIG_SLUB_DEBUG
 	atomic_long_set(&n->nr_slabs, 0);
-@@ -3655,7 +3655,7 @@ static void free_partial(struct kmem_cac
+@@ -3653,7 +3653,7 @@ static void free_partial(struct kmem_cac
 	struct page *page, *h;
 	BUG_ON(irqs_disabled());
@@ -546,7 +546,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_for_each_entry_safe(page, h, &n->partial, lru) {
 		if (!page->inuse) {
 			remove_partial(n, page);
-@@ -3665,7 +3665,7 @@ static void free_partial(struct kmem_cac
+@@ -3663,7 +3663,7 @@ static void free_partial(struct kmem_cac
 			"Objects remaining in %s on __kmem_cache_shutdown()");
 		}
 	}
@@ -555,7 +555,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_for_each_entry_safe(page, h, &discard, lru)
 		discard_slab(s, page);
-@@ -3938,7 +3938,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3936,7 +3936,7 @@ int __kmem_cache_shrink(struct kmem_cach
 	for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
 		INIT_LIST_HEAD(promote + i);
@@ -564,7 +564,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/*
 	 * Build lists of slabs to discard or promote.
-@@ -3969,7 +3969,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3967,7 +3967,7 @@ int __kmem_cache_shrink(struct kmem_cach
 	for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
 		list_splice(promote + i, &n->partial);
@@ -573,7 +573,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	/* Release empty slabs */
 	list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4383,7 +4383,7 @@ static int validate_slab_node(struct kme
+@@ -4381,7 +4381,7 @@ static int validate_slab_node(struct kme
 	struct page *page;
 	unsigned long flags;
@@ -582,7 +582,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	list_for_each_entry(page, &n->partial, lru) {
 		validate_slab_slab(s, page, map);
-@@ -4405,7 +4405,7 @@ static int validate_slab_node(struct kme
+@@ -4403,7 +4403,7 @@ static int validate_slab_node(struct kme
 		s->name, count, atomic_long_read(&n->nr_slabs));
 out:
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	return count;
 }
-@@ -4595,12 +4595,12 @@ static int list_locations(struct kmem_ca
+@@ -4593,12 +4593,12 @@ static int list_locations(struct kmem_ca
 		if (!atomic_long_read(&n->nr_slabs))
 			continue;
diff --git a/patches/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch b/patches/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch
deleted file mode 100644
index 662957cfcd57..000000000000
--- a/patches/0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 3 Jul 2018 13:08:56 +0200
-Subject: [PATCH 3/4] mm/list_lru: Pass struct list_lru_node as an argument
- __list_lru_walk_one()
-
-__list_lru_walk_one() is invoked with struct list_lru *lru, int nid as
-the first two argument. Those two are only used to retrieve struct
-list_lru_node. Since this is already done by the caller of the function
-for the locking, we can pass struct list_lru_node directly and avoid the
-dance around it.
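[Editor's note: the SLxB conversion above is purely a lock-type change repeated at every call site. On PREEMPT_RT, spinlock_t becomes a sleeping lock; raw_spinlock_t keeps true IRQ-off spinning semantics, which is what a short, bounded critical section like the slab list_lock needs. Illustrative sketch with invented names:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct demo_node {
            raw_spinlock_t list_lock;       /* was: spinlock_t */
            struct list_head partial;
    };

    static void demo_node_init(struct demo_node *n)
    {
            raw_spin_lock_init(&n->list_lock);
            INIT_LIST_HEAD(&n->partial);
    }

    static void demo_node_scan(struct demo_node *n)
    {
            unsigned long flags;

            /* IRQs really are off here, even on RT: keep the section short */
            raw_spin_lock_irqsave(&n->list_lock, flags);
            /* walk n->partial ... */
            raw_spin_unlock_irqrestore(&n->list_lock, flags);
    }
]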
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/list_lru.c | 12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
---- a/mm/list_lru.c
-+++ b/mm/list_lru.c
-@@ -194,12 +194,11 @@ unsigned long list_lru_count_node(struct
- EXPORT_SYMBOL_GPL(list_lru_count_node);
-
- static unsigned long
---__list_lru_walk_one(struct list_lru *lru, int nid, int memcg_idx,
-+__list_lru_walk_one(struct list_lru_node *nlru, int memcg_idx,
- 		    list_lru_walk_cb isolate, void *cb_arg,
- 		    unsigned long *nr_to_walk)
- {
-
--	struct list_lru_node *nlru = &lru->node[nid];
- 	struct list_lru_one *l;
- 	struct list_head *item, *n;
- 	unsigned long isolated = 0;
-@@ -261,8 +260,8 @@ list_lru_walk_one(struct list_lru *lru,
- 	unsigned long ret;
-
- 	spin_lock(&nlru->lock);
--	ret = __list_lru_walk_one(lru, nid, memcg_cache_id(memcg),
--				  isolate, cb_arg, nr_to_walk);
-+	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
-+				  nr_to_walk);
- 	spin_unlock(&nlru->lock);
- 	return ret;
- }
-@@ -282,8 +281,9 @@ unsigned long list_lru_walk_node(struct
- 			struct list_lru_node *nlru = &lru->node[nid];
-
- 			spin_lock(&nlru->lock);
--			isolated += __list_lru_walk_one(lru, nid, memcg_idx,
--							isolate, cb_arg, nr_to_walk);
-+			isolated += __list_lru_walk_one(nlru, memcg_idx,
-+							isolate, cb_arg,
-+							nr_to_walk);
- 			spin_unlock(&nlru->lock);
-
- 			if (*nr_to_walk <= 0)
diff --git a/patches/0004-ARM-at91-Implement-clocksource-selection.patch b/patches/0004-ARM-at91-Implement-clocksource-selection.patch
index ddef46d43833..8f1f88f5bf48 100644
--- a/patches/0004-ARM-at91-Implement-clocksource-selection.patch
+++ b/patches/0004-ARM-at91-Implement-clocksource-selection.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/arch/arm/mach-at91/Kconfig
 +++ b/arch/arm/mach-at91/Kconfig
-@@ -106,6 +106,31 @@ config SOC_AT91SAM9
+@@ -107,6 +107,31 @@ config SOC_AT91SAM9
 	    AT91SAM9X35
 	    AT91SAM9XE
diff --git a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index 76d543b31620..ee9464d80a1b 100644
--- a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/mm/slub.c
 +++ b/mm/slub.c
-@@ -1332,6 +1332,12 @@ static inline void dec_slabs_node(struct
+@@ -1330,6 +1330,12 @@ static inline void dec_slabs_node(struct
 #endif /* CONFIG_SLUB_DEBUG */
 /*
  * Hooks for other subsystems that check memory allocations. In a typical
  * production configuration these hooks all should produce no code at all.
-@@ -1686,6 +1692,16 @@ static void __free_slab(struct kmem_cach
+@@ -1684,6 +1690,16 @@ static void __free_slab(struct kmem_cach
 	__free_pages(page, order);
 }
 static void rcu_free_slab(struct rcu_head *h)
 {
 	struct page *page = container_of(h, struct page, rcu_head);
-@@ -1697,6 +1713,12 @@ static void free_slab(struct kmem_cache
+@@ -1695,6 +1711,12 @@ static void free_slab(struct kmem_cache
 {
 	if (unlikely(s->flags & SLAB_TYPESAFE_BY_RCU)) {
 		call_rcu(&page->rcu_head, rcu_free_slab);
 	} else
 		__free_slab(s, page);
 }
-@@ -2225,14 +2247,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2223,14 +2245,21 @@ static void put_cpu_partial(struct kmem_
 			pobjects = oldpage->pobjects;
 			pages = oldpage->pages;
 			if (drain && pobjects > s->cpu_partial) {
 				oldpage = NULL;
 				pobjects = 0;
 				pages = 0;
-@@ -2302,7 +2331,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2300,7 +2329,22 @@ static bool has_cpu_slab(int cpu, void *
 static void flush_all(struct kmem_cache *s)
 {
 }
 /*
-@@ -2500,8 +2544,10 @@ static inline void *get_freelist(struct
+@@ -2498,8 +2542,10 @@ static inline void *get_freelist(struct
  * already disabled (which is the case for bulk allocation).
  */
 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	void *freelist;
 	struct page *page;
-@@ -2557,6 +2603,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2555,6 +2601,13 @@ static void *___slab_alloc(struct kmem_c
 	VM_BUG_ON(!c->page->frozen);
 	c->freelist = get_freepointer(s, freelist);
 	c->tid = next_tid(c->tid);
 	return freelist;
 new_slab:
-@@ -2572,6 +2625,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2570,6 +2623,7 @@ static void *___slab_alloc(struct kmem_c
 	if (unlikely(!freelist)) {
 		slab_out_of_memory(s, gfpflags, node);
 	}
 	page = c->page;
-@@ -2585,7 +2638,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2583,7 +2636,7 @@ static void *___slab_alloc(struct kmem_c
 		goto new_slab;	/* Slab failed checks. Next slab needed */
 	deactivate_slab(s, page, get_freepointer(s, freelist), c);
 }
 /*
-@@ -2597,6 +2650,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2595,6 +2648,7 @@ static void *__slab_alloc(struct kmem_ca
 {
 	void *p;
 	unsigned long flags;
 	local_irq_save(flags);
 #ifdef CONFIG_PREEMPT
-@@ -2608,8 +2662,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2606,8 +2660,9 @@ static void *__slab_alloc(struct kmem_ca
 	c = this_cpu_ptr(s->cpu_slab);
 #endif
 	return p;
 }
-@@ -3087,6 +3142,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3085,6 +3140,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 	void **p)
 {
 	struct kmem_cache_cpu *c;
 	int i;
 	/* memcg and kmem_cache debug support */
-@@ -3110,7 +3166,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3108,7 +3164,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 		 * of re-populating per CPU c->freelist
 		 */
 		p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
 		if (unlikely(!p[i]))
 			goto error;
-@@ -3122,6 +3178,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3120,6 +3176,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 	}
 	c->tid = next_tid(c->tid);
 	local_irq_enable();
 	/* Clear memory outside IRQ disabled fastpath loop */
 	if (unlikely(flags & __GFP_ZERO)) {
-@@ -3136,6 +3193,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3134,6 +3191,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
 	return i;
 error:
 	local_irq_enable();
 	slab_post_alloc_hook(s, flags, i, p);
 	__kmem_cache_free_bulk(s, i, p);
 	return 0;
-@@ -4182,6 +4240,12 @@ void __init kmem_cache_init(void)
+@@ -4180,6 +4238,12 @@ void __init kmem_cache_init(void)
 {
 	static __initdata struct kmem_cache boot_kmem_cache,
 		boot_kmem_cache_node;
diff --git a/patches/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch b/patches/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch
deleted file mode 100644
index 1e0f8bea2378..000000000000
--- a/patches/0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 3 Jul 2018 13:17:27 +0200
-Subject: [PATCH 4/4] mm/list_lru: Introduce list_lru_shrink_walk_irq()
-
-Provide list_lru_shrink_walk_irq() and let it behave like
-list_lru_walk_one() except that it locks the spinlock with
-spin_lock_irq(). This is used by scan_shadow_nodes() because its lock
-nests within the i_pages lock which is acquired with IRQ.
-This change allows to use proper locking promitives instead hand crafted
-lock_irq_disable() plus spin_lock().
-There is no EXPORT_SYMBOL provided because the current user is in-KERNEL
-only.
-
-Add list_lru_shrink_walk_irq() which acquires the spinlock with the
-proper locking primitives.
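[Editor's note: the SLUB "delay giving back empty slubs" patch above defers freeing when it cannot be done with IRQs off on RT. A generic deferred-free idiom in the same spirit can be sketched as follows — invented names, not the patch's actual implementation, which queues struct page objects and drains them later:

    #include <linux/llist.h>
    #include <linux/slab.h>

    static LLIST_HEAD(delayed_free_list);

    struct deferred {
            struct llist_node node;
    };

    /* May be called with IRQs disabled: just queue, lock-free. */
    static void deferred_free(struct deferred *d)
    {
            llist_add(&d->node, &delayed_free_list);
    }

    /* Called later from a preemptible context: actually free. */
    static void deferred_drain(void)
    {
            struct llist_node *batch = llist_del_all(&delayed_free_list);
            struct deferred *d, *tmp;

            llist_for_each_entry_safe(d, tmp, batch, node)
                    kfree(d);
    }
]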
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/list_lru.h | 25 +++++++++++++++++++++++++
- mm/list_lru.c | 15 +++++++++++++++
- mm/workingset.c | 8 ++------
- 3 files changed, 42 insertions(+), 6 deletions(-)
-
---- a/include/linux/list_lru.h
-+++ b/include/linux/list_lru.h
-@@ -162,6 +162,23 @@ unsigned long list_lru_walk_one(struct l
- 				int nid, struct mem_cgroup *memcg,
- 				list_lru_walk_cb isolate, void *cb_arg,
- 				unsigned long *nr_to_walk);
-+/**
-+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
-+ * @lru: the lru pointer.
-+ * @nid: the node id to scan from.
-+ * @memcg: the cgroup to scan from.
-+ * @isolate: callback function that is resposible for deciding what to do with
-+ *  the item currently being scanned
-+ * @cb_arg: opaque type that will be passed to @isolate
-+ * @nr_to_walk: how many items to scan.
-+ *
-+ * Same as @list_lru_walk_one except that the spinlock is acquired with
-+ * spin_lock_irq().
-+ */
-+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
-+				    int nid, struct mem_cgroup *memcg,
-+				    list_lru_walk_cb isolate, void *cb_arg,
-+				    unsigned long *nr_to_walk);
- unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
- 				 list_lru_walk_cb isolate, void *cb_arg,
- 				 unsigned long *nr_to_walk);
-@@ -175,6 +192,14 @@ list_lru_shrink_walk(struct list_lru *lr
- }
-
- static inline unsigned long
-+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
-+			 list_lru_walk_cb isolate, void *cb_arg)
-+{
-+	return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
-+				     &sc->nr_to_scan);
-+}
-+
-+static inline unsigned long
- list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
- 	      void *cb_arg, unsigned long nr_to_walk)
- {
---- a/mm/list_lru.c
-+++ b/mm/list_lru.c
-@@ -267,6 +267,21 @@ list_lru_walk_one(struct list_lru *lru,
- }
- EXPORT_SYMBOL_GPL(list_lru_walk_one);
-
-+unsigned long
-+list_lru_walk_one_irq(struct list_lru *lru, int nid, struct mem_cgroup *memcg,
-+		      list_lru_walk_cb isolate, void *cb_arg,
-+		      unsigned long *nr_to_walk)
-+{
-+	struct list_lru_node *nlru = &lru->node[nid];
-+	unsigned long ret;
-+
-+	spin_lock_irq(&nlru->lock);
-+	ret = __list_lru_walk_one(nlru, memcg_cache_id(memcg), isolate, cb_arg,
-+				  nr_to_walk);
-+	spin_unlock_irq(&nlru->lock);
-+	return ret;
-+}
-+
- unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
- 				 list_lru_walk_cb isolate, void *cb_arg,
- 				 unsigned long *nr_to_walk)
---- a/mm/workingset.c
-+++ b/mm/workingset.c
-@@ -480,13 +480,9 @@ static enum lru_status shadow_lru_isolat
- static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
- 				       struct shrink_control *sc)
- {
--	unsigned long ret;
--
- 	/* list_lru lock nests inside the IRQ-safe i_pages lock */
--	local_irq_disable();
--	ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
--	local_irq_enable();
--	return ret;
-+	return list_lru_shrink_walk_irq(&shadow_nodes, sc, shadow_lru_isolate,
-+					NULL);
- }
-
- static struct shrinker workingset_shadow_shrinker = {
diff --git a/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch b/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
deleted file mode 100644
index 473023771316..000000000000
--- a/patches/0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Wed, 4 Apr 2018 11:43:56 +0200
-Subject: [PATCH] bdi: Use irqsave variant of refcount_dec_and_lock()
-
-The irqsave variant of refcount_dec_and_lock handles irqsave/restore when
-taking/releasing the spin lock. With this variant the call of
-local_irq_save/restore is no longer required.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/backing-dev.c | 5 +----
- 1 file changed, 1 insertion(+), 4 deletions(-)
-
---- a/mm/backing-dev.c
-+++ b/mm/backing-dev.c
-@@ -473,11 +473,8 @@ void wb_congested_put(struct bdi_writeba
- {
- 	unsigned long flags;
-
--	local_irq_save(flags);
--	if (!refcount_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
--		local_irq_restore(flags);
-+	if (!refcount_dec_and_lock_irqsave(&congested->refcnt, &cgwb_lock, &flags))
- 		return;
--	}
-
- 	/* bdi might already have been destroyed leaving @congested unlinked */
- 	if (congested->__bdi) {
diff --git a/patches/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch b/patches/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch
deleted file mode 100644
index 2226f31b2e07..000000000000
--- a/patches/0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch
+++ /dev/null
@@ -1,30 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Wed, 4 Apr 2018 11:43:57 +0200
-Subject: [PATCH] userns: Use irqsave variant of refcount_dec_and_lock()
-
-The irqsave variant of refcount_dec_and_lock handles irqsave/restore when
-taking/releasing the spin lock. With this variant the call of
-local_irq_save/restore is no longer required.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-[bigeasy: s@atomic_dec_and_lock@refcount_dec_and_lock@g ]
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/user.c | 5 +----
- 1 file changed, 1 insertion(+), 4 deletions(-)
-
---- a/kernel/user.c
-+++ b/kernel/user.c
-@@ -169,11 +169,8 @@ void free_uid(struct user_struct *up)
- 	if (!up)
- 		return;
-
--	local_irq_save(flags);
--	if (refcount_dec_and_lock(&up->__count, &uidhash_lock))
-+	if (refcount_dec_and_lock_irqsave(&up->__count, &uidhash_lock, &flags))
- 		free_user(up, flags);
--	else
--		local_irq_restore(flags);
- }
-
- struct user_struct *alloc_uid(kuid_t uid)
diff --git a/patches/ARM-enable-irq-in-translation-section-permission-fau.patch b/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
index 2b69d19597c6..e41d4889a96e 100644
--- a/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/arch/arm/mm/fault.c
 +++ b/arch/arm/mm/fault.c
-@@ -438,6 +438,9 @@ do_translation_fault(unsigned long addr,
+@@ -439,6 +439,9 @@ do_translation_fault(unsigned long addr,
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	if (user_mode(regs))
 		goto bad_area;
-@@ -505,6 +508,9 @@ do_translation_fault(unsigned long addr,
+@@ -506,6 +509,9 @@ do_translation_fault(unsigned long addr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
diff --git a/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch b/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
index 42dec86f03e5..579a4c586705 100644
--- a/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
+++ b/patches/EXP-rcu-Revert-expedited-GP-parallelization-cleverne.patch
@@ -1,4 +1,3 @@
-From 53d4eed5325b9a26985c3d4f017d94919eb4ac89 Mon Sep 17 00:00:00 2001
 From: Paul E. McKenney <paulmck@linux.ibm.com>
 Date: Mon, 29 Oct 2018 11:53:01 +0100
 Subject: [PATCH] EXP rcu: Revert expedited GP parallelization cleverness
@@ -27,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	struct rcu_node *rnp;
 	trace_rcu_exp_grace_period(rsp->name, rcu_exp_gp_seq_endval(rsp), TPS("reset"));
-@@ -493,13 +492,7 @@ static void sync_rcu_exp_select_cpus(str
+@@ -494,13 +493,7 @@ static void sync_rcu_exp_select_cpus(str
 			continue;
 		}
 		INIT_WORK(&rnp->rew.rew_work, sync_rcu_exp_select_node_cpus);
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index ae918a775717..80534af12fa6 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -7,66 +7,66 @@ with a "full" buffer after executing "dmesg" on the shell.
 Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 ---
- kernel/printk/printk.c | 27 +++++++++++++++++++++++++++
- 1 file changed, 27 insertions(+)
+ kernel/printk/printk.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
 
 --- a/kernel/printk/printk.c
 +++ b/kernel/printk/printk.c
-@@ -1411,6 +1411,8 @@ static int syslog_print_all(char __user
- {
- 	char *text;
- 	int len = 0;
+@@ -1415,12 +1415,23 @@ static int syslog_print_all(char __user
+ 	u64 next_seq;
+ 	u64 seq;
+ 	u32 idx;
 +	int attempts = 0;
 +	int num_msg;
 	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
 	if (!text)
 		return -ENOMEM;
 	logbuf_lock_irq();
++
 +try_again:
 +	attempts++;
 +	if (attempts > 10) {
 +		len = -EBUSY;
 +		goto out;
 +	}
 +	num_msg = 0;
+
 	/*
 	 * Find first record that fits, including all following records,
 	 * into the user-provided buffer for this dump.
+@@ -1433,6 +1444,14 @@ static int syslog_print_all(char __user
+ 		len += msg_print_text(msg, true, NULL, 0);
+ 		idx = log_next(idx);
+ 		seq++;
++		num_msg++;
++		if (num_msg > 5) {
++			num_msg = 0;
++			logbuf_unlock_irq();
++			logbuf_lock_irq();
++			if (clear_seq < log_first_seq)
++				goto try_again;
++		}
+ 	}
 
 	/* move first record forward until length fits into the buffer */
+@@ -1444,6 +1463,14 @@ static int syslog_print_all(char __user
+ 		len -= msg_print_text(msg, true, NULL, 0);
+ 		idx = log_next(idx);
+ 		seq++;
++		num_msg++;
++		if (num_msg > 5) {
++			num_msg = 0;
++			logbuf_unlock_irq();
++			logbuf_lock_irq();
++			if (clear_seq < log_first_seq)
++				goto try_again;
++		}
+ 	}
 
 	/* last message fitting into this dump */
-@@ -1483,6 +1509,7 @@ static int syslog_print_all(char __user
+@@ -1481,6 +1508,7 @@ static int syslog_print_all(char __user
 		clear_seq = log_next_seq;
 		clear_idx = log_next_idx;
 	}
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index ef1d1f04b18e..818944745bec 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/virt/kvm/arm/arm.c
 +++ b/virt/kvm/arm/arm.c
-@@ -694,7 +694,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -700,7 +700,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
 	 * involves poking the GIC, which must be done in a
 	 * non-preemptible context.
 	 */
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	kvm_pmu_flush_hwstate(vcpu);
-@@ -743,7 +743,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -749,7 +749,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
 		kvm_timer_sync_hwstate(vcpu);
 		kvm_vgic_sync_hwstate(vcpu);
 		local_irq_enable();
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 		continue;
 	}
-@@ -821,7 +821,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -827,7 +827,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
 	/* Exit types that need handling before we can be preempted */
 	handle_exit_early(vcpu, run, ret);
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 1c1875c14e46..6ceb61cd0889 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/fs/nfs/nfs4proc.c
 +++ b/fs/nfs/nfs4proc.c
-@@ -2822,7 +2822,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2859,7 +2859,7 @@ static int _nfs4_open_and_get_state(stru
 	unsigned int seq;
 	int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	ret = _nfs4_proc_open(opendata, ctx);
 	if (ret != 0)
-@@ -2860,7 +2860,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2900,7 +2900,7 @@ static int _nfs4_open_and_get_state(stru
 	if (d_inode(dentry) == state->inode) {
 		nfs_inode_attach_open_context(ctx);
 -		if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq))
 +		if (read_seqretry(&sp->so_reclaim_seqlock, seq))
 			nfs4_schedule_stateid_recovery(server, state);
-		else
-			pnfs_parse_lgopen(state->inode, opendata->lgp, ctx);
+	}
+
 --- a/fs/nfs/nfs4state.c
 +++ b/fs/nfs/nfs4state.c
 @@ -511,7 +511,7 @@ nfs4_alloc_state_owner(struct nfs_server
 	mutex_init(&sp->so_delegreturn_mutex);
 	return sp;
 }
-@@ -1562,8 +1562,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1563,8 +1563,12 @@ static int nfs4_reclaim_open_state(struc
 	 * recovering after a network partition or a reboot from a
 	 * server that doesn't support a grace period.
 	 */
 restart:
 	list_for_each_entry(state, &sp->so_states, open_states) {
 		if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1632,14 +1636,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1651,14 +1655,20 @@ static int nfs4_reclaim_open_state(struc
 		spin_lock(&sp->so_lock);
 		goto restart;
 	}
diff --git a/patches/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch b/patches/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch
deleted file mode 100644
index 3543810f5952..000000000000
--- a/patches/SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch
+++ /dev/null
@@ -1,48 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 12 Apr 2018 09:16:22 +0200
-Subject: [PATCH] [SCSI] libsas: remove irq save in sas_ata_qc_issue()
-
-[ upstream commit 2da11d4262639dc0e2fabc6a70886db57af25c43 ]
-
-Since commit 312d3e56119a ("[SCSI] libsas: remove ata_port.lock
-management duties from lldds") the sas_ata_qc_issue() function unlocks
-the ata_port.lock and disables interrupts before doing so.
-That lock is always taken with disabled interrupts so at this point, the
-interrupts are already disabled. There is no need to disable the
-interrupts before the unlock operation because they are already
-disabled.
-Restoring the interrupt state later does not change anything because
-they were disabled and remain disabled. Therefore remove the operations
-which do not change the behaviour.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/scsi/libsas/sas_ata.c | 3 ---
- 1 file changed, 3 deletions(-)
-
---- a/drivers/scsi/libsas/sas_ata.c
-+++ b/drivers/scsi/libsas/sas_ata.c
-@@ -176,7 +176,6 @@ static void sas_ata_task_done(struct sas
-
- static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
- {
--	unsigned long flags;
- 	struct sas_task *task;
- 	struct scatterlist *sg;
- 	int ret = AC_ERR_SYSTEM;
-@@ -190,7 +189,6 @@ static unsigned int sas_ata_qc_issue(str
- 	/* TODO: audit callers to ensure they are ready for qc_issue to
- 	 * unconditionally re-enable interrupts
- 	 */
--	local_irq_save(flags);
- 	spin_unlock(ap->lock);
-
- 	/* If the device fell off, no sense in issuing commands */
-@@ -252,7 +250,6 @@ static unsigned int sas_ata_qc_issue(str
-
- out:
- 	spin_lock(ap->lock);
--	local_irq_restore(flags);
- 	return ret;
- }
-
diff --git a/patches/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch b/patches/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch
deleted file mode 100644
index b7cdb4a3a016..000000000000
--- a/patches/SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 12 Apr 2018 09:55:25 +0200
-Subject: [PATCH] [SCSI] qla2xxx: remove irq save in qla2x00_poll()
-
-[ upstream commit b3a8aa90c46095cbad454eb068bfb5a8eb56d4e3 ]
-
-In commit d2ba5675d899 ("[SCSI] qla2xxx: Disable local-interrupts while
-polling for RISC status.") added a local_irq_disable() before invoking
-the ->intr_handler callback. The function, which was used in this
-callback, did not disable interrupts while acquiring the spin_lock so a
-deadlock was possible and this change was one possible solution.
-
-The function in question was qla2300_intr_handler() and is using
-spin_lock_irqsave() since commit 43fac4d97a1a ("[SCSI] qla2xxx: Resolve
-a performance issue in interrupt").
-I checked all other ->intr_handler callbacks and all of them use the
-irqsave variant so it is safe to remove the local_irq_save() block now.
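[Editor's note: both SCSI cleanups above rest on the same observation — wrapping a call in local_irq_save()/local_irq_restore() adds nothing once the code inside already manages IRQ state with irqsave-capable primitives of its own. Schematic sketch, invented names:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(hw_lock);

    static void irq_handler_body(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&hw_lock, flags);     /* safe in any context */
            /* touch hardware state ... */
            spin_unlock_irqrestore(&hw_lock, flags);
    }

    static void poll_before(void)
    {
            unsigned long flags;

            local_irq_save(flags);          /* redundant outer IRQ-off section */
            irq_handler_body();
            local_irq_restore(flags);
    }

    static void poll_after(void)
    {
            irq_handler_body();             /* the callee's irqsave suffices */
    }
]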
- -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - drivers/scsi/qla2xxx/qla_inline.h | 4 +--- - 1 file changed, 1 insertion(+), 3 deletions(-) - ---- a/drivers/scsi/qla2xxx/qla_inline.h -+++ b/drivers/scsi/qla2xxx/qla_inline.h -@@ -58,14 +58,12 @@ qla2x00_debounce_register(volatile uint1 - static inline void - qla2x00_poll(struct rsp_que *rsp) - { -- unsigned long flags; - struct qla_hw_data *ha = rsp->hw; -- local_irq_save(flags); -+ - if (IS_P3P_TYPE(ha)) - qla82xx_poll(0, rsp); - else - ha->isp_ops->intr_handler(0, rsp); -- local_irq_restore(flags); - } - - static inline uint8_t * diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch index b3cd95511369..d3264f234656 100644 --- a/patches/add_migrate_disable.patch +++ b/patches/add_migrate_disable.patch @@ -79,7 +79,7 @@ Subject: kernel/sched/core: add migrate_disable() * boot command line: --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1030,7 +1030,15 @@ void set_cpus_allowed_common(struct task +@@ -1007,7 +1007,15 @@ void set_cpus_allowed_common(struct task p->nr_cpus_allowed = cpumask_weight(new_mask); } @@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable() { struct rq *rq = task_rq(p); bool queued, running; -@@ -1059,6 +1067,20 @@ void do_set_cpus_allowed(struct task_str +@@ -1036,6 +1044,20 @@ void do_set_cpus_allowed(struct task_str set_curr_task(rq, p); } @@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable() /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -1117,9 +1139,16 @@ static int __set_cpus_allowed_ptr(struct +@@ -1094,9 +1116,16 @@ static int __set_cpus_allowed_ptr(struct } /* Can the task run on the task's current CPU? 
If so, we're done */ @@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable() dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; -@@ -7076,3 +7105,100 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7053,3 +7082,100 @@ const u32 sched_prio_to_wmult[40] = { }; #undef CREATE_TRACE_POINTS @@ -238,7 +238,7 @@ Subject: kernel/sched/core: add migrate_disable() +#endif --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c -@@ -979,6 +979,10 @@ void proc_sched_show_task(struct task_st +@@ -978,6 +978,10 @@ void proc_sched_show_task(struct task_st P(dl.runtime); P(dl.deadline); } diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch index e020480052c5..7b2c832d5c74 100644 --- a/patches/arch-arm64-Add-lazy-preempt-support.patch +++ b/patches/arch-arm64-Add-lazy-preempt-support.patch @@ -20,14 +20,14 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org> --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig -@@ -125,6 +125,7 @@ config ARM64 +@@ -140,6 +140,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE - select HAVE_STACKPROTECTOR + select HAVE_RSEQ --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -43,6 +43,7 @@ struct thread_info { @@ -77,7 +77,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org> DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0)); --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S -@@ -633,11 +633,16 @@ ENDPROC(el1_sync) +@@ -623,11 +623,16 @@ ENDPROC(el1_sync) #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count @@ -97,7 +97,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org> #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on -@@ -651,6 +656,7 @@ ENDPROC(el1_irq) +@@ -641,6 +646,7 @@ ENDPROC(el1_irq) 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? 
@@ -107,7 +107,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org> --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c -@@ -923,7 +923,7 @@ asmlinkage void do_notify_resume(struct +@@ -926,7 +926,7 @@ asmlinkage void do_notify_resume(struct /* Check valid user FS if needed */ addr_limit_user_check(); diff --git a/patches/arm-disable-NEON-in-kernel-mode.patch b/patches/arm-disable-NEON-in-kernel-mode.patch index 9d4261fc2643..a42ed480f4cf 100644 --- a/patches/arm-disable-NEON-in-kernel-mode.patch +++ b/patches/arm-disable-NEON-in-kernel-mode.patch @@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -2169,7 +2169,7 @@ config NEON +@@ -2160,7 +2160,7 @@ config NEON config KERNEL_MODE_NEON bool "Support for NEON in kernel mode" diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch index 731d815e6555..34e2b9d2c44f 100644 --- a/patches/arm-preempt-lazy-support.patch +++ b/patches/arm-preempt-lazy-support.patch @@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -89,6 +89,7 @@ config ARM +@@ -90,6 +90,7 @@ config ARM select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif /* __ASM_ARM_THREAD_INFO_H */ --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c -@@ -67,6 +67,7 @@ int main(void) +@@ -56,6 +56,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); diff --git a/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch index 1e903ef243cc..9a6063b8edd4 100644 --- a/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch +++ b/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch @@ -32,10 +32,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + return sve_state; +} + - static void change_cpacr(u64 val, u64 mask) - { - u64 cpacr = read_sysreg(CPACR_EL1); -@@ -566,6 +576,7 @@ int sve_set_vector_length(struct task_st + /* + * TIF_SVE controls whether a task can use SVE without trapping while + * in userspace, and also the way a task's FPSIMD/SVE state is stored +@@ -547,6 +557,7 @@ int sve_set_vector_length(struct task_st * non-SVE thread. 
*/ if (task == current) { @@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable(); fpsimd_save(); -@@ -576,8 +587,10 @@ int sve_set_vector_length(struct task_st +@@ -557,8 +568,10 @@ int sve_set_vector_length(struct task_st if (test_and_clear_tsk_thread_flag(task, TIF_SVE)) sve_to_fpsimd(task); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Force reallocation of task SVE state to the correct size -@@ -832,6 +845,7 @@ asmlinkage void do_sve_acc(unsigned int +@@ -813,6 +826,7 @@ asmlinkage void do_sve_acc(unsigned int sve_alloc(current); @@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable(); fpsimd_save(); -@@ -845,6 +859,7 @@ asmlinkage void do_sve_acc(unsigned int +@@ -826,6 +840,7 @@ asmlinkage void do_sve_acc(unsigned int WARN_ON(1); /* SVE access shouldn't have trapped */ local_bh_enable(); @@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -911,10 +926,12 @@ void fpsimd_thread_switch(struct task_st +@@ -892,10 +907,12 @@ void fpsimd_thread_switch(struct task_st void fpsimd_flush_thread(void) { int vl, supported_vl; @@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable(); memset(&current->thread.uw.fpsimd_state, 0, -@@ -923,7 +940,7 @@ void fpsimd_flush_thread(void) +@@ -904,7 +921,7 @@ void fpsimd_flush_thread(void) if (system_supports_sve()) { clear_thread_flag(TIF_SVE); @@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Reset the task vector length as required. -@@ -959,6 +976,8 @@ void fpsimd_flush_thread(void) +@@ -940,6 +957,8 @@ void fpsimd_flush_thread(void) set_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); @@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -970,9 +989,11 @@ void fpsimd_preserve_current_state(void) +@@ -951,9 +970,11 @@ void fpsimd_preserve_current_state(void) if (!system_supports_fpsimd()) return; @@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -1030,6 +1051,7 @@ void fpsimd_restore_current_state(void) +@@ -1011,6 +1032,7 @@ void fpsimd_restore_current_state(void) if (!system_supports_fpsimd()) return; @@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable(); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { -@@ -1038,6 +1060,7 @@ void fpsimd_restore_current_state(void) +@@ -1019,6 +1041,7 @@ void fpsimd_restore_current_state(void) } local_bh_enable(); @@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -1050,6 +1073,7 @@ void fpsimd_update_current_state(struct +@@ -1031,6 +1054,7 @@ void fpsimd_update_current_state(struct if (!system_supports_fpsimd()) return; @@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable(); current->thread.uw.fpsimd_state = *state; -@@ -1062,6 +1086,7 @@ void fpsimd_update_current_state(struct +@@ -1043,6 +1067,7 @@ void fpsimd_update_current_state(struct clear_thread_flag(TIF_FOREIGN_FPSTATE); local_bh_enable(); @@ -146,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /* -@@ -1107,6 +1132,7 @@ void kernel_neon_begin(void) +@@ -1088,6 +1113,7 @@ void kernel_neon_begin(void) BUG_ON(!may_use_simd()); @@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_disable();
__this_cpu_write(kernel_neon_busy, true); -@@ -1120,6 +1146,7 @@ void kernel_neon_begin(void) +@@ -1101,6 +1127,7 @@ void kernel_neon_begin(void) preempt_disable(); local_bh_enable(); diff --git a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch index 7263c0a3deb7..95c89ea5e088 100644 --- a/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch +++ b/patches/block-blk-mq-move-blk_queue_usage_counter_release-in.patch @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void blk_rq_timed_out_timer(struct timer_list *t) -@@ -1068,6 +1077,7 @@ struct request_queue *blk_alloc_queue_no +@@ -1067,6 +1076,7 @@ struct request_queue *blk_alloc_queue_no queue_flag_set_unlocked(QUEUE_FLAG_BYPASS, q); init_waitqueue_head(&q->mq_freeze_wq); @@ -98,10 +98,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <linux/scatterlist.h> #include <linux/blkzoned.h> +#include <linux/swork.h> - #include <linux/seqlock.h> - #include <linux/u64_stats_sync.h> -@@ -651,6 +652,7 @@ struct request_queue { + struct module; + struct scsi_ioctl_command; +@@ -649,6 +650,7 @@ struct request_queue { #endif struct rcu_head rcu_head; wait_queue_head_t mq_freeze_wq; diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch index c35cadc0a6c8..f0edb21b8788 100644 --- a/patches/block-mq-don-t-complete-requests-via-IPI.patch +++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h -@@ -247,7 +247,7 @@ static inline u16 blk_mq_unique_tag_to_t +@@ -249,7 +249,7 @@ static inline u16 blk_mq_unique_tag_to_t return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } @@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> void blk_mq_end_request(struct request *rq, blk_status_t error); --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -151,6 +151,9 @@ enum mq_rq_state { +@@ -149,6 +149,9 @@ enum mq_rq_state { */ struct request { struct request_queue *q; diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch index 08d15df28abe..8f28f71180e3 100644 --- a/patches/block-mq-drop-preempt-disable.patch +++ b/patches/block-mq-drop-preempt-disable.patch @@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx) -@@ -1328,14 +1328,14 @@ static void __blk_mq_delay_run_hw_queue( +@@ -1360,14 +1360,14 @@ static void __blk_mq_delay_run_hw_queue( return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch index e0021cbd12e4..ddcd4e826855 100644 --- a/patches/block-mq-use-cpu_light.patch +++ b/patches/block-mq-use-cpu_light.patch @@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/block/blk-mq.h +++ b/block/blk-mq.h -@@ -112,12 +112,12 @@ static inline struct blk_mq_ctx *__blk_m +@@ -113,12 +113,12 @@ static inline struct blk_mq_ctx *__blk_m */ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { diff --git a/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch b/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch deleted file mode 100644 index 
8209ab018d7f..000000000000 --- a/patches/cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch +++ /dev/null @@ -1,270 +0,0 @@ -From: "Steven Rostedt (VMware)" <rostedt@goodmis.org> -Date: Mon, 9 Jul 2018 17:48:54 -0400 -Subject: [PATCH] cgroup/tracing: Move taking of spin lock out of trace event - handlers - -[ Upstream commit e4f8d81c738db6d3ffdabfb8329aa2feaa310699 ] - -It is unwise to take spin locks from the handlers of trace events. -Mainly, because they can introduce lockups, because it introduces locks -in places that are normally not tested. Worse yet, because trace events -are tucked away in the include/trace/events/ directory, locks that are -taken there are forgotten about. - -As a general rule, I tell people never to take any locks in a trace -event handler. - -Several cgroup trace event handlers call cgroup_path() which eventually -takes the kernfs_rename_lock spinlock. This injects the spinlock in the -code without people realizing it. It also can cause issues for the -PREEMPT_RT patch, as the spinlock becomes a mutex, and the trace event -handlers are called with preemption disabled. - -By moving the calculation of the cgroup_path() out of the trace event -handlers and into a macro (surrounded by a -trace_cgroup_##type##_enabled()), then we could place the cgroup_path -into a string, and pass that to the trace event. Not only does this -remove the taking of the spinlock out of the trace event handler, but -it also means that the cgroup_path() only needs to be called once (it -is currently called twice, once to get the length to reserver the -buffer for, and once again to get the path itself. Now it only needs to -be done once. - -Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org> -Signed-off-by: Tejun Heo <tj@kernel.org> -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - include/trace/events/cgroup.h | 47 +++++++++++++++++++--------------------- - kernel/cgroup/cgroup-internal.h | 26 ++++++++++++++++++++++ - kernel/cgroup/cgroup-v1.c | 4 +-- - kernel/cgroup/cgroup.c | 12 +++++----- - 4 files changed, 58 insertions(+), 31 deletions(-) - ---- a/include/trace/events/cgroup.h -+++ b/include/trace/events/cgroup.h -@@ -53,24 +53,22 @@ DEFINE_EVENT(cgroup_root, cgroup_remount - - DECLARE_EVENT_CLASS(cgroup, - -- TP_PROTO(struct cgroup *cgrp), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgrp), -+ TP_ARGS(cgrp, path), - - TP_STRUCT__entry( - __field( int, root ) - __field( int, id ) - __field( int, level ) -- __dynamic_array(char, path, -- cgroup_path(cgrp, NULL, 0) + 1) -+ __string( path, path ) - ), - - TP_fast_assign( - __entry->root = cgrp->root->hierarchy_id; - __entry->id = cgrp->id; - __entry->level = cgrp->level; -- cgroup_path(cgrp, __get_dynamic_array(path), -- __get_dynamic_array_len(path)); -+ __assign_str(path, path); - ), - - TP_printk("root=%d id=%d level=%d path=%s", -@@ -79,45 +77,45 @@ DECLARE_EVENT_CLASS(cgroup, - - DEFINE_EVENT(cgroup, cgroup_mkdir, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DEFINE_EVENT(cgroup, cgroup_rmdir, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DEFINE_EVENT(cgroup, cgroup_release, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - 
DEFINE_EVENT(cgroup, cgroup_rename, - -- TP_PROTO(struct cgroup *cgroup), -+ TP_PROTO(struct cgroup *cgrp, const char *path), - -- TP_ARGS(cgroup) -+ TP_ARGS(cgrp, path) - ); - - DECLARE_EVENT_CLASS(cgroup_migrate, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup), -+ TP_ARGS(dst_cgrp, path, task, threadgroup), - - TP_STRUCT__entry( - __field( int, dst_root ) - __field( int, dst_id ) - __field( int, dst_level ) -- __dynamic_array(char, dst_path, -- cgroup_path(dst_cgrp, NULL, 0) + 1) - __field( int, pid ) -+ __string( dst_path, path ) - __string( comm, task->comm ) - ), - -@@ -125,8 +123,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate, - __entry->dst_root = dst_cgrp->root->hierarchy_id; - __entry->dst_id = dst_cgrp->id; - __entry->dst_level = dst_cgrp->level; -- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path), -- __get_dynamic_array_len(dst_path)); -+ __assign_str(dst_path, path); - __entry->pid = task->pid; - __assign_str(comm, task->comm); - ), -@@ -138,16 +135,18 @@ DECLARE_EVENT_CLASS(cgroup_migrate, - - DEFINE_EVENT(cgroup_migrate, cgroup_attach_task, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup) -+ TP_ARGS(dst_cgrp, path, task, threadgroup) - ); - - DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks, - -- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup), -+ TP_PROTO(struct cgroup *dst_cgrp, const char *path, -+ struct task_struct *task, bool threadgroup), - -- TP_ARGS(dst_cgrp, task, threadgroup) -+ TP_ARGS(dst_cgrp, path, task, threadgroup) - ); - - #endif /* _TRACE_CGROUP_H */ ---- a/kernel/cgroup/cgroup-internal.h -+++ b/kernel/cgroup/cgroup-internal.h -@@ -8,6 +8,32 @@ - #include <linux/list.h> - #include <linux/refcount.h> - -+#define TRACE_CGROUP_PATH_LEN 1024 -+extern spinlock_t trace_cgroup_path_lock; -+extern char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; -+ -+/* -+ * cgroup_path() takes a spin lock. It is good practice not to take -+ * spin locks within trace point handlers, as they are mostly hidden -+ * from normal view. As cgroup_path() can take the kernfs_rename_lock -+ * spin lock, it is best to not call that function from the trace event -+ * handler. -+ * -+ * Note: trace_cgroup_##type##_enabled() is a static branch that will only -+ * be set when the trace event is enabled. -+ */ -+#define TRACE_CGROUP_PATH(type, cgrp, ...) \ -+ do { \ -+ if (trace_cgroup_##type##_enabled()) { \ -+ spin_lock(&trace_cgroup_path_lock); \ -+ cgroup_path(cgrp, trace_cgroup_path, \ -+ TRACE_CGROUP_PATH_LEN); \ -+ trace_cgroup_##type(cgrp, trace_cgroup_path, \ -+ ##__VA_ARGS__); \ -+ spin_unlock(&trace_cgroup_path_lock); \ -+ } \ -+ } while (0) -+ - /* - * A cgroup can be associated with multiple css_sets as different tasks may - * belong to different cgroups on different hierarchies. 
In the other ---- a/kernel/cgroup/cgroup-v1.c -+++ b/kernel/cgroup/cgroup-v1.c -@@ -135,7 +135,7 @@ int cgroup_transfer_tasks(struct cgroup - if (task) { - ret = cgroup_migrate(task, false, &mgctx); - if (!ret) -- trace_cgroup_transfer_tasks(to, task, false); -+ TRACE_CGROUP_PATH(transfer_tasks, to, task, false); - put_task_struct(task); - } - } while (task && !ret); -@@ -865,7 +865,7 @@ static int cgroup1_rename(struct kernfs_ - - ret = kernfs_rename(kn, new_parent, new_name_str); - if (!ret) -- trace_cgroup_rename(cgrp); -+ TRACE_CGROUP_PATH(rename, cgrp); - - mutex_unlock(&cgroup_mutex); - ---- a/kernel/cgroup/cgroup.c -+++ b/kernel/cgroup/cgroup.c -@@ -83,6 +83,9 @@ EXPORT_SYMBOL_GPL(cgroup_mutex); - EXPORT_SYMBOL_GPL(css_set_lock); - #endif - -+DEFINE_SPINLOCK(trace_cgroup_path_lock); -+char trace_cgroup_path[TRACE_CGROUP_PATH_LEN]; -+ - /* - * Protects cgroup_idr and css_idr so that IDs can be released without - * grabbing cgroup_mutex. -@@ -2638,7 +2641,7 @@ int cgroup_attach_task(struct cgroup *ds - cgroup_migrate_finish(&mgctx); - - if (!ret) -- trace_cgroup_attach_task(dst_cgrp, leader, threadgroup); -+ TRACE_CGROUP_PATH(attach_task, dst_cgrp, leader, threadgroup); - - return ret; - } -@@ -4641,7 +4644,7 @@ static void css_release_work_fn(struct w - struct cgroup *tcgrp; - - /* cgroup release path */ -- trace_cgroup_release(cgrp); -+ TRACE_CGROUP_PATH(release, cgrp); - - if (cgroup_on_dfl(cgrp)) - cgroup_rstat_flush(cgrp); -@@ -4984,7 +4987,7 @@ int cgroup_mkdir(struct kernfs_node *par - if (ret) - goto out_destroy; - -- trace_cgroup_mkdir(cgrp); -+ TRACE_CGROUP_PATH(mkdir, cgrp); - - /* let's create and online css's */ - kernfs_activate(kn); -@@ -5172,9 +5175,8 @@ int cgroup_rmdir(struct kernfs_node *kn) - return 0; - - ret = cgroup_destroy_locked(cgrp); -- - if (!ret) -- trace_cgroup_rmdir(cgrp); -+ TRACE_CGROUP_PATH(rmdir, cgrp); - - cgroup_kn_unlock(kn); - return ret; diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch index 897fe0826dac..457e89c5baf3 100644 --- a/patches/cgroups-use-simple-wait-in-css_release.patch +++ b/patches/cgroups-use-simple-wait-in-css_release.patch @@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* --- a/kernel/cgroup/cgroup.c +++ b/kernel/cgroup/cgroup.c -@@ -4618,10 +4618,10 @@ static void css_free_rwork_fn(struct wor +@@ -4620,10 +4620,10 @@ static void css_free_rwork_fn(struct wor } } @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; -@@ -4681,8 +4681,8 @@ static void css_release(struct percpu_re +@@ -4683,8 +4683,8 @@ static void css_release(struct percpu_re struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); @@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5404,6 +5404,7 @@ static int __init cgroup_wq_init(void) +@@ -5406,6 +5406,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch index ff615f7c1816..07f01ab9d42e 100644 --- a/patches/completion-use-simple-wait-queues.patch +++ b/patches/completion-use-simple-wait-queues.patch @@ -8,30 +8,46 @@ contention on the waitqueue lock. 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- - drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 - + arch/powerpc/platforms/ps3/device-init.c | 4 +- + drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 4 +- drivers/usb/gadget/function/f_fs.c | 2 - drivers/usb/gadget/legacy/inode.c | 4 +- include/linux/completion.h | 8 ++-- include/linux/suspend.h | 6 +++ - include/linux/swait.h | 1 + include/linux/swait.h | 2 + kernel/power/hibernate.c | 7 ++++ kernel/power/suspend.c | 4 ++ kernel/sched/completion.c | 34 ++++++++++---------- kernel/sched/core.c | 10 ++++- - kernel/sched/swait.c | 19 +++++++++++ - 11 files changed, 70 insertions(+), 27 deletions(-) + kernel/sched/swait.c | 21 +++++++++++- + 12 files changed, 75 insertions(+), 31 deletions(-) +--- a/arch/powerpc/platforms/ps3/device-init.c ++++ b/arch/powerpc/platforms/ps3/device-init.c +@@ -752,8 +752,8 @@ static int ps3_notification_read_write(s + } + pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); + +- res = wait_event_interruptible(dev->done.wait, +- dev->done.done || kthread_should_stop()); ++ res = swait_event_interruptible_exclusive(dev->done.wait, ++ dev->done.done || kthread_should_stop()); + if (kthread_should_stop()) + res = -EINTR; + if (res) { --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c -@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez +@@ -697,8 +697,8 @@ static void ezusb_req_ctx_wait(struct ez while (!ctx->done.done && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, -+ swait_event_interruptible(ctx->done.wait, - ctx->done.done); +- ctx->done.done); ++ swait_event_interruptible_exclusive(ctx->done.wait, ++ ctx->done.done); } break; + default: --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1633,7 +1633,7 @@ static void ffs_data_put(struct ffs_data @@ -50,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, done.done); -+ value = swait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible_exclusive(done.wait, done.done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -59,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); -+ swait_event (done.wait, done.done); ++ swait_event_exclusive(done.wait, done.done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { @@ -118,17 +134,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> extern suspend_state_t mem_sleep_default; --- a/include/linux/swait.h +++ b/include/linux/swait.h -@@ -160,6 +160,7 @@ static inline bool swq_has_sleeper(struc - extern void swake_up(struct swait_queue_head *q); +@@ -160,7 +160,9 @@ static inline bool swq_has_sleeper(struc + extern void swake_up_one(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); +extern void swake_up_all_locked(struct swait_queue_head *q); - extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); - extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); ++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); + extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, 
int state); + extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); + --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c -@@ -679,6 +679,10 @@ static int load_image_and_restore(void) +@@ -681,6 +681,10 @@ static int load_image_and_restore(void) return error; } @@ -139,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * hibernate - Carry out system hibernation, including saving the image. */ -@@ -692,6 +696,8 @@ int hibernate(void) +@@ -694,6 +698,8 @@ int hibernate(void) return -EPERM; } @@ -148,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> lock_system_sleep(); /* The snapshot device should not be opened while we're running */ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { -@@ -770,6 +776,7 @@ int hibernate(void) +@@ -772,6 +778,7 @@ int hibernate(void) atomic_inc(&snapshot_device_available); Unlock: unlock_system_sleep(); @@ -158,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return error; --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c -@@ -594,6 +594,8 @@ static int enter_state(suspend_state_t s +@@ -600,6 +600,8 @@ static int enter_state(suspend_state_t s return error; } @@ -167,7 +185,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * pm_suspend - Externally visible function for suspending the system. * @state: System sleep state to enter. -@@ -608,6 +610,7 @@ int pm_suspend(suspend_state_t state) +@@ -614,6 +616,7 @@ int pm_suspend(suspend_state_t state) if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) return -EINVAL; @@ -175,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> pr_info("suspend entry (%s)\n", mem_sleep_labels[state]); error = enter_state(state); if (error) { -@@ -617,6 +620,7 @@ int pm_suspend(suspend_state_t state) +@@ -623,6 +626,7 @@ int pm_suspend(suspend_state_t state) suspend_stats.success++; } pr_info("suspend exit\n"); @@ -281,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> EXPORT_SYMBOL(completion_done); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7125,7 +7125,10 @@ void migrate_disable(void) +@@ -7102,7 +7102,10 @@ void migrate_disable(void) return; } #ifdef CONFIG_SCHED_DEBUG @@ -293,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif if (p->migrate_disable) { -@@ -7155,7 +7158,10 @@ void migrate_enable(void) +@@ -7132,7 +7135,10 @@ void migrate_enable(void) } #ifdef CONFIG_SCHED_DEBUG @@ -330,6 +348,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +} +EXPORT_SYMBOL(swake_up_all_locked); + - void swake_up(struct swait_queue_head *q) + void swake_up_one(struct swait_queue_head *q) { unsigned long flags; +@@ -69,7 +88,7 @@ void swake_up_all(struct swait_queue_hea + } + EXPORT_SYMBOL(swake_up_all); + +-static void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) ++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) + { + wait->task = current; + if (list_empty(&wait->task_list)) diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch index 2797b883831b..7d1eee8b2a79 100644 --- a/patches/cpu-hotplug--Implement-CPU-pinning.patch +++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch @@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP) static struct lockdep_map cpuhp_state_up_map = STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map); -@@ -288,7 +293,30 @@ static 
int cpu_hotplug_disabled; +@@ -285,7 +290,30 @@ static int cpu_hotplug_disabled; */ void pin_current_cpu(void) { @@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /** -@@ -296,6 +324,13 @@ void pin_current_cpu(void) +@@ -293,6 +321,13 @@ void pin_current_cpu(void) */ void unpin_current_cpu(void) { @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -854,6 +889,7 @@ static int take_cpu_down(void *_param) +@@ -846,6 +881,7 @@ static int take_cpu_down(void *_param) static int takedown_cpu(unsigned int cpu) { @@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; -@@ -866,11 +902,14 @@ static int takedown_cpu(unsigned int cpu +@@ -858,11 +894,14 @@ static int takedown_cpu(unsigned int cpu */ irq_lock_sparse(); @@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ -@@ -889,6 +928,7 @@ static int takedown_cpu(unsigned int cpu +@@ -881,6 +920,7 @@ static int takedown_cpu(unsigned int cpu wait_for_ap_thread(st, false); BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch index fdab49a15c95..9c5d9049329e 100644 --- a/patches/cpumask-disable-offstack-on-rt.patch +++ b/patches/cpumask-disable-offstack-on-rt.patch @@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig -@@ -940,7 +940,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT +@@ -938,7 +938,7 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL @@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> If unsure, say N. 
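For context, what the option toggles: with CPUMASK_OFFSTACK=y, cpumask_var_t is a pointer and alloc_cpumask_var() performs a real allocation (exactly the kind of hidden kmalloc() an RT kernel wants out of hot paths); with the option off, the mask is an on-stack array and the alloc/free pair is close to free. An illustrative sketch of the affected API, not code from this patch (copy_online_mask() is a made-up name):

	#include <linux/cpumask.h>
	#include <linux/slab.h>

	static int copy_online_mask(void)
	{
		cpumask_var_t mask;

		/* kmalloc()s with CPUMASK_OFFSTACK=y, near no-op otherwise */
		if (!alloc_cpumask_var(&mask, GFP_KERNEL))
			return -ENOMEM;
		cpumask_copy(mask, cpu_online_mask);
		/* ... use mask ... */
		free_cpumask_var(mask);
		return 0;
	}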
--- a/lib/Kconfig +++ b/lib/Kconfig -@@ -434,6 +434,7 @@ config CHECK_SIGNATURE +@@ -441,6 +441,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS diff --git a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch index 3434899bc945..21ac780719c1 100644 --- a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch +++ b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch @@ -56,8 +56,8 @@ Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Cc: linux-rt-users <linux-rt-users@vger.kernel.org> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- - drivers/gpu/drm/i915/intel_sprite.c | 11 +++++++---- - 1 file changed, 7 insertions(+), 4 deletions(-) + drivers/gpu/drm/i915/intel_sprite.c | 13 ++++++++----- + 1 file changed, 8 insertions(+), 5 deletions(-) --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include "intel_drv.h" #include "intel_frontbuffer.h" #include <drm/i915_drm.h> -@@ -74,6 +75,8 @@ int intel_usecs_to_scanlines(const struc +@@ -60,6 +61,8 @@ int intel_usecs_to_scanlines(const struc #define VBLANK_EVASION_TIME_US 100 #endif @@ -79,15 +79,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * intel_pipe_update_start() - start update of a set of display registers * @new_crtc_state: the new crtc state @@ -107,7 +110,7 @@ void intel_pipe_update_start(const struc - VBLANK_EVASION_TIME_US); - max = vblank_start - 1; + if (intel_psr_wait_for_idle(new_crtc_state)) + DRM_ERROR("PSR idle timed out, atomic update may fail\n"); - local_irq_disable(); + local_lock_irq(pipe_update_lock); - if (min <= 0 || max <= 0) - return; -@@ -137,11 +140,11 @@ void intel_pipe_update_start(const struc + crtc->debug.min_vbl = min; + crtc->debug.max_vbl = max; +@@ -131,11 +134,11 @@ void intel_pipe_update_start(const struc break; } @@ -101,7 +101,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } finish_wait(wq, &wait); -@@ -206,7 +209,7 @@ void intel_pipe_update_end(struct intel_ +@@ -168,7 +171,7 @@ void intel_pipe_update_start(const struc + return; + + irq_disable: +- local_irq_disable(); ++ local_lock_irq(pipe_update_lock); + } + + /** +@@ -204,7 +207,7 @@ void intel_pipe_update_end(struct intel_ new_crtc_state->base.event = NULL; } diff --git a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch index 57dc82b07f6f..208b03f5542f 100644 --- a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch +++ b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c -@@ -1009,6 +1009,7 @@ static bool i915_get_crtc_scanoutpos(str +@@ -1025,6 +1025,7 @@ static bool i915_get_crtc_scanoutpos(str spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ @@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Get optional system timestamp before query. 
*/ if (stime) -@@ -1060,6 +1061,7 @@ static bool i915_get_crtc_scanoutpos(str +@@ -1076,6 +1077,7 @@ static bool i915_get_crtc_scanoutpos(str *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ diff --git a/patches/efi-Allow-efi-runtime.patch b/patches/efi-Allow-efi-runtime.patch index a700ca2dbb67..5a3dfd3b433b 100644 --- a/patches/efi-Allow-efi-runtime.patch +++ b/patches/efi-Allow-efi-runtime.patch @@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c -@@ -110,6 +110,9 @@ static int __init parse_efi_cmdline(char +@@ -113,6 +113,9 @@ static int __init parse_efi_cmdline(char if (parse_option_str(str, "noruntime")) disable_runtime = true; diff --git a/patches/efi-Disable-runtime-services-on-RT.patch b/patches/efi-Disable-runtime-services-on-RT.patch index becb502605e5..30094a01290e 100644 --- a/patches/efi-Disable-runtime-services-on-RT.patch +++ b/patches/efi-Disable-runtime-services-on-RT.patch @@ -28,9 +28,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c -@@ -84,7 +84,7 @@ struct mm_struct efi_mm = { - .mmlist = LIST_HEAD_INIT(efi_mm.mmlist), - }; +@@ -87,7 +87,7 @@ struct mm_struct efi_mm = { + + struct workqueue_struct *efi_rts_wq; -static bool disable_runtime; +static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE); diff --git a/patches/epoll-use-get-cpu-light.patch b/patches/epoll-use-get-cpu-light.patch index d7c97ec100bb..79e571153fe1 100644 --- a/patches/epoll-use-get-cpu-light.patch +++ b/patches/epoll-use-get-cpu-light.patch @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/fs/eventpoll.c +++ b/fs/eventpoll.c -@@ -563,12 +563,12 @@ static int ep_poll_wakeup_proc(void *pri +@@ -571,12 +571,12 @@ static int ep_poll_wakeup_proc(void *pri static void ep_poll_safewake(wait_queue_head_t *wq) { diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch index 421b61358ed1..443e3ec835f4 100644 --- a/patches/fs-aio-simple-simple-work.patch +++ b/patches/fs-aio-simple-simple-work.patch @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/aio.c +++ b/fs/aio.c -@@ -40,6 +40,7 @@ +@@ -42,6 +42,7 @@ #include <linux/ramfs.h> #include <linux/percpu-refcount.h> #include <linux/mount.h> @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <asm/kmap_types.h> #include <linux/uaccess.h> -@@ -118,6 +119,7 @@ struct kioctx { +@@ -120,6 +121,7 @@ struct kioctx { long nr_pages; struct rcu_work free_rwork; /* see free_ioctx() */ @@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * signals when all in-flight requests are done -@@ -256,6 +258,7 @@ static int __init aio_setup(void) +@@ -254,6 +256,7 @@ static int __init aio_setup(void) .mount = aio_mount, .kill_sb = kill_anon_super, }; @@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> aio_mnt = kern_mount(&aio_fs); if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); -@@ -597,9 +600,9 @@ static void free_ioctx_reqs(struct percp +@@ -595,9 +598,9 @@ static void free_ioctx_reqs(struct percp * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be. 
*/ @@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); -@@ -617,6 +620,14 @@ static void free_ioctx_users(struct perc +@@ -615,6 +618,14 @@ static void free_ioctx_users(struct perc percpu_ref_put(&ctx->reqs); } diff --git a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch index 7ab1869003e4..9dcb00b80e8d 100644 --- a/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch +++ b/patches/fs-dcache-bring-back-explicit-INIT_HLIST_BL_HEAD-in.patch @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -3116,6 +3116,8 @@ static int __init set_dhash_entries(char +@@ -3062,6 +3062,8 @@ static int __init set_dhash_entries(char static void __init dcache_init_early(void) { @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ -@@ -3132,11 +3134,16 @@ static void __init dcache_init_early(voi +@@ -3078,11 +3080,16 @@ static void __init dcache_init_early(voi NULL, 0, 0); @@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature -@@ -3160,6 +3167,10 @@ static void __init dcache_init(void) +@@ -3106,6 +3113,10 @@ static void __init dcache_init(void) NULL, 0, 0); diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index c8a768865a3e..8835c69049d0 100644 --- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2431,9 +2431,10 @@ EXPORT_SYMBOL(d_rehash); +@@ -2404,9 +2404,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { @@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return n; cpu_relax(); } -@@ -2441,7 +2442,8 @@ static inline unsigned start_dir_add(str +@@ -2414,7 +2415,8 @@ static inline unsigned start_dir_add(str static inline void end_dir_add(struct inode *dir, unsigned n) { @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void d_wait_lookup(struct dentry *dentry) -@@ -2474,7 +2476,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2447,7 +2449,7 @@ struct dentry *d_alloc_parallel(struct d retry: rcu_read_lock(); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { -@@ -2502,7 +2504,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct d } hlist_bl_lock(b); @@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/fs.h +++ b/include/linux/fs.h -@@ -657,7 +657,7 @@ struct inode { +@@ -669,7 +669,7 @@ struct inode { struct block_device *i_bdev; struct cdev *i_cdev; char *i_link; diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch index 
825ded4aa319..a027688b9a38 100644 --- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch +++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -22,8 +22,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> +#include <linux/delay.h> #include "autofs_i.h" - static unsigned long now; -@@ -148,7 +149,7 @@ static struct dentry *get_next_positive_ + /* Check if a dentry can be expired */ +@@ -153,7 +154,7 @@ static struct dentry *get_next_positive_ parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); @@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/security.h> #include <linux/cred.h> #include <linux/idr.h> -@@ -353,8 +354,11 @@ int __mnt_want_write(struct vfsmount *m) +@@ -327,8 +328,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch index e585b21af764..b9515da52876 100644 --- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2448,21 +2448,24 @@ static inline void end_dir_add(struct in +@@ -2421,21 +2421,24 @@ static inline void end_dir_add(struct in static void d_wait_lookup(struct dentry *dentry) { @@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + + INIT_LIST_HEAD(&__wait.task_list); + do { -+ prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); ++ prepare_to_swait_exclusive(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&dentry->d_lock); + schedule(); + spin_lock(&dentry->d_lock); @@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2577,7 +2580,7 @@ void __d_lookup_done(struct dentry *dent +@@ -2550,7 +2553,7 @@ void __d_lookup_done(struct dentry *dent hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> INIT_HLIST_NODE(&dentry->d_u.d_alias); --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c -@@ -1196,7 +1196,7 @@ static int fuse_direntplus_link(struct f +@@ -1203,7 +1203,7 @@ static int fuse_direntplus_link(struct f struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; @@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* --- a/fs/namei.c +++ b/fs/namei.c -@@ -1604,7 +1604,7 @@ static struct dentry *__lookup_slow(cons +@@ -1645,7 +1645,7 @@ static struct dentry *__lookup_slow(cons { struct dentry *dentry, *old; struct inode *inode = dir->d_inode; @@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Don't go there if it's already dead */ if (unlikely(IS_DEADDIR(inode))) -@@ -3121,7 +3121,7 @@ static int lookup_open(struct nameidata +@@ -3135,7 +3135,7 @@ static int lookup_open(struct nameidata struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; @@ -120,9 +120,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); -@@ -1454,7 +1454,7 @@ int nfs_atomic_open(struct inode *dir, s +@@ -1459,7 
+1459,7 @@ int nfs_atomic_open(struct inode *dir, s struct file *file, unsigned open_flags, - umode_t mode, int *opened) + umode_t mode) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); @@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> spin_lock(&dentry->d_lock); --- a/fs/proc/base.c +++ b/fs/proc/base.c -@@ -1878,7 +1878,7 @@ bool proc_fill_cache(struct file *file, +@@ -1876,7 +1876,7 @@ bool proc_fill_cache(struct file *file, child = d_hash_and_lookup(dir, &qname); if (!child) { @@ -182,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ -@@ -238,7 +238,7 @@ extern struct dentry * d_alloc(struct de +@@ -236,7 +236,7 @@ extern struct dentry * d_alloc(struct de extern struct dentry * d_alloc_anon(struct super_block *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, @@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> extern struct dentry * d_exact_alias(struct dentry *, struct inode *); --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h -@@ -1536,7 +1536,7 @@ struct nfs_unlinkdata { +@@ -1549,7 +1549,7 @@ struct nfs_unlinkdata { struct nfs_removeargs args; struct nfs_removeres res; struct dentry *dentry; diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch index b860ef042536..4ac463250fbc 100644 --- a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch +++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c -@@ -1775,7 +1775,11 @@ int nfs_rmdir(struct inode *dir, struct +@@ -1786,7 +1786,11 @@ int nfs_rmdir(struct inode *dir, struct trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { -@@ -1785,7 +1789,11 @@ int nfs_rmdir(struct inode *dir, struct +@@ -1796,7 +1800,11 @@ int nfs_rmdir(struct inode *dir, struct case -ENOENT: nfs_dentry_handle_enoent(dentry); } diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch index 3b725128bd73..b5e2a42e8bfb 100644 --- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch +++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch @@ -10,9 +10,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> fs/buffer.c | 21 +++++++-------------- fs/ext4/page-io.c | 6 ++---- fs/ntfs/aops.c | 10 +++------- - fs/xfs/xfs_aops.c | 6 ++---- include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++ - 5 files changed, 48 insertions(+), 29 deletions(-) + 4 files changed, 46 insertions(+), 25 deletions(-) --- a/fs/buffer.c +++ b/fs/buffer.c @@ -75,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } EXPORT_SYMBOL(end_buffer_async_write); -@@ -3349,6 +3341,7 @@ struct buffer_head *alloc_buffer_head(gf +@@ -3360,6 +3352,7 @@ struct buffer_head *alloc_buffer_head(gf struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); @@ -138,28 +137,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /** ---- 
a/fs/xfs/xfs_aops.c -+++ b/fs/xfs/xfs_aops.c -@@ -108,8 +108,7 @@ xfs_finish_page_writeback( - ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE); - ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0); - -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &head->b_state); -+ flags = bh_uptodate_lock_irqsave(head); - do { - if (off >= bvec->bv_offset && - off < bvec->bv_offset + bvec->bv_len) { -@@ -131,8 +130,7 @@ xfs_finish_page_writeback( - } - off += bh->b_size; - } while ((bh = bh->b_this_page) != head); -- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(head, flags); - - if (!busy) - end_page_writeback(bvec->bv_page); --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -76,8 +76,42 @@ struct buffer_head { diff --git a/patches/fscache-initialize-cookie-hash-table-raw-spinlocks.patch b/patches/fscache-initialize-cookie-hash-table-raw-spinlocks.patch index e9fe577eb752..8dd59acf1224 100644 --- a/patches/fscache-initialize-cookie-hash-table-raw-spinlocks.patch +++ b/patches/fscache-initialize-cookie-hash-table-raw-spinlocks.patch @@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/fscache/cookie.c +++ b/fs/fscache/cookie.c -@@ -973,3 +973,11 @@ int __fscache_check_consistency(struct f +@@ -962,3 +962,11 @@ int __fscache_check_consistency(struct f return -ESTALE; } EXPORT_SYMBOL(__fscache_check_consistency); @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +} --- a/fs/fscache/main.c +++ b/fs/fscache/main.c -@@ -151,6 +151,7 @@ static int __init fscache_init(void) +@@ -149,6 +149,7 @@ static int __init fscache_init(void) ret = -ENOMEM; goto error_cookie_jar; } diff --git a/patches/ftrace-Fix-trace-header-alignment.patch b/patches/ftrace-Fix-trace-header-alignment.patch index c771ad07685d..2baa6c6adf66 100644 --- a/patches/ftrace-Fix-trace-header-alignment.patch +++ b/patches/ftrace-Fix-trace-header-alignment.patch @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -3347,17 +3347,17 @@ get_total_entries(struct trace_buffer *b +@@ -3348,17 +3348,17 @@ get_total_entries(struct trace_buffer *b static void print_lat_help_header(struct seq_file *m) { diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch index ee7c2d0e14ec..15e60523de06 100644 --- a/patches/ftrace-migrate-disable-tracing.patch +++ b/patches/ftrace-migrate-disable-tracing.patch @@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #define TRACE_EVENT_TYPE_MAX \ --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2145,6 +2145,8 @@ tracing_generic_entry_update(struct trac +@@ -2146,6 +2146,8 @@ tracing_generic_entry_update(struct trac ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | (test_preempt_need_resched() ? 
TRACE_FLAG_PREEMPT_RESCHED : 0); @@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); -@@ -3348,9 +3350,10 @@ static void print_lat_help_header(struct +@@ -3349,9 +3351,10 @@ static void print_lat_help_header(struct "# | / _----=> need-resched \n" "# || / _---=> hardirq/softirq \n" "# ||| / _--=> preempt-depth \n" @@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static void print_event_info(struct trace_buffer *buf, struct seq_file *m) --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c -@@ -187,6 +187,8 @@ static int trace_define_common_fields(vo +@@ -188,6 +188,8 @@ static int trace_define_common_fields(vo __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); @@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c -@@ -493,6 +493,11 @@ int trace_print_lat_fmt(struct trace_seq +@@ -494,6 +494,11 @@ int trace_print_lat_fmt(struct trace_seq else trace_seq_putc(s, '.'); diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch index 460c4631a36f..b76cd28a20e5 100644 --- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch +++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -2261,7 +2261,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state) +@@ -2270,7 +2270,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state) * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. 
* diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch index 426f96bd18a5..6050bf9b8baa 100644 --- a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch +++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \ __RWLOCK_RT_INITIALIZER(cpuhp_pin_lock); #endif -@@ -293,6 +293,7 @@ static int cpu_hotplug_disabled; +@@ -290,6 +290,7 @@ static int cpu_hotplug_disabled; */ void pin_current_cpu(void) { @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct rt_rw_lock *cpuhp_pin; unsigned int cpu; int ret; -@@ -317,6 +318,7 @@ void pin_current_cpu(void) +@@ -314,6 +315,7 @@ void pin_current_cpu(void) goto again; } current->pinned_on_cpu = cpu; @@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } /** -@@ -324,6 +326,7 @@ void pin_current_cpu(void) +@@ -321,6 +323,7 @@ void pin_current_cpu(void) */ void unpin_current_cpu(void) { @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock); if (WARN_ON(current->pinned_on_cpu != smp_processor_id())) -@@ -331,6 +334,7 @@ void unpin_current_cpu(void) +@@ -328,6 +331,7 @@ void unpin_current_cpu(void) current->pinned_on_cpu = -1; __read_rt_unlock(cpuhp_pin); @@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock); -@@ -889,7 +893,9 @@ static int take_cpu_down(void *_param) +@@ -881,7 +885,9 @@ static int take_cpu_down(void *_param) static int takedown_cpu(unsigned int cpu) { @@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; -@@ -902,14 +908,18 @@ static int takedown_cpu(unsigned int cpu +@@ -894,14 +900,18 @@ static int takedown_cpu(unsigned int cpu */ irq_lock_sparse(); @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* CPU refused to die */ irq_unlock_sparse(); /* Unpark the hotplug thread so we can rollback there */ -@@ -928,7 +938,9 @@ static int takedown_cpu(unsigned int cpu +@@ -920,7 +930,9 @@ static int takedown_cpu(unsigned int cpu wait_for_ap_thread(st, false); BUG_ON(st->state != CPUHP_AP_IDLE_DEAD); diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch index 31fd234b8669..9645f75c9d6d 100644 --- a/patches/hotplug-light-get-online-cpus.patch +++ b/patches/hotplug-light-get-online-cpus.patch @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/cpu.h +++ b/include/linux/cpu.h -@@ -110,6 +110,8 @@ extern void cpu_hotplug_disable(void); +@@ -111,6 +111,8 @@ extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); void clear_tasks_mm_cpumask(int cpu); int cpu_down(unsigned int cpu); @@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #else /* CONFIG_HOTPLUG_CPU */ -@@ -120,6 +122,9 @@ static inline void cpus_read_unlock(void +@@ -122,6 +124,9 @@ static inline int cpus_read_trylock(voi static inline void lockdep_assert_cpus_held(void) { } static inline void cpu_hotplug_disable(void) { } static inline void cpu_hotplug_enable(void) { } @@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Wrappers which go away once all 
code is converted */ --- a/kernel/cpu.c +++ b/kernel/cpu.c -@@ -283,6 +283,21 @@ static int cpu_hotplug_disabled; +@@ -280,6 +280,21 @@ static int cpu_hotplug_disabled; #ifdef CONFIG_HOTPLUG_CPU @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void cpus_read_lock(void) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7220,6 +7220,7 @@ void migrate_disable(void) +@@ -7197,6 +7197,7 @@ void migrate_disable(void) } preempt_disable(); @@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> migrate_disable_update_cpus_allowed(p); p->migrate_disable = 1; -@@ -7285,12 +7286,15 @@ void migrate_enable(void) +@@ -7262,12 +7263,15 @@ void migrate_enable(void) arg.task = p; arg.dest_cpu = dest_cpu; diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch index bdae2fa9eff9..4a1fb25269bb 100644 --- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch +++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch @@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c -@@ -2192,7 +2192,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc +@@ -2245,7 +2245,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc apic->vcpu = vcpu; hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, @@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> timer->function = perf_mux_hrtimer_handler; } -@@ -9176,7 +9176,7 @@ static void perf_swevent_init_hrtimer(st +@@ -9173,7 +9173,7 @@ static void perf_swevent_init_hrtimer(st if (!is_sampling_event(event)) return; @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -321,7 +321,7 @@ static void hrtick_rq_init(struct rq *rq +@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq rq->hrtick_csd.info = rq; #endif @@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #else /* CONFIG_SCHED_HRTICK */ --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -1053,7 +1053,7 @@ void init_dl_task_timer(struct sched_dl_ +@@ -1054,7 +1054,7 @@ void init_dl_task_timer(struct sched_dl_ { struct hrtimer *timer = &dl_se->dl_timer; @@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -5184,9 +5184,9 @@ void init_cfs_bandwidth(struct cfs_bandw +@@ -4878,9 +4878,9 @@ void init_cfs_bandwidth(struct cfs_bandw cfs_b->period = ns_to_ktime(default_cfs_period()); INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); @@ -112,11 +112,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> - hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD); cfs_b->slack_timer.function = sched_cfs_slack_timer; + cfs_b->distribute_running = 0; } - --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c -@@ -43,8 +43,8 @@ void init_rt_bandwidth(struct rt_bandwid +@@ -45,8 +45,8 @@ void init_rt_bandwidth(struct rt_bandwid raw_spin_lock_init(&rt_b->rt_runtime_lock); @@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> memset(timer, 0, sizeof(struct hrtimer)); cpu_base = raw_cpu_ptr(&hrtimer_bases); -@@ -1682,6 +1691,14 @@ static void __hrtimer_init_sleeper(struc +@@ -1681,6 +1690,14 @@ static void __hrtimer_init_sleeper(struc enum 
hrtimer_mode mode, struct task_struct *task) { @@ -198,7 +198,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Get the next period (per-CPU) */ --- a/kernel/watchdog.c +++ b/kernel/watchdog.c -@@ -463,7 +463,7 @@ static void watchdog_enable(unsigned int +@@ -483,7 +483,7 @@ static void watchdog_enable(unsigned int * Start the timer first to prevent the NMI watchdog triggering * before the timer has a chance to fire. */ diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch index 1d311bfc5725..544ce46ce102 100644 --- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch +++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch @@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> --- a/block/blk-mq.c +++ b/block/blk-mq.c -@@ -2986,10 +2986,9 @@ static bool blk_mq_poll_hybrid_sleep(str +@@ -3115,10 +3115,9 @@ static bool blk_mq_poll_hybrid_sleep(str kt = nsecs; mode = HRTIMER_MODE_REL; @@ -40,15 +40,15 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> break; --- a/drivers/staging/android/vsoc.c +++ b/drivers/staging/android/vsoc.c -@@ -438,12 +438,10 @@ static int handle_vsoc_cond_wait(struct - - if (!timespec_valid(&ts)) +@@ -437,12 +437,10 @@ static int handle_vsoc_cond_wait(struct return -EINVAL; + wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec); + - hrtimer_init_on_stack(&to->timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); + hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS, current); - hrtimer_set_expires_range_ns(&to->timer, timespec_to_ktime(ts), + hrtimer_set_expires_range_ns(&to->timer, wake_time, current->timer_slack_ns); - - hrtimer_init_sleeper(to, current); @@ -158,7 +158,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> } --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1649,13 +1649,44 @@ static enum hrtimer_restart hrtimer_wake +@@ -1648,13 +1648,44 @@ static enum hrtimer_restart hrtimer_wake return HRTIMER_NORESTART; } @@ -204,7 +204,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts) { switch(restart->nanosleep.type) { -@@ -1679,8 +1710,6 @@ static int __sched do_nanosleep(struct h +@@ -1678,8 +1709,6 @@ static int __sched do_nanosleep(struct h { struct restart_block *restart; @@ -213,7 +213,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> do { set_current_state(TASK_INTERRUPTIBLE); hrtimer_start_expires(&t->timer, mode); -@@ -1717,10 +1746,9 @@ static long __sched hrtimer_nanosleep_re +@@ -1716,10 +1745,9 @@ static long __sched hrtimer_nanosleep_re struct hrtimer_sleeper t; int ret; @@ -226,7 +226,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> ret = do_nanosleep(&t, HRTIMER_MODE_ABS); destroy_hrtimer_on_stack(&t.timer); return ret; -@@ -1738,7 +1766,7 @@ long hrtimer_nanosleep(const struct time +@@ -1737,7 +1765,7 @@ long hrtimer_nanosleep(const struct time if (dl_task(current) || rt_task(current)) slack = 0; @@ -235,7 +235,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack); ret = do_nanosleep(&t, mode); if (ret != -ERESTART_RESTARTBLOCK) -@@ -1937,11 +1965,9 @@ schedule_hrtimeout_range_clock(ktime_t * +@@ -1936,11 +1964,9 @@ schedule_hrtimeout_range_clock(ktime_t * return -EINTR; } @@ -250,7 +250,7 @@ 
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> if (likely(t.task)) --- a/net/core/pktgen.c +++ b/net/core/pktgen.c -@@ -2162,7 +2162,8 @@ static void spin(struct pktgen_dev *pkt_ +@@ -2160,7 +2160,8 @@ static void spin(struct pktgen_dev *pkt_ s64 remaining; struct hrtimer_sleeper t; @@ -260,7 +260,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> hrtimer_set_expires(&t.timer, spin_until); remaining = ktime_to_ns(hrtimer_expires_remaining(&t.timer)); -@@ -2177,7 +2178,6 @@ static void spin(struct pktgen_dev *pkt_ +@@ -2175,7 +2176,6 @@ static void spin(struct pktgen_dev *pkt_ } while (ktime_compare(end_time, spin_until) < 0); } else { /* see do_nanosleep */ diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch index 40e012ee235c..e9838c7bb602 100644 --- a/patches/hrtimers-prepare-full-preemption.patch +++ b/patches/hrtimers-prepare-full-preemption.patch @@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1847,6 +1875,9 @@ int hrtimers_prepare_cpu(unsigned int cp +@@ -1846,6 +1874,9 @@ int hrtimers_prepare_cpu(unsigned int cp cpu_base->softirq_next_timer = NULL; cpu_base->expires_next = KTIME_MAX; cpu_base->softirq_expires_next = KTIME_MAX; @@ -163,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c -@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itime +@@ -215,6 +215,7 @@ int do_setitimer(int which, struct itime /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); @@ -173,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> expires = timeval_to_ktime(value->it_value); --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c -@@ -478,7 +478,7 @@ static struct k_itimer * alloc_posix_tim +@@ -466,7 +466,7 @@ static struct k_itimer * alloc_posix_tim static void k_itimer_rcu_free(struct rcu_head *head) { @@ -182,7 +182,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> kmem_cache_free(posix_timers_cache, tmr); } -@@ -495,7 +495,7 @@ static void release_posix_timer(struct k +@@ -483,7 +483,7 @@ static void release_posix_timer(struct k } put_pid(tmr->it_pid); sigqueue_free(tmr->sigq); @@ -191,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static int common_timer_create(struct k_itimer *new_timer) -@@ -834,6 +834,22 @@ static void common_hrtimer_arm(struct k_ +@@ -824,6 +824,22 @@ static void common_hrtimer_arm(struct k_ hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } @@ -214,7 +214,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static int common_hrtimer_try_to_cancel(struct k_itimer *timr) { return hrtimer_try_to_cancel(&timr->it.real.timer); -@@ -898,6 +914,7 @@ static int do_timer_settime(timer_t time +@@ -888,6 +904,7 @@ static int do_timer_settime(timer_t time if (!timr) return -EINVAL; @@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> kc = timr->kclock; if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; -@@ -906,9 +923,12 @@ static int do_timer_settime(timer_t time +@@ -896,9 +913,12 @@ static int do_timer_settime(timer_t time unlock_timer(timr, flag); if (error == TIMER_RETRY) { @@ -235,7 +235,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return error; } -@@ -990,10 +1010,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t +@@ -980,10 +1000,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t if (!timer) return 
-EINVAL; @@ -251,7 +251,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> spin_lock(&current->sighand->siglock); list_del(&timer->list); -@@ -1019,8 +1044,18 @@ static void itimer_delete(struct k_itime +@@ -1009,8 +1034,18 @@ static void itimer_delete(struct k_itime retry_delete: spin_lock_irqsave(&timer->it_lock, flags); diff --git a/patches/iommu-amd-drop-irqs_disabled-warn_on.patch b/patches/iommu-amd-drop-irqs_disabled-warn_on.patch deleted file mode 100644 index b0eb9d34b615..000000000000 --- a/patches/iommu-amd-drop-irqs_disabled-warn_on.patch +++ /dev/null @@ -1,52 +0,0 @@ -From: Anna-Maria Gleixner <anna-maria@linutronix.de> -Subject: [PATCH] iommu/amd: Remove redundant WARN_ON() -Date: Fri, 20 Jul 2018 10:45:45 +0200 - -The WARN_ON() was introduced in commit 272e4f99e966 ("iommu/amd: WARN -when __[attach|detach]_device are called with irqs enabled") to ensure -that the domain->lock is taken in proper irqs disabled context. This -is required, because the domain->lock is taken as well in irq -context. - -The proper context check by the WARN_ON() is redundant, because it is -already covered by LOCKDEP. When working with locks and changing -context, a run with LOCKDEP is required anyway and would detect the -wrong lock context. - -Furthermore all callers for those functions are within the same file -and all callers acquire another lock which already disables interrupts. - -Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de> -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - drivers/iommu/amd_iommu.c | 12 ------------ - 1 file changed, 12 deletions(-) - ---- a/drivers/iommu/amd_iommu.c -+++ b/drivers/iommu/amd_iommu.c -@@ -1950,12 +1950,6 @@ static int __attach_device(struct iommu_ - { - int ret; - -- /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not. -- */ -- WARN_ON(!irqs_disabled()); -- - /* lock domain */ - spin_lock(&domain->lock); - -@@ -2121,12 +2115,6 @@ static void __detach_device(struct iommu - { - struct protection_domain *domain; - -- /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not.
-- */ -- WARN_ON(!irqs_disabled()); -- - domain = dev_data->domain; - - spin_lock(&domain->lock); diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch index f68bf01438bf..da2d07e2d07b 100644 --- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch +++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -960,7 +960,15 @@ irq_forced_thread_fn(struct irq_desc *de +@@ -967,7 +967,15 @@ irq_forced_thread_fn(struct irq_desc *de local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); @@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return ret; } -@@ -1470,6 +1478,9 @@ static int +@@ -1476,6 +1484,9 @@ static int irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } diff --git a/patches/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch b/patches/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch deleted file mode 100644 index 36d87db119f3..000000000000 --- a/patches/irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch +++ /dev/null @@ -1,92 +0,0 @@ -From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Wed, 18 Jul 2018 17:42:04 +0200 -Subject: [PATCH] irqchip/gic-v3-its: Make its_lock a raw_spin_lock_t - -[ Upstream commit a8db74564b0c634667e1722264bde303d296f566 ] - -The its_lock lock is held while a new device is added to the list and -during setup while the CPU is booted. Even on -RT the CPU-bootup is -performed with disabled interrupts. - -Make its_lock a raw_spin_lock_t. 
- -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> ---- - drivers/irqchip/irq-gic-v3-its.c | 18 +++++++++--------- - 1 file changed, 9 insertions(+), 9 deletions(-) - ---- a/drivers/irqchip/irq-gic-v3-its.c -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -160,7 +160,7 @@ static struct { - } vpe_proxy; - - static LIST_HEAD(its_nodes); --static DEFINE_SPINLOCK(its_lock); -+static DEFINE_RAW_SPINLOCK(its_lock); - static struct rdists *gic_rdists; - static struct irq_domain *its_parent; - -@@ -1997,12 +1997,12 @@ static void its_cpu_init_collections(voi - { - struct its_node *its; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - - list_for_each_entry(its, &its_nodes, entry) - its_cpu_init_collection(its); - -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - } - - static struct its_device *its_find_device(struct its_node *its, u32 dev_id) -@@ -3070,7 +3070,7 @@ static int its_save_disable(void) - struct its_node *its; - int err = 0; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_for_each_entry(its, &its_nodes, entry) { - void __iomem *base; - -@@ -3102,7 +3102,7 @@ static int its_save_disable(void) - writel_relaxed(its->ctlr_save, base + GITS_CTLR); - } - } -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - - return err; - } -@@ -3112,7 +3112,7 @@ static void its_restore_enable(void) - struct its_node *its; - int ret; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_for_each_entry(its, &its_nodes, entry) { - void __iomem *base; - int i; -@@ -3164,7 +3164,7 @@ static void its_restore_enable(void) - GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) - its_cpu_init_collection(its); - } -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - } - - static struct syscore_ops its_syscore_ops = { -@@ -3398,9 +3398,9 @@ static int __init its_probe_one(struct r - if (err) - goto out_free_tables; - -- spin_lock(&its_lock); -+ raw_spin_lock(&its_lock); - list_add(&its->entry, &its_nodes); -- spin_unlock(&its_lock); -+ raw_spin_unlock(&its_lock); - - return 0; - diff --git a/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch b/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch index 20b2bab359bf..216cb2763cd1 100644 --- a/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch +++ b/patches/irqchip-gic-v3-its-Move-pending-table-allocation-to-.patch @@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -171,6 +171,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); +@@ -173,6 +173,7 @@ static DEFINE_RAW_SPINLOCK(vmovp_lock); static DEFINE_IDA(its_vpeid_ida); #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) @@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) -@@ -1555,7 +1556,7 @@ static void its_free_prop_table(struct p +@@ -1622,7 +1623,7 @@ static void its_free_prop_table(struct p get_order(LPI_PROPBASE_SZ)); } @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { phys_addr_t paddr; -@@ -1877,30 +1878,47 @@ static void its_free_pending_table(struc +@@ -1945,30 +1946,47 @@ static void its_free_pending_table(struc get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K))); } @@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior 
<bigeasy@linutronix.de> /* set PROPBASE */ val = (page_to_phys(gic_rdists->prop_page) | GICR_PROPBASER_InnerShareable | -@@ -1952,6 +1970,10 @@ static void its_cpu_init_lpis(void) +@@ -2020,6 +2038,10 @@ static void its_cpu_init_lpis(void) /* Make sure the GIC has seen the above */ dsb(sy); @@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void its_cpu_init_collection(struct its_node *its) -@@ -3427,16 +3449,6 @@ static int redist_disable_lpis(void) +@@ -3498,16 +3520,6 @@ static int redist_disable_lpis(void) u64 timeout = USEC_PER_SEC; u64 val; @@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!gic_rdists_supports_plpis()) { pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); return -ENXIO; -@@ -3446,7 +3458,18 @@ static int redist_disable_lpis(void) +@@ -3517,7 +3529,18 @@ static int redist_disable_lpis(void) if (!(val & GICR_CTLR_ENABLE_LPIS)) return 0; @@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> smp_processor_id()); add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); -@@ -3702,7 +3725,8 @@ int __init its_init(struct fwnode_handle +@@ -3773,7 +3796,8 @@ int __init its_init(struct fwnode_handle } gic_rdists = rdists; @@ -152,11 +152,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h -@@ -574,6 +574,7 @@ struct rdists { +@@ -585,6 +585,7 @@ struct rdists { void __iomem *rd_base; struct page *pend_page; phys_addr_t phys_base; + bool lpi_enabled; } __percpu *rdist; struct page *prop_page; - int id_bits; + u64 flags; diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch index 9be6b7c2b973..52c36c6d11ed 100644 --- a/patches/irqwork-push_most_work_into_softirq_context.patch +++ b/patches/irqwork-push_most_work_into_softirq_context.patch @@ -181,13 +181,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * Synchronize against the irq_work @entry, ensures the entry is not --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -1294,6 +1294,7 @@ static int rcu_implicit_dynticks_qs(stru - !rdp->rcu_iw_pending && rdp->rcu_iw_gpnum != rnp->gpnum && +@@ -1296,6 +1296,7 @@ static int rcu_implicit_dynticks_qs(stru + !rdp->rcu_iw_pending && rdp->rcu_iw_gp_seq != rnp->gp_seq && (rnp->ffmask & rdp->grpmask)) { init_irq_work(&rdp->rcu_iw, rcu_iw_handler); + rdp->rcu_iw.flags = IRQ_WORK_HARD_IRQ; rdp->rcu_iw_pending = true; - rdp->rcu_iw_gpnum = rnp->gpnum; + rdp->rcu_iw_gp_seq = rnp->gp_seq; irq_work_queue_on(&rdp->rcu_iw, rdp->cpu); --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch index 4a00cf5ce748..02184b19a913 100644 --- a/patches/jump-label-rt.patch +++ b/patches/jump-label-rt.patch @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig -@@ -50,7 +50,7 @@ config ARM +@@ -51,7 +51,7 @@ config ARM select HARDIRQS_SW_RESEND select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch index feb295d095d3..75e518436c66 100644 --- a/patches/kconfig-disable-a-few-options-rt.patch +++ b/patches/kconfig-disable-a-few-options-rt.patch @@ -12,7 +12,7 @@ Signed-off-by: Thomas 
Gleixner <tglx@linutronix.de> --- a/arch/Kconfig +++ b/arch/Kconfig -@@ -20,6 +20,7 @@ config OPROFILE +@@ -28,6 +28,7 @@ config OPROFILE tristate "OProfile system profiling" depends on PROFILING depends on HAVE_OPROFILE @@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> help --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -378,7 +378,7 @@ config NOMMU_INITIAL_TRIM_EXCESS +@@ -377,7 +377,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" diff --git a/patches/kconfig-preempt-rt-full.patch b/patches/kconfig-preempt-rt-full.patch index c868452cda7a..d1d7a5865a8f 100644 --- a/patches/kconfig-preempt-rt-full.patch +++ b/patches/kconfig-preempt-rt-full.patch @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt -@@ -67,6 +67,14 @@ config PREEMPT_RTB +@@ -69,6 +69,14 @@ config PREEMPT_RTB enables changes which are preliminary for the full preemptible RT kernel. diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch index f49672fb1c9b..245f3a6ee1cc 100644 --- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch +++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1771,6 +1771,11 @@ static void call_console_drivers(const c +@@ -1777,6 +1777,11 @@ static void call_console_drivers(const c if (!console_drivers) return; @@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> migrate_disable(); for_each_console(con) { if (exclusive_console && con != exclusive_console) -@@ -2528,6 +2533,11 @@ void console_unblank(void) +@@ -2535,6 +2540,11 @@ void console_unblank(void) { struct console *c; diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 1715f02d904f..c27c4c44324d 100644 --- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -56,29 +56,30 @@ Cc: Ingo Molnar <mingo@elte.hu> Cc: Rafael J. 
Wysocki <rjw@rjwysocki.net> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- - arch/ia64/kernel/mca.c | 2 - - arch/mips/include/asm/switch_to.h | 4 +- - arch/mips/kernel/mips-mt-fpaff.c | 2 - - arch/mips/kernel/traps.c | 6 ++-- - arch/powerpc/platforms/cell/spufs/sched.c | 2 - - drivers/infiniband/hw/hfi1/affinity.c | 6 ++-- - drivers/infiniband/hw/hfi1/sdma.c | 3 -- - drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++--- - fs/proc/array.c | 4 +- - include/linux/sched.h | 5 ++- - init/init_task.c | 3 +- - kernel/cgroup/cpuset.c | 2 - - kernel/fork.c | 2 + - kernel/sched/core.c | 40 ++++++++++++++--------------- - kernel/sched/cpudeadline.c | 4 +- - kernel/sched/cpupri.c | 4 +- - kernel/sched/deadline.c | 6 ++-- - kernel/sched/fair.c | 32 +++++++++++------------ - kernel/sched/rt.c | 4 +- - kernel/trace/trace_hwlat.c | 2 - - lib/smp_processor_id.c | 2 - - samples/trace_events/trace-events-sample.c | 2 - - 22 files changed, 73 insertions(+), 71 deletions(-) + arch/ia64/kernel/mca.c | 2 - + arch/mips/include/asm/switch_to.h | 4 +- + arch/mips/kernel/mips-mt-fpaff.c | 2 - + arch/mips/kernel/traps.c | 6 ++-- + arch/powerpc/platforms/cell/spufs/sched.c | 2 - + arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c | 2 - + drivers/infiniband/hw/hfi1/affinity.c | 6 ++-- + drivers/infiniband/hw/hfi1/sdma.c | 3 -- + drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++-- + fs/proc/array.c | 4 +- + include/linux/sched.h | 5 ++- + init/init_task.c | 3 +- + kernel/cgroup/cpuset.c | 2 - + kernel/fork.c | 2 + + kernel/sched/core.c | 40 ++++++++++++++-------------- + kernel/sched/cpudeadline.c | 4 +- + kernel/sched/cpupri.c | 4 +- + kernel/sched/deadline.c | 6 ++-- + kernel/sched/fair.c | 32 +++++++++++----------- + kernel/sched/rt.c | 4 +- + kernel/trace/trace_hwlat.c | 2 - + lib/smp_processor_id.c | 2 - + samples/trace_events/trace-events-sample.c | 2 - + 23 files changed, 74 insertions(+), 72 deletions(-) --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -124,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> out_unlock: --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c -@@ -1176,12 +1176,12 @@ static void mt_ase_fp_affinity(void) +@@ -1174,12 +1174,12 @@ static void mt_ase_fp_affinity(void) * restricted the allowed set to exclude any CPUs with FPUs, * we'll skip the procedure. */ @@ -151,6 +152,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Save the current cpu id for spu interrupt routing. */ ctx->last_ran = raw_smp_processor_id(); +--- a/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c ++++ b/arch/x86/kernel/cpu/intel_rdt_pseudo_lock.c +@@ -1435,7 +1435,7 @@ static int pseudo_lock_dev_mmap(struct f + * may be scheduled elsewhere and invalidate entries in the + * pseudo-locked region. 
+ */ +- if (!cpumask_subset(&current->cpus_allowed, &plr->d->cpu_mask)) { ++ if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) { + mutex_unlock(&rdtgroup_mutex); + return -EINVAL; + } --- a/drivers/infiniband/hw/hfi1/affinity.c +++ b/drivers/infiniband/hw/hfi1/affinity.c @@ -1037,7 +1037,7 @@ int hfi1_get_proc_affinity(int node) --- a/fs/proc/array.c +++ b/fs/proc/array.c --- a/include/linux/sched.h +++ b/include/linux/sched.h #ifdef CONFIG_PREEMPT_RCU int rcu_read_lock_nesting; -@@ -1397,7 +1398,7 @@ extern struct pid *cad_pid; +@@ -1389,7 +1390,7 @@ extern struct pid *cad_pid; #define PF_KTHREAD 0x00200000 /* I am a kernel thread */ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ --- a/init/init_task.c +++ b/init/init_task.c -@@ -64,7 +64,8 @@ struct task_struct init_task +@@ -71,7 +71,8 @@ struct task_struct init_task .static_prio = MAX_PRIO - 20, .normal_prio = MAX_PRIO - 20, .policy = SCHED_NORMAL, --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -844,6 +844,8 @@ static struct task_struct *dup_task_stru +@@ -845,6 +845,8 @@ static struct task_struct *dup_task_stru #ifdef CONFIG_STACKPROTECTOR tsk->stack_canary = get_random_canary(); #endif * One for us, one for whoever does the "release_task()" (usually --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -900,7 +900,7 @@ static inline bool is_per_cpu_kthread(st +@@ -877,7 +877,7 @@ static inline bool is_per_cpu_kthread(st */ static inline bool is_cpu_allowed(struct task_struct *p, int cpu) { - if (!cpumask_test_cpu(cpu, &p->cpus_allowed)) + if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; if (is_per_cpu_kthread(p)) -@@ -995,7 +995,7 @@ static int migration_cpu_stop(void *data +@@ -972,7 +972,7 @@ static int migration_cpu_stop(void *data local_irq_disable(); /* * We need to explicitly wake pending tasks before running - * sched_class::select_task_rq() below, since we want to reuse the + * sched_class::select_task_rq() below, since we want to reuse the * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/ sched_ttwu_pending(); -@@ -1026,7 +1026,7 @@ static int migration_cpu_stop(void *data +@@ -1003,7 +1003,7 @@ static int migration_cpu_stop(void *data */ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) { @@ -319,7 +331,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> p->nr_cpus_allowed = cpumask_weight(new_mask); } -@@ -1096,7 +1096,7 @@ static int __set_cpus_allowed_ptr(struct +@@ -1073,7 +1073,7 @@ static int __set_cpus_allowed_ptr(struct goto out; } @@ -328,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto out; if (!cpumask_intersects(new_mask, cpu_valid_mask)) { -@@ -1258,10 +1258,10 @@ static int migrate_swap_stop(void *data) +@@ -1236,10 +1236,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; @@ -341,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); -@@ -1302,10 +1302,10 @@ int migrate_swap(struct task_struct *cur +@@ -1281,10 +1281,10 @@ int migrate_swap(struct task_struct *cur if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; @@ -354,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); -@@ -1449,7 +1449,7 @@ void kick_process(struct task_struct *p) +@@ -1429,7 +1429,7 @@ void kick_process(struct task_struct *p) EXPORT_SYMBOL_GPL(kick_process); /* @@ -363,7 +375,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * A few notes on cpu_active vs cpu_online: * -@@ -1489,14 +1489,14 @@ static int select_fallback_rq(int cpu, s +@@ -1469,14 +1469,14 @@ static int select_fallback_rq(int cpu, s for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; @@ -380,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!is_cpu_allowed(p, dest_cpu)) continue; -@@ -1540,7 +1540,7 @@ static int select_fallback_rq(int cpu, s +@@ -1520,7 +1520,7 @@ static int select_fallback_rq(int cpu, s } /* @@ -389,7 +401,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) -@@ -1550,11 +1550,11 @@ int select_task_rq(struct task_struct *p +@@ -1530,11 +1530,11 @@ int select_task_rq(struct task_struct *p if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else @@ -403,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * CPU. * * Since this is common to all placement strategies, this lives here. -@@ -2426,7 +2426,7 @@ void wake_up_new_task(struct task_struct +@@ -2401,7 +2401,7 @@ void wake_up_new_task(struct task_struct #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: @@ -412,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4297,7 +4297,7 @@ static int __sched_setscheduler(struct t +@@ -4274,7 +4274,7 @@ static int __sched_setscheduler(struct t * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available. 
*/ @@ -421,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; -@@ -4896,7 +4896,7 @@ long sched_getaffinity(pid_t pid, struct +@@ -4873,7 +4873,7 @@ long sched_getaffinity(pid_t pid, struct goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -430,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: -@@ -5476,7 +5476,7 @@ int task_can_attach(struct task_struct * +@@ -5453,7 +5453,7 @@ int task_can_attach(struct task_struct * * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks @@ -439,7 +451,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; -@@ -5503,7 +5503,7 @@ int migrate_task_to(struct task_struct * +@@ -5480,7 +5480,7 @@ int migrate_task_to(struct task_struct * if (curr_cpu == target_cpu) return 0; @@ -448,7 +460,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return -EINVAL; /* TODO: This is not properly updating schedstats */ -@@ -5641,7 +5641,7 @@ static void migrate_tasks(struct rq *dea +@@ -5618,7 +5618,7 @@ static void migrate_tasks(struct rq *dea put_prev_task(rq, next); /* @@ -494,7 +506,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * We have to ensure that we have at least one bit --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c -@@ -538,7 +538,7 @@ static struct rq *dl_task_offline_migrat +@@ -539,7 +539,7 @@ static struct rq *dl_task_offline_migrat * If we cannot preempt any rq, fall back to pick any * online CPU: */ @@ -503,7 +515,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (cpu >= nr_cpu_ids) { /* * Failed to find any suitable CPU. -@@ -1820,7 +1820,7 @@ static void set_curr_task_dl(struct rq * +@@ -1824,7 +1824,7 @@ static void set_curr_task_dl(struct rq * static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -512,7 +524,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return 1; return 0; } -@@ -1970,7 +1970,7 @@ static struct rq *find_lock_later_rq(str +@@ -1974,7 +1974,7 @@ static struct rq *find_lock_later_rq(str /* Retry if something changed. */ if (double_lock_balance(rq, later_rq)) { if (unlikely(task_rq(task) != rq || @@ -523,16 +535,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> !task_on_rq_queued(task))) { --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -1616,7 +1616,7 @@ static void task_numa_compare(struct tas +@@ -1631,7 +1631,7 @@ static void task_numa_compare(struct tas + * be incurred if the tasks were swapped. 
*/ - if (cur) { - /* Skip this swap candidate if cannot move to the source CPU: */ -- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) -+ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) - goto unlock; + /* Skip this swap candidate if cannot move to the source cpu */ +- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed)) ++ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr)) + goto unlock; - /* -@@ -1726,7 +1726,7 @@ static void task_numa_find_cpu(struct ta + /* +@@ -1728,7 +1728,7 @@ static void task_numa_find_cpu(struct ta for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) { /* Skip this CPU if the source task cannot migrate */ @@ -541,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> continue; env->dst_cpu = cpu; -@@ -6018,7 +6018,7 @@ find_idlest_group(struct sched_domain *s +@@ -5711,7 +5711,7 @@ find_idlest_group(struct sched_domain *s /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_span(group), @@ -550,7 +562,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> continue; local_group = cpumask_test_cpu(this_cpu, -@@ -6150,7 +6150,7 @@ find_idlest_group_cpu(struct sched_group +@@ -5843,7 +5843,7 @@ find_idlest_group_cpu(struct sched_group return cpumask_first(sched_group_span(group)); /* Traverse only the allowed CPUs */ @@ -559,7 +571,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (available_idle_cpu(i)) { struct rq *rq = cpu_rq(i); struct cpuidle_state *idle = idle_get_state(rq); -@@ -6190,7 +6190,7 @@ static inline int find_idlest_cpu(struct +@@ -5883,7 +5883,7 @@ static inline int find_idlest_cpu(struct { int new_cpu = cpu; @@ -568,7 +580,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return prev_cpu; /* -@@ -6306,7 +6306,7 @@ static int select_idle_core(struct task_ +@@ -5999,7 +5999,7 @@ static int select_idle_core(struct task_ if (!test_idle_cores(target, false)) return -1; @@ -577,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> for_each_cpu_wrap(core, cpus, target) { bool idle = true; -@@ -6340,7 +6340,7 @@ static int select_idle_smt(struct task_s +@@ -6033,7 +6033,7 @@ static int select_idle_smt(struct task_s return -1; for_each_cpu(cpu, cpu_smt_mask(target)) { @@ -586,7 +598,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> continue; if (available_idle_cpu(cpu)) return cpu; -@@ -6403,7 +6403,7 @@ static int select_idle_cpu(struct task_s +@@ -6096,7 +6096,7 @@ static int select_idle_cpu(struct task_s for_each_cpu_wrap(cpu, sched_domain_span(sd), target) { if (!--nr) return -1; @@ -595,7 +607,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> continue; if (available_idle_cpu(cpu)) break; -@@ -6440,7 +6440,7 @@ static int select_idle_sibling(struct ta +@@ -6133,7 +6133,7 @@ static int select_idle_sibling(struct ta recent_used_cpu != target && cpus_share_cache(recent_used_cpu, target) && available_idle_cpu(recent_used_cpu) && @@ -604,7 +616,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Replace recent_used_cpu with prev as it is a potential * candidate for the next wake: -@@ -6624,7 +6624,7 @@ select_task_rq_fair(struct task_struct * +@@ -6317,7 +6317,7 @@ select_task_rq_fair(struct task_struct * if (sd_flag & SD_BALANCE_WAKE) { record_wakee(p); want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) @@ -613,7 +625,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } rcu_read_lock(); -@@ -7360,14 +7360,14 @@ int 
can_migrate_task(struct task_struct +@@ -7056,14 +7056,14 @@ int can_migrate_task(struct task_struct /* * We do not migrate tasks that are: * 1) throttled_lb_pair, or @@ -630,7 +642,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> int cpu; schedstat_inc(p->se.statistics.nr_failed_migrations_affine); -@@ -7387,7 +7387,7 @@ int can_migrate_task(struct task_struct +@@ -7083,7 +7083,7 @@ int can_migrate_task(struct task_struct /* Prevent to re-select dst_cpu via env's CPUs: */ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) { @@ -639,7 +651,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> env->flags |= LBF_DST_PINNED; env->new_dst_cpu = cpu; break; -@@ -7984,7 +7984,7 @@ check_cpu_capacity(struct rq *rq, struct +@@ -7704,7 +7704,7 @@ check_cpu_capacity(struct rq *rq, struct /* * Group imbalance indicates (and tries to solve) the problem where balancing @@ -648,7 +660,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * Imagine a situation of two groups of 4 CPUs each and 4 tasks each with a * cpumask covering 1 CPU of the first group and 3 CPUs of the second group. -@@ -8599,7 +8599,7 @@ static struct sched_group *find_busiest_ +@@ -8319,7 +8319,7 @@ static struct sched_group *find_busiest_ /* * If the busiest group is imbalanced the below checks don't * work because they assume all things are equal, which typically @@ -657,7 +669,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ if (busiest->group_type == group_imbalanced) goto force_balance; -@@ -8995,7 +8995,7 @@ static int load_balance(int this_cpu, st +@@ -8715,7 +8715,7 @@ static int load_balance(int this_cpu, st * if the curr task on busiest CPU can't be * moved to this_cpu: */ @@ -668,7 +680,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> env.flags |= LBF_ALL_PINNED; --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c -@@ -1601,7 +1601,7 @@ static void put_prev_task_rt(struct rq * +@@ -1611,7 +1611,7 @@ static void put_prev_task_rt(struct rq * static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && @@ -677,7 +689,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return 1; return 0; -@@ -1738,7 +1738,7 @@ static struct rq *find_lock_lowest_rq(st +@@ -1748,7 +1748,7 @@ static struct rq *find_lock_lowest_rq(st * Also make sure that it wasn't scheduled on its rq. */ if (unlikely(task_rq(task) != rq || @@ -688,7 +700,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> !task_on_rq_queued(task))) { --- a/kernel/trace/trace_hwlat.c +++ b/kernel/trace/trace_hwlat.c -@@ -279,7 +279,7 @@ static void move_to_next_cpu(void) +@@ -277,7 +277,7 @@ static void move_to_next_cpu(void) * of this thread, than stop migrating for the duration * of the current test. 
*/ diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index 1e99f118fbc9..7fa3033be146 100644 --- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <linux/vmacache.h> #include <linux/nsproxy.h> #include <linux/capability.h> -@@ -692,6 +693,15 @@ void __put_task_struct(struct task_struc +@@ -693,6 +694,15 @@ void __put_task_struct(struct task_struc WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); @@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> security_task_free(tsk); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2765,15 +2765,6 @@ static struct rq *finish_task_switch(str +@@ -2740,15 +2740,6 @@ static struct rq *finish_task_switch(str if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch index b4db8d97a037..cce7b29249f9 100644 --- a/patches/kgb-serial-hackaround.patch +++ b/patches/kgb-serial-hackaround.patch @@ -33,7 +33,7 @@ Jason. #include <linux/uaccess.h> #include <linux/pm_runtime.h> #include <linux/ktime.h> -@@ -3223,6 +3224,8 @@ void serial8250_console_write(struct uar +@@ -3241,6 +3242,8 @@ void serial8250_console_write(struct uar if (port->sysrq || oops_in_progress) locked = 0; diff --git a/patches/kthread-convert-worker-lock-to-raw-spinlock.patch b/patches/kthread-convert-worker-lock-to-raw-spinlock.patch index ffd7d27bb5d7..6404b1ccec71 100644 --- a/patches/kthread-convert-worker-lock-to-raw-spinlock.patch +++ b/patches/kthread-convert-worker-lock-to-raw-spinlock.patch @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct task_struct *task; --- a/kernel/kthread.c +++ b/kernel/kthread.c -@@ -597,7 +597,7 @@ void __kthread_init_worker(struct kthrea +@@ -599,7 +599,7 @@ void __kthread_init_worker(struct kthrea struct lock_class_key *key) { memset(worker, 0, sizeof(struct kthread_worker)); @@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> lockdep_set_class_and_name(&worker->lock, key, name); INIT_LIST_HEAD(&worker->work_list); INIT_LIST_HEAD(&worker->delayed_work_list); -@@ -639,21 +639,21 @@ int kthread_worker_fn(void *worker_ptr) +@@ -641,21 +641,21 @@ int kthread_worker_fn(void *worker_ptr) if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); @@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (work) { __set_current_state(TASK_RUNNING); -@@ -810,12 +810,12 @@ bool kthread_queue_work(struct kthread_w +@@ -812,12 +812,12 @@ bool kthread_queue_work(struct kthread_w bool ret = false; unsigned long flags; @@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return ret; } EXPORT_SYMBOL_GPL(kthread_queue_work); -@@ -841,7 +841,7 @@ void kthread_delayed_work_timer_fn(struc +@@ -843,7 +843,7 @@ void kthread_delayed_work_timer_fn(struc if (WARN_ON_ONCE(!worker)) return; @@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Work must not be used with >1 worker, see kthread_queue_work(). 
*/ WARN_ON_ONCE(work->worker != worker); -@@ -850,7 +850,7 @@ void kthread_delayed_work_timer_fn(struc +@@ -852,7 +852,7 @@ void kthread_delayed_work_timer_fn(struc list_del_init(&work->node); kthread_insert_work(worker, work, &worker->work_list); @@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } EXPORT_SYMBOL(kthread_delayed_work_timer_fn); -@@ -906,14 +906,14 @@ bool kthread_queue_delayed_work(struct k +@@ -908,14 +908,14 @@ bool kthread_queue_delayed_work(struct k unsigned long flags; bool ret = false; @@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return ret; } EXPORT_SYMBOL_GPL(kthread_queue_delayed_work); -@@ -949,7 +949,7 @@ void kthread_flush_work(struct kthread_w +@@ -951,7 +951,7 @@ void kthread_flush_work(struct kthread_w if (!worker) return; @@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Work must not be used with >1 worker, see kthread_queue_work(). */ WARN_ON_ONCE(work->worker != worker); -@@ -961,7 +961,7 @@ void kthread_flush_work(struct kthread_w +@@ -963,7 +963,7 @@ void kthread_flush_work(struct kthread_w else noop = true; @@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!noop) wait_for_completion(&fwork.done); -@@ -994,9 +994,9 @@ static bool __kthread_cancel_work(struct +@@ -996,9 +996,9 @@ static bool __kthread_cancel_work(struct * any queuing is blocked by setting the canceling counter. */ work->canceling++; @@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> work->canceling--; } -@@ -1043,7 +1043,7 @@ bool kthread_mod_delayed_work(struct kth +@@ -1045,7 +1045,7 @@ bool kthread_mod_delayed_work(struct kth unsigned long flags; int ret = false; @@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Do not bother with canceling when never queued. */ if (!work->worker) -@@ -1060,7 +1060,7 @@ bool kthread_mod_delayed_work(struct kth +@@ -1062,7 +1062,7 @@ bool kthread_mod_delayed_work(struct kth fast_queue: __kthread_queue_delayed_work(worker, dwork, delay); out: @@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return ret; } EXPORT_SYMBOL_GPL(kthread_mod_delayed_work); -@@ -1074,7 +1074,7 @@ static bool __kthread_cancel_work_sync(s +@@ -1076,7 +1076,7 @@ static bool __kthread_cancel_work_sync(s if (!worker) goto out; @@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Work must not be used with >1 worker, see kthread_queue_work(). */ WARN_ON_ONCE(work->worker != worker); -@@ -1088,13 +1088,13 @@ static bool __kthread_cancel_work_sync(s +@@ -1090,13 +1090,13 @@ static bool __kthread_cancel_work_sync(s * In the meantime, block any queuing by setting the canceling counter. 
*/ work->canceling++; diff --git a/patches/leds-trigger-disable-CPU-trigger-on-RT.patch b/patches/leds-trigger-disable-CPU-trigger-on-RT.patch index e59b962998ae..68f8c113b65c 100644 --- a/patches/leds-trigger-disable-CPU-trigger-on-RT.patch +++ b/patches/leds-trigger-disable-CPU-trigger-on-RT.patch @@ -19,17 +19,16 @@ as it triggers: Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- - drivers/leds/trigger/Kconfig | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) + drivers/leds/trigger/Kconfig | 1 + + 1 file changed, 1 insertion(+) --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig -@@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT +@@ -63,6 +63,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_CPU bool "LED CPU Trigger" -- depends on LEDS_TRIGGERS -+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE ++ depends on !PREEMPT_RT_BASE help This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which diff --git a/patches/libata-remove-ata_sff_data_xfer_noirq.patch b/patches/libata-remove-ata_sff_data_xfer_noirq.patch deleted file mode 100644 index fafa8524e558..000000000000 --- a/patches/libata-remove-ata_sff_data_xfer_noirq.patch +++ /dev/null @@ -1,196 +0,0 @@ -From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Thu, 19 Apr 2018 12:55:14 +0200 -Subject: [PATCH] libata: remove ata_sff_data_xfer_noirq() - -ata_sff_data_xfer_noirq() is invoked via the ->sff_data_xfer hook. The -latter is invoked by ata_pio_sector(), atapi_send_cdb() and -__atapi_pio_bytes() which in turn is invoked by ata_sff_hsm_move(). -The latter function requires that the "ap->lock" lock is held which -needs to be taken with disabled interrupts. - -There is no need have to have ata_sff_data_xfer_noirq() which invokes -ata_sff_data_xfer32() with disabled interrupts because at this point the -interrupts are already disabled. -Remove the function and its references to it and replace all callers -with ata_sff_data_xfer32(). - -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - Documentation/driver-api/libata.rst | 3 +-- - drivers/ata/libata-sff.c | 30 ------------------------------ - drivers/ata/pata_cmd640.c | 2 +- - drivers/ata/pata_icside.c | 2 +- - drivers/ata/pata_imx.c | 2 +- - drivers/ata/pata_legacy.c | 6 +++--- - drivers/ata/pata_palmld.c | 2 +- - drivers/ata/pata_pcmcia.c | 2 +- - drivers/ata/pata_platform.c | 2 +- - drivers/ata/pata_via.c | 2 +- - include/linux/libata.h | 2 -- - 11 files changed, 11 insertions(+), 44 deletions(-) - ---- a/Documentation/driver-api/libata.rst -+++ b/Documentation/driver-api/libata.rst -@@ -118,8 +118,7 @@ PIO data read/write - All bmdma-style drivers must implement this hook. This is the low-level - operation that actually copies the data bytes during a PIO data - transfer. Typically the driver will choose one of --:c:func:`ata_sff_data_xfer_noirq`, :c:func:`ata_sff_data_xfer`, or --:c:func:`ata_sff_data_xfer32`. -+:c:func:`ata_sff_data_xfer`, or :c:func:`ata_sff_data_xfer32`. - - ATA command execute - ~~~~~~~~~~~~~~~~~~~ ---- a/drivers/ata/libata-sff.c -+++ b/drivers/ata/libata-sff.c -@@ -658,36 +658,6 @@ unsigned int ata_sff_data_xfer32(struct - EXPORT_SYMBOL_GPL(ata_sff_data_xfer32); - - /** -- * ata_sff_data_xfer_noirq - Transfer data by PIO -- * @qc: queued command -- * @buf: data buffer -- * @buflen: buffer length -- * @rw: read/write -- * -- * Transfer data from/to the device data register by PIO. Do the -- * transfer with interrupts disabled. 
-- * -- * LOCKING: -- * Inherited from caller. -- * -- * RETURNS: -- * Bytes consumed. -- */ --unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, unsigned char *buf, -- unsigned int buflen, int rw) --{ -- unsigned long flags; -- unsigned int consumed; -- -- local_irq_save(flags); -- consumed = ata_sff_data_xfer32(qc, buf, buflen, rw); -- local_irq_restore(flags); -- -- return consumed; --} --EXPORT_SYMBOL_GPL(ata_sff_data_xfer_noirq); -- --/** - * ata_pio_sector - Transfer a sector of data. - * @qc: Command on going - * ---- a/drivers/ata/pata_cmd640.c -+++ b/drivers/ata/pata_cmd640.c -@@ -178,7 +178,7 @@ static struct scsi_host_template cmd640_ - static struct ata_port_operations cmd640_port_ops = { - .inherits = &ata_sff_port_ops, - /* In theory xfer_noirq is not needed once we kill the prefetcher */ -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .sff_irq_check = cmd640_sff_irq_check, - .qc_issue = cmd640_qc_issue, - .cable_detect = ata_cable_40wire, ---- a/drivers/ata/pata_icside.c -+++ b/drivers/ata/pata_icside.c -@@ -324,7 +324,7 @@ static struct ata_port_operations pata_i - .inherits = &ata_bmdma_port_ops, - /* no need to build any PRD tables for DMA */ - .qc_prep = ata_noop_qc_prep, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .bmdma_setup = pata_icside_bmdma_setup, - .bmdma_start = pata_icside_bmdma_start, - .bmdma_stop = pata_icside_bmdma_stop, ---- a/drivers/ata/pata_imx.c -+++ b/drivers/ata/pata_imx.c -@@ -102,7 +102,7 @@ static struct scsi_host_template pata_im - - static struct ata_port_operations pata_imx_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_unknown, - .set_piomode = pata_imx_set_piomode, - }; ---- a/drivers/ata/pata_legacy.c -+++ b/drivers/ata/pata_legacy.c -@@ -246,12 +246,12 @@ static const struct ata_port_operations - - static struct ata_port_operations simple_port_ops = { - .inherits = &legacy_base_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - }; - - static struct ata_port_operations legacy_port_ops = { - .inherits = &legacy_base_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .set_mode = legacy_set_mode, - }; - -@@ -341,7 +341,7 @@ static unsigned int pdc_data_xfer_vlb(st - } - local_irq_restore(flags); - } else -- buflen = ata_sff_data_xfer_noirq(qc, buf, buflen, rw); -+ buflen = ata_sff_data_xfer32(qc, buf, buflen, rw); - - return buflen; - } ---- a/drivers/ata/pata_palmld.c -+++ b/drivers/ata/pata_palmld.c -@@ -44,7 +44,7 @@ static struct scsi_host_template palmld_ - - static struct ata_port_operations palmld_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_40wire, - }; - ---- a/drivers/ata/pata_pcmcia.c -+++ b/drivers/ata/pata_pcmcia.c -@@ -151,7 +151,7 @@ static struct scsi_host_template pcmcia_ - - static struct ata_port_operations pcmcia_port_ops = { - .inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_40wire, - .set_mode = pcmcia_set_mode, - }; ---- a/drivers/ata/pata_platform.c -+++ b/drivers/ata/pata_platform.c -@@ -49,7 +49,7 @@ static struct scsi_host_template pata_pl - - static struct ata_port_operations pata_platform_port_ops = { - 
.inherits = &ata_sff_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - .cable_detect = ata_cable_unknown, - .set_mode = pata_platform_set_mode, - }; ---- a/drivers/ata/pata_via.c -+++ b/drivers/ata/pata_via.c -@@ -471,7 +471,7 @@ static struct ata_port_operations via_po - - static struct ata_port_operations via_port_ops_noirq = { - .inherits = &via_port_ops, -- .sff_data_xfer = ata_sff_data_xfer_noirq, -+ .sff_data_xfer = ata_sff_data_xfer32, - }; - - /** ---- a/include/linux/libata.h -+++ b/include/linux/libata.h -@@ -1858,8 +1858,6 @@ extern unsigned int ata_sff_data_xfer(st - unsigned char *buf, unsigned int buflen, int rw); - extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, - unsigned char *buf, unsigned int buflen, int rw); --extern unsigned int ata_sff_data_xfer_noirq(struct ata_queued_cmd *qc, -- unsigned char *buf, unsigned int buflen, int rw); - extern void ata_sff_irq_on(struct ata_port *ap); - extern void ata_sff_irq_clear(struct ata_port *ap); - extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, diff --git a/patches/localversion.patch b/patches/localversion.patch index 02952cda4bfa..a02382e6df70 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt9 ++-rt1 diff --git a/patches/lockdep-disable-self-test.patch b/patches/lockdep-disable-self-test.patch index 1816cbdf6154..a0485fa98cfb 100644 --- a/patches/lockdep-disable-self-test.patch +++ b/patches/lockdep-disable-self-test.patch @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug -@@ -1201,7 +1201,7 @@ config DEBUG_ATOMIC_SLEEP +@@ -1207,7 +1207,7 @@ config DEBUG_ATOMIC_SLEEP config DEBUG_LOCKING_API_SELFTESTS bool "Locking API boot-time self-tests" diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch index 6a9fa468a3a9..027a811cc628 100644 --- a/patches/lockdep-no-softirq-accounting-on-rt.patch +++ b/patches/lockdep-no-softirq-accounting-on-rt.patch @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h -@@ -32,14 +32,6 @@ do { \ +@@ -43,14 +43,6 @@ do { \ do { \ current->hardirq_context--; \ } while (0) @@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #else # define trace_hardirqs_on() do { } while (0) # define trace_hardirqs_off() do { } while (0) -@@ -54,6 +46,21 @@ do { \ +@@ -63,6 +55,21 @@ do { \ # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) #endif @@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> defined(CONFIG_PREEMPT_TRACER) --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c -@@ -3847,6 +3847,7 @@ static void check_flags(unsigned long fl +@@ -3823,6 +3823,7 @@ static void check_flags(unsigned long fl } } @@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only -@@ -3861,6 +3862,7 @@ static void check_flags(unsigned long fl +@@ -3837,6 +3838,7 @@ static void check_flags(unsigned long fl DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } diff --git a/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch index 4746c07f8855..6cd8d3f31351 100644 --- a/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch +++ b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch @@ -61,9 +61,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -# error "please don't include this file directly" -#endif - - #include <linux/types.h> + #include <asm-generic/qspinlock_types.h> + #include <asm-generic/qrwlock_types.h> - #define TICKET_SHIFT 16 --- a/arch/hexagon/include/asm/spinlock_types.h +++ b/arch/hexagon/include/asm/spinlock_types.h @@ -21,10 +21,6 @@ diff --git a/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch b/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch index f825a7f4911d..20855d74f743 100644 --- a/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch +++ b/patches/locking-locktorture-Do-NOT-include-rwlock.h-directly.patch @@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c -@@ -26,7 +26,6 @@ +@@ -29,7 +29,6 @@ #include <linux/kthread.h> #include <linux/sched/rt.h> #include <linux/spinlock.h> diff --git a/patches/md-disable-bcache.patch b/patches/md-disable-bcache.patch index fdfe5933603d..6c510466c108 100644 --- a/patches/md-disable-bcache.patch +++ b/patches/md-disable-bcache.patch @@ -26,6 +26,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> config BCACHE tristate "Block device as cache" + depends on !PREEMPT_RT_FULL - ---help--- + select CRC64 + help Allows a block device to be used as cache for other devices; uses - a btree for indexing and the layout is optimized for SSDs. 
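The bcache hunk above and the LED-trigger hunk before it use the same Kconfig idiom for keeping a feature out of -rt builds: instead of reworking locking that is not RT-safe, the option simply gains a dependency on the RT preemption symbols being disabled. A minimal sketch of the pattern, with FOO_FEATURE as a purely hypothetical symbol that is not part of this queue:

config FOO_FEATURE
	bool "Foo feature"
	# Hypothetical option, shown for illustration only.
	# !PREEMPT_RT_FULL hides the option from full -rt kernels;
	# !PREEMPT_RT_BASE would also exclude the basic -rt models.
	depends on !PREEMPT_RT_FULL
	help
	  Example of gating a feature off until its locking is RT-safe.

The hunks above use both variants: bcache depends on !PREEMPT_RT_FULL, while the CPU LED trigger uses the wider !PREEMPT_RT_BASE.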
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch index bfb4e3ed66ca..b1ee61377334 100644 --- a/patches/mips-disable-highmem-on-rt.patch +++ b/patches/mips-disable-highmem-on-rt.patch @@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig -@@ -2519,7 +2519,7 @@ config MIPS_CRC_SUPPORT +@@ -2514,7 +2514,7 @@ config MIPS_CRC_SUPPORT # config HIGHMEM bool "High Memory Support" diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch index fcf4667a79ac..7c29e8d42337 100644 --- a/patches/mm-convert-swap-to-percpu-locked.patch +++ b/patches/mm-convert-swap-to-percpu-locked.patch @@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -7137,8 +7137,9 @@ void __init free_area_init(unsigned long +@@ -7184,8 +7184,9 @@ void __init free_area_init(unsigned long static int page_alloc_cpu_dead(unsigned int cpu) { diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch index bcaf76c76bcd..b0df4353043b 100644 --- a/patches/mm-disable-sloub-rt.patch +++ b/patches/mm-disable-sloub-rt.patch @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/init/Kconfig +++ b/init/Kconfig -@@ -1591,6 +1591,7 @@ choice +@@ -1628,6 +1628,7 @@ choice config SLAB bool "SLAB" @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work -@@ -1611,6 +1612,7 @@ config SLUB +@@ -1648,6 +1649,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch index f5e0dc95f06e..78657d75592d 100644 --- a/patches/mm-enable-slub.patch +++ b/patches/mm-enable-slub.patch @@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/mm/slub.c +++ b/mm/slub.c -@@ -3679,6 +3679,11 @@ static void list_slab_objects(struct kme +@@ -3677,6 +3677,11 @@ static void list_slab_objects(struct kme const char *text) { #ifdef CONFIG_SLUB_DEBUG @@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> void *addr = page_address(page); void *p; unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects), -@@ -3700,6 +3705,7 @@ static void list_slab_objects(struct kme +@@ -3698,6 +3703,7 @@ static void list_slab_objects(struct kme slab_unlock(page); kfree(map); #endif diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch index 8b92ab4b06a7..b05b6bf8f34e 100644 --- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch +++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch @@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/mm/memcontrol.c +++ b/mm/memcontrol.c -@@ -1783,7 +1783,7 @@ static void drain_all_stock(struct mem_c +@@ -2036,7 +2036,7 @@ static void drain_all_stock(struct mem_c * as well as workers from this path always operate on the local * per-cpu data. CPU up doesn't touch memcg_stock at all. 
*/ @@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; -@@ -1803,7 +1803,7 @@ static void drain_all_stock(struct mem_c +@@ -2056,7 +2056,7 @@ static void drain_all_stock(struct mem_c } css_put(&memcg->css); } diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch index 361416da5ed5..0143f826aaf6 100644 --- a/patches/mm-memcontrol-do_not_disable_irq.patch +++ b/patches/mm-memcontrol-do_not_disable_irq.patch @@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { -@@ -4558,12 +4561,12 @@ static int mem_cgroup_move_account(struc +@@ -4843,12 +4846,12 @@ static int mem_cgroup_move_account(struc ret = 0; @@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> out_unlock: unlock_page(page); out: -@@ -5637,10 +5640,10 @@ void mem_cgroup_commit_charge(struct pag +@@ -5967,10 +5970,10 @@ void mem_cgroup_commit_charge(struct pag commit_charge(page, memcg, lrucare); @@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (do_memsw_account() && PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; -@@ -5709,7 +5712,7 @@ static void uncharge_batch(const struct +@@ -6039,7 +6042,7 @@ static void uncharge_batch(const struct memcg_oom_recover(ug->memcg); } @@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> __mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon); __mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file); __mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge); -@@ -5717,7 +5720,7 @@ static void uncharge_batch(const struct +@@ -6047,7 +6050,7 @@ static void uncharge_batch(const struct __count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout); __this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages); memcg_check_events(ug->memcg, ug->dummy_page); @@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!mem_cgroup_is_root(ug->memcg)) css_put_many(&ug->memcg->css, nr_pages); -@@ -5880,10 +5883,10 @@ void mem_cgroup_migrate(struct page *old +@@ -6210,10 +6213,10 @@ void mem_cgroup_migrate(struct page *old commit_charge(newpage, memcg, false); @@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); -@@ -6075,6 +6078,7 @@ void mem_cgroup_swapout(struct page *pag +@@ -6405,6 +6408,7 @@ void mem_cgroup_swapout(struct page *pag struct mem_cgroup *memcg, *swap_memcg; unsigned int nr_entries; unsigned short oldid; @@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); -@@ -6120,13 +6124,17 @@ void mem_cgroup_swapout(struct page *pag +@@ -6450,13 +6454,17 @@ void mem_cgroup_swapout(struct page *pag * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables. 
*/ diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch index 92dc5fe251a6..76bdfd8f6f34 100644 --- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch +++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch @@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/mm/page_alloc.c +++ b/mm/page_alloc.c -@@ -61,6 +61,7 @@ +@@ -60,6 +60,7 @@ #include <linux/hugetlb.h> #include <linux/sched/rt.h> #include <linux/sched/mm.h> @@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return NULL; } -@@ -8015,7 +8038,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8062,7 +8085,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ @@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); -@@ -8024,7 +8047,7 @@ void zone_pcp_reset(struct zone *zone) +@@ -8071,7 +8094,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch index 78f41047ac2a..dd8ec0a89e0a 100644 --- a/patches/mm-rt-kmap-atomic-scheduling.patch +++ b/patches/mm-rt-kmap-atomic-scheduling.patch @@ -247,7 +247,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -@@ -1193,6 +1194,12 @@ struct task_struct { +@@ -1205,6 +1206,12 @@ struct task_struct { int softirq_nestcnt; unsigned int softirqs_raised; #endif diff --git a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch index f3bd21543ff6..b3b14d392945 100644 --- a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch +++ b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch @@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void reset_page(struct page *page) -@@ -1350,7 +1418,7 @@ void *zs_map_object(struct zs_pool *pool +@@ -1337,7 +1405,7 @@ void *zs_map_object(struct zs_pool *pool class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; @@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ -@@ -1404,7 +1472,7 @@ void zs_unmap_object(struct zs_pool *poo +@@ -1391,7 +1459,7 @@ void zs_unmap_object(struct zs_pool *poo __zs_unmap_object(area, pages, off, class->size); } diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch index 0929a7fe918a..7c800eaf3dce 100644 --- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch +++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch @@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -411,7 +411,19 @@ typedef enum rx_handler_result rx_handle +@@ -422,7 +422,19 @@ typedef enum rx_handler_result rx_handle typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); @@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { --- 
a/net/core/dev.c +++ b/net/core/dev.c -@@ -5492,6 +5492,7 @@ bool napi_schedule_prep(struct napi_stru +@@ -5926,6 +5926,7 @@ bool napi_schedule_prep(struct napi_stru } EXPORT_SYMBOL(napi_schedule_prep); @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule -@@ -5503,6 +5504,7 @@ void __napi_schedule_irqoff(struct napi_ +@@ -5937,6 +5938,7 @@ void __napi_schedule_irqoff(struct napi_ ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch index 8946b5ce226d..4922bfa18d70 100644 --- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch +++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch @@ -65,14 +65,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> @@ -60,13 +61,13 @@ int gen_new_estimator(struct gnet_stats_ struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); void gen_kill_estimator(struct net_rate_estimator __rcu **ptr); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **ptr, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); bool gen_estimator_active(struct net_rate_estimator __rcu **ptr); @@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <linux/refcount.h> #include <linux/workqueue.h> #include <net/gen_stats.h> -@@ -94,7 +95,7 @@ struct Qdisc { +@@ -97,7 +98,7 @@ struct Qdisc { struct sk_buff_head gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; @@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; -@@ -115,7 +116,11 @@ static inline bool qdisc_is_running(stru +@@ -118,7 +119,11 @@ static inline bool qdisc_is_running(stru { if (qdisc->flags & TCQ_F_NOLOCK) return spin_is_locked(&qdisc->seqlock); @@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static inline bool qdisc_run_begin(struct Qdisc *qdisc) -@@ -126,17 +131,27 @@ static inline bool qdisc_run_begin(struc +@@ -129,17 +134,27 @@ static inline bool qdisc_run_begin(struc } else if (qdisc_is_running(qdisc)) { return false; } @@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (qdisc->flags & TCQ_F_NOLOCK) spin_unlock(&qdisc->seqlock); } -@@ -412,7 +427,7 @@ static inline spinlock_t *qdisc_root_sle +@@ -458,7 +473,7 @@ static inline spinlock_t *qdisc_root_sle return qdisc_lock(root); } @@ -178,21 +178,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> @@ -129,7 +129,7 @@ int gen_new_estimator(struct gnet_stats_ struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, + net_seqlock_t *running, struct nlattr *opt) { struct gnet_estimator *parm = nla_data(opt); -@@ -222,7 +222,7 @@ int gen_replace_estimator(struct gnet_st +@@ -227,7 +227,7 @@ int gen_replace_estimator(struct gnet_st struct 
gnet_stats_basic_cpu __percpu *cpu_bstats, struct net_rate_estimator __rcu **rate_est, - spinlock_t *stats_lock, + spinlock_t *lock, - seqcount_t *running, struct nlattr *opt) + net_seqlock_t *running, struct nlattr *opt) { return gen_new_estimator(bstats, cpu_bstats, rate_est, - stats_lock, running, opt); + lock, running, opt); --- a/net/core/gen_stats.c +++ b/net/core/gen_stats.c @@ -142,7 +142,7 @@ static void @@ -228,7 +228,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct gnet_stats_basic_packed *b) --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c -@@ -1159,7 +1159,7 @@ static struct Qdisc *qdisc_create(struct +@@ -1166,7 +1166,7 @@ static struct Qdisc *qdisc_create(struct rcu_assign_pointer(sch->stab, stab); } if (tca[TCA_RATE]) { diff --git a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch index d1b60d73d412..77c40ed5d1f1 100644 --- a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch +++ b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch @@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <net/net_namespace.h> #include <net/icmp.h> -@@ -632,6 +633,7 @@ void tcp_v4_send_check(struct sock *sk, +@@ -633,6 +634,7 @@ void tcp_v4_send_check(struct sock *sk, } EXPORT_SYMBOL(tcp_v4_send_check); @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * This routine will send an RST to the other tcp. * -@@ -766,6 +768,7 @@ static void tcp_v4_send_reset(const stru +@@ -767,6 +769,7 @@ static void tcp_v4_send_reset(const stru arg.tos = ip_hdr(skb)->tos; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); @@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? -@@ -778,6 +781,7 @@ static void tcp_v4_send_reset(const stru +@@ -779,6 +782,7 @@ static void tcp_v4_send_reset(const stru ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG -@@ -858,6 +862,7 @@ static void tcp_v4_send_ack(const struct +@@ -859,6 +863,7 @@ static void tcp_v4_send_ack(const struct arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); @@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk); if (sk) ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? 
-@@ -869,6 +874,7 @@ static void tcp_v4_send_ack(const struct +@@ -870,6 +875,7 @@ static void tcp_v4_send_ack(const struct ctl_sk->sk_mark = 0; __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); diff --git a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch index 326ce1d00bc3..7fb500703d2e 100644 --- a/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch +++ b/patches/net-core-use-local_bh_disable-in-netif_rx_ni.patch @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -4274,11 +4274,9 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -4507,11 +4507,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch index 12a886127b14..4af8216d86e3 100644 --- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch +++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch @@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3264,7 +3264,11 @@ static inline int __dev_xmit_skb(struct +@@ -3446,7 +3446,11 @@ static inline int __dev_xmit_skb(struct * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. */ diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch index eaa99a6b2e92..d237db4d07f0 100644 --- a/patches/net-make-devnet_rename_seq-a-mutex.patch +++ b/patches/net-make-devnet_rename_seq-a-mutex.patch @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -197,6 +197,7 @@ static unsigned int napi_gen_id = NR_CPU +@@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPU static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); static seqcount_t devnet_rename_seq; @@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static inline void dev_base_seq_inc(struct net *net) { -@@ -922,7 +923,8 @@ int netdev_get_name(struct net *net, cha +@@ -920,7 +921,8 @@ int netdev_get_name(struct net *net, cha strcpy(name, dev->name); rcu_read_unlock(); if (read_seqcount_retry(&devnet_rename_seq, seq)) { @@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> goto retry; } -@@ -1185,20 +1187,17 @@ int dev_change_name(struct net_device *d +@@ -1183,20 +1185,17 @@ int dev_change_name(struct net_device *d if (dev->flags & IFF_UP) return -EBUSY; @@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (oldname[0] && !strchr(oldname, '%')) netdev_info(dev, "renamed from %s\n", oldname); -@@ -1211,11 +1210,12 @@ int dev_change_name(struct net_device *d +@@ -1209,11 +1208,12 @@ int dev_change_name(struct net_device *d if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> netdev_adjacent_rename_links(dev, oldname); -@@ -1236,7 +1236,8 @@ int dev_change_name(struct net_device *d +@@ -1234,7 +1234,8 @@ int dev_change_name(struct net_device *d /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; @@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; -@@ -1249,6 
+1250,11 @@ int dev_change_name(struct net_device *d +@@ -1247,6 +1248,11 @@ int dev_change_name(struct net_device *d } return err; diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch index d2cc34bcc3da..e093f864732a 100644 --- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch +++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch @@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -573,7 +573,11 @@ struct netdev_queue { +@@ -587,7 +587,11 @@ struct netdev_queue { * write-mostly part */ spinlock_t _xmit_lock ____cacheline_aligned_in_smp; @@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Time (in jiffies) of last Tx */ -@@ -2561,14 +2565,53 @@ void netdev_freemem(struct net_device *d +@@ -2605,14 +2609,53 @@ void netdev_freemem(struct net_device *d void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); @@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); -@@ -3646,10 +3689,48 @@ static inline u32 netif_msg_init(int deb +@@ -3788,10 +3831,48 @@ static inline u32 netif_msg_init(int deb return (1 << debug_value) - 1; } @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static inline bool __netif_tx_acquire(struct netdev_queue *txq) -@@ -3666,32 +3747,32 @@ static inline void __netif_tx_release(st +@@ -3808,32 +3889,32 @@ static inline void __netif_tx_release(st static inline void __netif_tx_lock_bh(struct netdev_queue *txq) { spin_lock_bh(&txq->_xmit_lock); @@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1196,6 +1196,9 @@ struct task_struct { +@@ -1208,6 +1208,9 @@ struct task_struct { #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif @@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> struct task_struct *oom_reaper_list; --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -3336,8 +3336,10 @@ static void skb_update_prio(struct sk_bu +@@ -3518,8 +3518,10 @@ static void skb_update_prio(struct sk_bu #define skb_update_prio(skb) #endif @@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /** * dev_loopback_xmit - loop back @skb -@@ -3577,9 +3579,12 @@ static int __dev_queue_xmit(struct sk_bu +@@ -3810,9 +3812,12 @@ static int __dev_queue_xmit(struct sk_bu if (dev->flags & IFF_UP) { int cpu = smp_processor_id(); /* ok because BHs are off */ @@ -219,7 +219,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto recursion_alert; skb = validate_xmit_skb(skb, dev, &again); -@@ -3589,9 +3594,9 @@ static int __dev_queue_xmit(struct sk_bu +@@ -3822,9 +3827,9 @@ static int __dev_queue_xmit(struct sk_bu HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { @@ -231,7 +231,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; -@@ -7882,7 +7887,7 @@ static void netdev_init_one_queue(struct +@@ -8359,7 +8364,7 @@ static void netdev_init_one_queue(struct /* Initialize queue lock */ 
spin_lock_init(&queue->_xmit_lock); netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type); @@ -242,7 +242,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #ifdef CONFIG_BQL --- a/net/core/filter.c +++ b/net/core/filter.c -@@ -1983,7 +1983,7 @@ static inline int __bpf_tx_skb(struct ne +@@ -2000,7 +2000,7 @@ static inline int __bpf_tx_skb(struct ne { int ret; @@ -251,7 +251,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n"); kfree_skb(skb); return -ENETDOWN; -@@ -1991,9 +1991,9 @@ static inline int __bpf_tx_skb(struct ne +@@ -2008,9 +2008,9 @@ static inline int __bpf_tx_skb(struct ne skb->dev = dev; diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch index 14f2cee40f02..19b719721323 100644 --- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch +++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch @@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> void raise_softirq_irqoff(unsigned int nr) --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -5897,7 +5897,7 @@ static __latent_entropy void net_rx_acti +@@ -6353,7 +6353,7 @@ static __latent_entropy void net_rx_acti list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch index 782f6d5e3c19..e82598515710 100644 --- a/patches/net-use-cpu-chill.patch +++ b/patches/net-use-cpu-chill.patch @@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/kmod.h> #include <linux/slab.h> #include <linux/vmalloc.h> -@@ -666,7 +667,7 @@ static void prb_retire_rx_blk_timer_expi +@@ -667,7 +668,7 @@ static void prb_retire_rx_blk_timer_expi if (BLOCK_NUM_PKTS(pbd)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ @@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } } -@@ -928,7 +929,7 @@ static void prb_retire_current_block(str +@@ -929,7 +930,7 @@ static void prb_retire_current_block(str if (!(status & TP_STATUS_BLK_TMO)) { while (atomic_read(&pkc->blk_fill_in_prog)) { /* Waiting for skb_copy_bits to finish... */ @@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include "rds_single_path.h" #include "ib_mr.h" -@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace +@@ -222,7 +223,7 @@ static inline void wait_clean_list_grace for_each_online_cpu(cpu) { flag = &per_cpu(clean_list_grace, cpu); while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) diff --git a/patches/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch b/patches/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch deleted file mode 100644 index c42c6098e526..000000000000 --- a/patches/ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch +++ /dev/null @@ -1,51 +0,0 @@ -From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Tue, 10 Apr 2018 17:54:32 +0200 -Subject: [PATCH] ntfs: don't disable interrupts during kmap_atomic() - -ntfs_end_buffer_async_read() disables interrupts around kmap_atomic(). This is -a leftover from the old kmap_atomic() implementation which relied on fixed -mapping slots, so the caller had to make sure that the same slot could not be -reused from an interrupting context. 
- -kmap_atomic() was changed to dynamic slots long ago and commit 1ec9c5ddc17a -("include/linux/highmem.h: remove the second argument of k[un]map_atomic()") -removed the slot assignments, but the callers were not checked for now -redundant interrupt disabling. - -Remove the conditional interrupt disable. - -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - fs/ntfs/aops.c | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/fs/ntfs/aops.c -+++ b/fs/ntfs/aops.c -@@ -93,13 +93,11 @@ static void ntfs_end_buffer_async_read(s - ofs = 0; - if (file_ofs < init_size) - ofs = init_size - file_ofs; -- local_irq_save(flags); - kaddr = kmap_atomic(page); - memset(kaddr + bh_offset(bh) + ofs, 0, - bh->b_size - ofs); - flush_dcache_page(page); - kunmap_atomic(kaddr); -- local_irq_restore(flags); - } - } else { - clear_buffer_uptodate(bh); -@@ -146,13 +144,11 @@ static void ntfs_end_buffer_async_read(s - recs = PAGE_SIZE / rec_size; - /* Should have been verified before we got here... */ - BUG_ON(!recs); -- local_irq_save(flags); - kaddr = kmap_atomic(page); - for (i = 0; i < recs; i++) - post_read_mst_fixup((NTFS_RECORD*)(kaddr + - i * rec_size), rec_size); - kunmap_atomic(kaddr); -- local_irq_restore(flags); - flush_dcache_page(page); - if (likely(page_uptodate && !PageError(page))) - SetPageUptodate(page); diff --git a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch index 910cceb4f4fa..e40178dc0c0d 100644 --- a/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch +++ b/patches/of-allocate-free-phandle-cache-outside-of-the-devtre.patch @@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/drivers/of/base.c +++ b/drivers/of/base.c -@@ -108,46 +108,52 @@ void of_populate_phandle_cache(void) +@@ -130,46 +130,52 @@ void of_populate_phandle_cache(void) u32 cache_entries; struct device_node *np; u32 phandles = 0; diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch index 34fb095d4512..4b7f07e0bc87 100644 --- a/patches/oleg-signal-rt-fix.patch +++ b/patches/oleg-signal-rt-fix.patch @@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -875,6 +875,10 @@ struct task_struct { +@@ -881,6 +881,10 @@ struct task_struct { /* Restored if set_restore_sigmask() was used: */ sigset_t saved_sigmask; struct sigpending pending; @@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> unsigned int sas_ss_flags; --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -1185,8 +1185,8 @@ int do_send_sig_info(int sig, struct sig +@@ -1226,8 +1226,8 @@ int do_send_sig_info(int sig, struct sig * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE.
*/ @@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> { unsigned long int flags; int ret, blocked, ignored; -@@ -1215,6 +1215,39 @@ force_sig_info(int sig, struct siginfo * +@@ -1256,6 +1256,39 @@ force_sig_info(int sig, struct siginfo * return ret; } diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch index e5a6d818bfd0..84bfb547d896 100644 --- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch +++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch @@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -243,7 +243,19 @@ void rcu_sched_qs(void) +@@ -244,7 +244,19 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } @@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/sched/isolation.h> #include <uapi/linux/sched/types.h> #include "../time/tick-internal.h" -@@ -1336,7 +1337,7 @@ static void rcu_prepare_kthreads(int cpu +@@ -1407,7 +1408,7 @@ static void rcu_prepare_kthreads(int cpu #endif /* #else #ifdef CONFIG_RCU_BOOST */ @@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Check to see if any future RCU-related work will need to be done -@@ -1352,7 +1353,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex +@@ -1423,7 +1424,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex *nextevt = KTIME_MAX; return rcu_cpu_has_callbacks(NULL); } @@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. -@@ -1448,6 +1451,8 @@ static bool __maybe_unused rcu_try_advan +@@ -1520,6 +1523,8 @@ static bool __maybe_unused rcu_try_advan return cbs_ready; } @@ -88,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the -@@ -1490,6 +1495,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex +@@ -1562,6 +1567,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex *nextevt = basemono + dj * TICK_NSEC; return 0; } diff --git a/patches/percpu-include-irqflags.h-for-raw_local_irq_save.patch b/patches/percpu-include-irqflags.h-for-raw_local_irq_save.patch new file mode 100644 index 000000000000..86a018707694 --- /dev/null +++ b/patches/percpu-include-irqflags.h-for-raw_local_irq_save.patch @@ -0,0 +1,26 @@ +From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +Date: Thu, 11 Oct 2018 16:39:59 +0200 +Subject: [PATCH] percpu: include irqflags.h for raw_local_irq_save() + +The percpu.h header file is using raw_local_irq_save() but does +not include irqflags.h for its definition. It compiles because the +header file is included via another header file. +On -RT the build fails because raw_local_irq_save() is not defined. + +Include irqflags.h in percpu.h.
+ +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + include/asm-generic/percpu.h | 1 + + 1 file changed, 1 insertion(+) + +--- a/include/asm-generic/percpu.h ++++ b/include/asm-generic/percpu.h +@@ -5,6 +5,7 @@ + #include <linux/compiler.h> + #include <linux/threads.h> + #include <linux/percpu-defs.h> ++#include <linux/irqflags.h> + + #ifdef CONFIG_SMP + diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch index 3379ce6c9a0b..13f262ca1cd8 100644 --- a/patches/peter_zijlstra-frob-rcu.patch +++ b/patches/peter_zijlstra-frob-rcu.patch @@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h -@@ -512,7 +512,7 @@ void rcu_read_unlock_special(struct task +@@ -524,7 +524,7 @@ static void rcu_read_unlock_special(stru } /* Hardware IRQ handlers cannot block, complain if they get here. */ diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch index 8963a463607d..4b832db889ad 100644 --- a/patches/peterz-percpu-rwsem-rt.patch +++ b/patches/peterz-percpu-rwsem-rt.patch @@ -18,7 +18,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> --- a/fs/locks.c +++ b/fs/locks.c -@@ -945,7 +945,7 @@ static int flock_lock_inode(struct inode +@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode return -ENOMEM; } @@ -27,7 +27,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); if (request->fl_flags & FL_ACCESS) goto find_conflict; -@@ -986,7 +986,7 @@ static int flock_lock_inode(struct inode +@@ -977,7 +977,7 @@ static int flock_lock_inode(struct inode out: spin_unlock(&ctx->flc_lock); @@ -36,7 +36,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> if (new_fl) locks_free_lock(new_fl); locks_dispose_list(&dispose); -@@ -1023,7 +1023,7 @@ static int posix_lock_inode(struct inode +@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode new_fl2 = locks_alloc_lock(); } @@ -45,7 +45,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); /* * New lock request. Walk all POSIX locks and look for conflicts. If -@@ -1195,7 +1195,7 @@ static int posix_lock_inode(struct inode +@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode } out: spin_unlock(&ctx->flc_lock); @@ -54,7 +54,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> /* * Free any unused locks. 
*/ -@@ -1470,7 +1470,7 @@ int __break_lease(struct inode *inode, u +@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, u return error; } @@ -63,7 +63,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); -@@ -1522,13 +1522,13 @@ int __break_lease(struct inode *inode, u +@@ -1514,13 +1514,13 @@ int __break_lease(struct inode *inode, u locks_insert_block(fl, new_fl); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); @@ -79,7 +79,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); locks_delete_block(new_fl); -@@ -1545,7 +1545,7 @@ int __break_lease(struct inode *inode, u +@@ -1537,7 +1537,7 @@ int __break_lease(struct inode *inode, u } out: spin_unlock(&ctx->flc_lock); @@ -88,7 +88,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> locks_dispose_list(&dispose); locks_free_lock(new_fl); return error; -@@ -1617,7 +1617,7 @@ int fcntl_getlease(struct file *filp) +@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp) ctx = smp_load_acquire(&inode->i_flctx); if (ctx && !list_empty_careful(&ctx->flc_lease)) { @@ -97,7 +97,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { -@@ -1627,7 +1627,7 @@ int fcntl_getlease(struct file *filp) +@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp) break; } spin_unlock(&ctx->flc_lock); @@ -106,7 +106,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> locks_dispose_list(&dispose); } -@@ -1702,7 +1702,7 @@ generic_add_lease(struct file *filp, lon +@@ -1693,7 +1693,7 @@ generic_add_lease(struct file *filp, lon return -EINVAL; } @@ -115,7 +115,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); error = check_conflicting_open(dentry, arg, lease->fl_flags); -@@ -1773,7 +1773,7 @@ generic_add_lease(struct file *filp, lon +@@ -1764,7 +1764,7 @@ generic_add_lease(struct file *filp, lon lease->fl_lmops->lm_setup(lease, priv); out: spin_unlock(&ctx->flc_lock); @@ -124,7 +124,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> locks_dispose_list(&dispose); if (is_deleg) inode_unlock(inode); -@@ -1796,7 +1796,7 @@ static int generic_delete_lease(struct f +@@ -1787,7 +1787,7 @@ static int generic_delete_lease(struct f return error; } @@ -133,7 +133,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { if (fl->fl_file == filp && -@@ -1809,7 +1809,7 @@ static int generic_delete_lease(struct f +@@ -1800,7 +1800,7 @@ static int generic_delete_lease(struct f if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); @@ -142,7 +142,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> locks_dispose_list(&dispose); return error; } -@@ -2540,13 +2540,13 @@ locks_remove_lease(struct file *filp, st +@@ -2531,13 +2531,13 @@ locks_remove_lease(struct file *filp, st if (list_empty(&ctx->flc_lease)) return; diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch index 41a08e644a24..05e6dc552631 100644 --- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch +++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch @@ -19,7 +19,7 
@@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -827,6 +827,9 @@ struct task_struct { +@@ -832,6 +832,9 @@ struct task_struct { #ifdef CONFIG_POSIX_TIMERS struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Process credentials: */ --- a/init/init_task.c +++ b/init/init_task.c -@@ -43,6 +43,12 @@ static struct sighand_struct init_sighan +@@ -50,6 +50,12 @@ static struct sighand_struct init_sighan .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(init_sighand.signalfd_wqh), }; @@ -44,17 +44,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Set up the first task table, touch at your own risk!. Base=0, * limit=0x1fffff (=2MB) -@@ -112,6 +118,7 @@ struct task_struct init_task +@@ -119,6 +125,7 @@ struct task_struct init_task INIT_CPU_TIMERS(init_task) .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(init_task.pi_lock), .timer_slack_ns = 50000, /* 50 usec default slack */ + INIT_TIMER_LIST - .pids = { - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), + .thread_pid = &init_struct_pid, + .thread_group = LIST_HEAD_INIT(init_task.thread_group), + .thread_node = LIST_HEAD_INIT(init_signals.thread_head), --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1564,6 +1564,9 @@ static void rt_mutex_init_task(struct ta +@@ -1575,6 +1575,9 @@ static void rt_mutex_init_task(struct ta */ static void posix_cpu_timers_init(struct task_struct *tsk) { diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch index 53ee40882043..19eeb6ac5812 100644 --- a/patches/power-disable-highmem-on-rt.patch +++ b/patches/power-disable-highmem-on-rt.patch @@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -395,7 +395,7 @@ menu "Kernel options" +@@ -398,7 +398,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" @@ -19,4 +19,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + depends on PPC32 && !PREEMPT_RT_FULL source kernel/Kconfig.hz - source kernel/Kconfig.preempt + diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch index 9fd671f3873a..2e9979c76c49 100644 --- a/patches/powerpc-preempt-lazy-support.patch +++ b/patches/powerpc-preempt-lazy-support.patch @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -215,6 +215,7 @@ config PPC +@@ -216,6 +216,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP @@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> select HAVE_RELIABLE_STACKTRACE if PPC64 && CPU_LITTLE_ENDIAN --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h -@@ -36,6 +36,8 @@ struct thread_info { +@@ -37,6 +37,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ @@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> unsigned long local_flags; /* private flags for thread */ #ifdef CONFIG_LIVEPATCH unsigned long *livepatch_sp; -@@ -80,7 +82,7 @@ extern int arch_dup_task_struct(struct t +@@ -81,7 +83,7 @@ extern int arch_dup_task_struct(struct t #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* 
rescheduling necessary */ #define TIF_FSCHECK 3 /* Check FS is USER_DS on return */ @@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_PATCH_PENDING 6 /* pending live patching update */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -99,6 +101,7 @@ extern int arch_dup_task_struct(struct t +@@ -100,6 +102,7 @@ extern int arch_dup_task_struct(struct t #define TIF_ELF2ABI 18 /* function descriptors must die! */ #endif #define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling TIF_NEED_RESCHED */ @@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) -@@ -118,6 +121,7 @@ extern int arch_dup_task_struct(struct t +@@ -119,6 +122,7 @@ extern int arch_dup_task_struct(struct t #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) #define _TIF_NOHZ (1<<TIF_NOHZ) @@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #define _TIF_FSCHECK (1<<TIF_FSCHECK) #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ -@@ -126,8 +130,9 @@ extern int arch_dup_task_struct(struct t +@@ -127,8 +131,9 @@ extern int arch_dup_task_struct(struct t #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ _TIF_RESTORE_TM | _TIF_PATCH_PENDING | \ @@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S -@@ -873,7 +873,14 @@ user_exc_return: /* r10 contains MSR_KE +@@ -885,7 +885,14 @@ user_exc_return: /* r10 contains MSR_KE cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore andi. r8,r8,_TIF_NEED_RESCHED @@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> lwz r3,_MSR(r1) andi. r0,r3,MSR_EE /* interrupts off? */ beq restore /* don't schedule if so */ -@@ -884,11 +891,11 @@ user_exc_return: /* r10 contains MSR_KE +@@ -896,11 +903,11 @@ user_exc_return: /* r10 contains MSR_KE */ bl trace_hardirqs_off #endif @@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_TRACE_IRQFLAGS /* And now, to properly rebalance the above, we tell lockdep they * are being turned back on, which will happen when we return -@@ -1211,7 +1218,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE +@@ -1223,7 +1230,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ @@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ -@@ -1232,7 +1239,7 @@ do_resched: /* r10 contains MSR_KERNEL +@@ -1244,7 +1251,7 @@ do_resched: /* r10 contains MSR_KERNEL MTMSRD(r10) /* disable interrupts */ CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) @@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> beq restore_user --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S -@@ -168,7 +168,7 @@ system_call: /* label this so stack tr +@@ -171,7 +171,7 @@ system_call: /* label this so stack tr * based on caller's run-mode / personality. 
*/ ld r11,SYS_CALL_TABLE@toc(2) @@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ clrldi r3,r3,32 -@@ -707,7 +707,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -763,7 +763,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG bl restore_math b restore #endif @@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> beq 2f bl restore_interrupts SCHEDULE_USER -@@ -769,10 +769,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -825,10 +825,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG #ifdef CONFIG_PREEMPT /* Check if we need to preempt */ @@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> cmpwi cr0,r8,0 bne restore ld r0,SOFTE(r1) -@@ -789,7 +797,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG +@@ -845,7 +853,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEG /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) ld r4,TI_FLAGS(r9) diff --git a/patches/powerpc-ps3-device-init.c-adapt-to-completions-using.patch b/patches/powerpc-ps3-device-init.c-adapt-to-completions-using.patch deleted file mode 100644 index a153e1cf017e..000000000000 --- a/patches/powerpc-ps3-device-init.c-adapt-to-completions-using.patch +++ /dev/null @@ -1,31 +0,0 @@ -From: Paul Gortmaker <paul.gortmaker@windriver.com> -Date: Sun, 31 May 2015 14:44:42 -0400 -Subject: powerpc: ps3/device-init.c - adapt to completions using swait vs wait - -To fix: - - cc1: warnings being treated as errors - arch/powerpc/platforms/ps3/device-init.c: In function 'ps3_notification_read_write': - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'prepare_to_wait_event' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'abort_exclusive_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.c:755:2: error: passing argument 1 of 'finish_wait' from incompatible pointer type - arch/powerpc/platforms/ps3/device-init.o] Error 1 - make[3]: *** Waiting for unfinished jobs.... 
- -Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com> -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - arch/powerpc/platforms/ps3/device-init.c | 2 +- - 1 file changed, 1 insertion(+), 1 deletion(-) - ---- a/arch/powerpc/platforms/ps3/device-init.c -+++ b/arch/powerpc/platforms/ps3/device-init.c -@@ -752,7 +752,7 @@ static int ps3_notification_read_write(s - } - pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - -- res = wait_event_interruptible(dev->done.wait, -+ res = swait_event_interruptible(dev->done.wait, - dev->done.done || kthread_should_stop()); - if (kthread_should_stop()) - res = -EINTR; diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch index 16a7eb3da880..dff4755ada32 100644 --- a/patches/preempt-lazy-support.patch +++ b/patches/preempt-lazy-support.patch @@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1723,6 +1723,44 @@ static inline int test_tsk_need_resched( +@@ -1715,6 +1715,44 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> default PREEMPT_NONE --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -498,6 +498,48 @@ void resched_curr(struct rq *rq) +@@ -492,6 +492,48 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } @@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2429,6 +2471,9 @@ int sched_fork(unsigned long clone_flags +@@ -2404,6 +2446,9 @@ int sched_fork(unsigned long clone_flags p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3494,6 +3539,7 @@ static void __sched notrace __schedule(b +@@ -3471,6 +3516,7 @@ static void __sched notrace __schedule(b next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -3674,6 +3720,30 @@ static void __sched notrace preempt_sche +@@ -3651,6 +3697,30 @@ static void __sched notrace preempt_sche } while (need_resched()); } @@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption -@@ -3688,7 +3758,8 @@ asmlinkage __visible void __sched notrac +@@ -3665,7 +3735,8 @@ asmlinkage __visible void __sched notrac */ if (likely(!preemptible())) return; @@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); -@@ -3715,6 +3786,9 @@ asmlinkage __visible void __sched notrac +@@ -3692,6 +3763,9 @@ asmlinkage __visible void __sched notrac if (likely(!preemptible())) return; @@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5483,7 +5557,9 @@ void init_idle(struct task_struct *idle, +@@ -5460,7 +5534,9 @@ void init_idle(struct task_struct *idle, /* Set the preempt count _outside_ the spinlocks! 
*/ init_idle_preempt_count(idle, cpu); @@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * The idle tasks have their own, simple scheduling class: */ -@@ -7199,6 +7275,7 @@ void migrate_disable(void) +@@ -7176,6 +7252,7 @@ void migrate_disable(void) } preempt_disable(); @@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> pin_current_cpu(); migrate_disable_update_cpus_allowed(p); -@@ -7266,6 +7343,7 @@ void migrate_enable(void) +@@ -7243,6 +7320,7 @@ void migrate_enable(void) arg.dest_cpu = dest_cpu; unpin_current_cpu(); @@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> preempt_enable(); stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); -@@ -7274,6 +7352,7 @@ void migrate_enable(void) +@@ -7251,6 +7329,7 @@ void migrate_enable(void) } } unpin_current_cpu(); @@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> EXPORT_SYMBOL(migrate_enable); --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4338,7 +4338,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4018,7 +4018,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { @@ -397,7 +397,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. -@@ -4362,7 +4362,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq +@@ -4042,7 +4042,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq return; if (delta > ideal_runtime) @@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static void -@@ -4504,7 +4504,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc +@@ -4184,7 +4184,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc * validating it and just reschedule. */ if (queued) { @@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return; } /* -@@ -4688,7 +4688,7 @@ static void __account_cfs_rq_runtime(str +@@ -4368,7 +4368,7 @@ static void __account_cfs_rq_runtime(str * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) @@ -424,7 +424,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static __always_inline -@@ -5342,7 +5342,7 @@ static void hrtick_start_fair(struct rq +@@ -5037,7 +5037,7 @@ static void hrtick_start_fair(struct rq if (delta < 0) { if (rq->curr == p) @@ -433,7 +433,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return; } hrtick_start(rq, delta); -@@ -6883,7 +6883,7 @@ static void check_preempt_wakeup(struct +@@ -6578,7 +6578,7 @@ static void check_preempt_wakeup(struct return; preempt: @@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved -@@ -9969,7 +9969,7 @@ static void task_fork_fair(struct task_s +@@ -9689,7 +9689,7 @@ static void task_fork_fair(struct task_s * 'current' within the tree based on its new key value. 
*/ swap(curr->vruntime, se->vruntime); @@ -451,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } se->vruntime -= cfs_rq->min_vruntime; -@@ -9993,7 +9993,7 @@ prio_changed_fair(struct rq *rq, struct +@@ -9713,7 +9713,7 @@ prio_changed_fair(struct rq *rq, struct */ if (rq->curr == p) { if (p->prio > oldprio) @@ -474,7 +474,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1613,6 +1613,15 @@ extern void reweight_task(struct task_st +@@ -1641,6 +1641,15 @@ extern void reweight_task(struct task_st extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); @@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c -@@ -2133,6 +2133,7 @@ tracing_generic_entry_update(struct trac +@@ -2134,6 +2134,7 @@ tracing_generic_entry_update(struct trac struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; @@ -500,7 +500,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -2143,7 +2144,8 @@ tracing_generic_entry_update(struct trac +@@ -2144,7 +2145,8 @@ tracing_generic_entry_update(struct trac ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | @@ -510,7 +510,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0; -@@ -3345,15 +3347,17 @@ get_total_entries(struct trace_buffer *b +@@ -3346,15 +3348,17 @@ get_total_entries(struct trace_buffer *b static void print_lat_help_header(struct seq_file *m) { @@ -537,7 +537,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -3389,15 +3393,17 @@ static void print_func_help_header_irq(s +@@ -3390,15 +3394,17 @@ static void print_func_help_header_irq(s tgid ? tgid_space : space); seq_printf(m, "# %s / _----=> need-resched\n", tgid ? tgid_space : space); @@ -580,7 +580,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #define TRACE_BUF_SIZE 1024 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c -@@ -447,6 +447,7 @@ int trace_print_lat_fmt(struct trace_seq +@@ -448,6 +448,7 @@ int trace_print_lat_fmt(struct trace_seq { char hardsoft_irq; char need_resched; @@ -588,7 +588,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> char irqs_off; int hardirq; int softirq; -@@ -477,6 +478,9 @@ int trace_print_lat_fmt(struct trace_seq +@@ -478,6 +479,9 @@ int trace_print_lat_fmt(struct trace_seq break; } @@ -598,7 +598,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> hardsoft_irq = (nmi && hardirq) ? 'Z' : nmi ? 'z' : -@@ -485,14 +489,20 @@ int trace_print_lat_fmt(struct trace_seq +@@ -486,14 +490,20 @@ int trace_print_lat_fmt(struct trace_seq softirq ? 's' : '.' 
; diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch index db295c916906..95c2233c14a9 100644 --- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch +++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch @@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -434,6 +434,13 @@ asmlinkage void early_printk(const char +@@ -435,6 +435,13 @@ asmlinkage void early_printk(const char */ static bool __read_mostly printk_killswitch; diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch index ae24fc0f978b..bc904b60a3a9 100644 --- a/patches/printk-kill.patch +++ b/patches/printk-kill.patch @@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_PRINTK_NMI --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -404,6 +404,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); +@@ -405,6 +405,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock); printk_safe_exit_irqrestore(flags); \ } while (0) @@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -1886,6 +1938,13 @@ asmlinkage int vprintk_emit(int facility +@@ -1892,6 +1944,13 @@ asmlinkage int vprintk_emit(int facility bool in_sched = false; unsigned long flags; @@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; in_sched = true; -@@ -2026,26 +2085,6 @@ static bool suppress_message_printing(in +@@ -2032,26 +2091,6 @@ static bool suppress_message_printing(in #endif /* CONFIG_PRINTK */ diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch index 27a620e04acd..ea3024c28ea1 100644 --- a/patches/printk-rt-aware.patch +++ b/patches/printk-rt-aware.patch @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c -@@ -1606,6 +1606,7 @@ SYSCALL_DEFINE3(syslog, int, type, char +@@ -1612,6 +1612,7 @@ SYSCALL_DEFINE3(syslog, int, type, char return do_syslog(type, buf, len, SYSLOG_FROM_READER); } @@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Special console_lock variants that help to reduce the risk of soft-lockups. * They allow to pass console_lock to another printk() call using a busy wait. -@@ -1746,6 +1747,15 @@ static int console_trylock_spinning(void +@@ -1752,6 +1753,15 @@ static int console_trylock_spinning(void return 1; } @@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Call the console drivers, asking them to write out * log_buf[start] to log_buf[end - 1]. 
-@@ -1761,6 +1771,7 @@ static void call_console_drivers(const c +@@ -1767,6 +1777,7 @@ static void call_console_drivers(const c if (!console_drivers) return; @@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> for_each_console(con) { if (exclusive_console && con != exclusive_console) continue; -@@ -1776,6 +1787,7 @@ static void call_console_drivers(const c +@@ -1782,6 +1793,7 @@ static void call_console_drivers(const c else con->write(con, text, len); } @@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } int printk_delay_msec __read_mostly; -@@ -1967,20 +1979,30 @@ asmlinkage int vprintk_emit(int facility +@@ -1973,20 +1985,30 @@ asmlinkage int vprintk_emit(int facility /* If called from the scheduler, we can not call up(). */ if (!in_sched) { @@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } wake_up_klogd(); -@@ -2432,6 +2454,10 @@ void console_unlock(void) +@@ -2439,6 +2461,10 @@ void console_unlock(void) console_seq++; raw_spin_unlock(&logbuf_lock); @@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * While actively printing out messages, if another printk() * were to occur on another CPU, it may wait for this one to -@@ -2450,6 +2476,7 @@ void console_unlock(void) +@@ -2457,6 +2483,7 @@ void console_unlock(void) } printk_safe_exit_irqrestore(flags); diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch index 40d3b5ad2429..3eb86b98a5a1 100644 --- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ (task->state & TASK_NOLOAD) == 0) -@@ -1707,6 +1703,51 @@ static inline int test_tsk_need_resched( +@@ -1699,6 +1695,51 @@ static inline int test_tsk_need_resched( return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } @@ -115,9 +115,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> spin_unlock_irq(&task->sighand->siglock); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1368,6 +1368,18 @@ int migrate_swap(struct task_struct *cur - return ret; +@@ -1348,6 +1348,18 @@ int migrate_swap(struct task_struct *cur } + #endif /* CONFIG_NUMA_BALANCING */ +static bool check_task_state(struct task_struct *p, long match_state) +{ @@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -1412,7 +1424,7 @@ unsigned long wait_task_inactive(struct +@@ -1392,7 +1404,7 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (!check_task_state(p, match_state)) return 0; cpu_relax(); } -@@ -1427,7 +1439,8 @@ unsigned long wait_task_inactive(struct +@@ -1407,7 +1419,8 @@ unsigned long wait_task_inactive(struct running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; diff --git a/patches/radix-tree-use-local-locks.patch b/patches/radix-tree-use-local-locks.patch index 94045bb4e5b5..8cde3957b305 100644 --- a/patches/radix-tree-use-local-locks.patch +++ b/patches/radix-tree-use-local-locks.patch @@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/idr.h +++ b/include/linux/idr.h -@@ -158,10 +158,7 @@ static inline bool idr_is_empty(const st +@@ -169,10 +169,7 @@ static inline bool idr_is_empty(const st * Each idr_preload() should be matched with an invocation of this * function. See idr_preload() for details. */ @@ -151,10 +151,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +} +EXPORT_SYMBOL(idr_preload_end); + - /** - * ida_pre_get - reserve resources for ida allocation - * @ida: ida handle + int ida_pre_get(struct ida *ida, gfp_t gfp) + { + /* -@@ -2122,7 +2136,7 @@ int ida_pre_get(struct ida *ida, gfp_t g +@@ -2114,7 +2128,7 @@ int ida_pre_get(struct ida *ida, gfp_t g * to return to the ida_pre_get() step. */ if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE)) diff --git a/patches/random-Remove-preempt-disabled-region.patch b/patches/random-Remove-preempt-disabled-region.patch deleted file mode 100644 index 31d91dfc3fcb..000000000000 --- a/patches/random-Remove-preempt-disabled-region.patch +++ /dev/null @@ -1,47 +0,0 @@ -From: Ingo Molnar <mingo@elte.hu> -Date: Fri, 3 Jul 2009 08:29:30 -0500 -Subject: [PATCH] random: Remove preempt disabled region - -No need to keep preemption disabled across the whole function. - -mix_pool_bytes() uses a spin_lock() to protect the pool and there are -other places like write_pool() which invoke mix_pool_bytes() without -disabling preemption. -credit_entropy_bits() is invoked from other places like -add_hwgenerator_randomness() without disabling preemption. - -Before commit 95b709b6be49 ("random: drop trickle mode") the function -used __this_cpu_inc_return() which would require disabled preemption. -The preempt_disable() section was added in commit 43d5d3018c37 ("[PATCH] -random driver preempt robustness", history tree). It was claimed that -the code relied on "vt_ioctl() being called under BKL". - -Cc: "Theodore Ts'o" <tytso@mit.edu> -Signed-off-by: Ingo Molnar <mingo@elte.hu> -Signed-off-by: Thomas Gleixner <tglx@linutronix.de> -[bigeasy: enhance the commit message] -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ---- - drivers/char/random.c | 4 ---- - 1 file changed, 4 deletions(-) - ---- a/drivers/char/random.c -+++ b/drivers/char/random.c -@@ -1122,8 +1122,6 @@ static void add_timer_randomness(struct - } sample; - long delta, delta2, delta3; - -- preempt_disable(); -- - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; -@@ -1161,8 +1159,6 @@ static void add_timer_randomness(struct - * and limit entropy entimate to 12 bits.
- */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); -- -- preempt_enable(); - } - - void add_input_randomness(unsigned int type, unsigned int code, diff --git a/patches/random-avoid-preempt_disable-ed-section.patch b/patches/random-avoid-preempt_disable-ed-section.patch index d0d82130f0d0..846d4616ffb7 100644 --- a/patches/random-avoid-preempt_disable-ed-section.patch +++ b/patches/random-avoid-preempt_disable-ed-section.patch @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include <crypto/chacha20.h> #include <asm/processor.h> -@@ -2193,6 +2194,7 @@ static rwlock_t batched_entropy_reset_lo +@@ -2223,6 +2224,7 @@ static rwlock_t batched_entropy_reset_lo * at any point prior. */ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); @@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> u64 get_random_u64(void) { u64 ret; -@@ -2213,7 +2215,7 @@ u64 get_random_u64(void) +@@ -2243,7 +2245,7 @@ u64 get_random_u64(void) warn_unseeded_randomness(&previous); use_lock = READ_ONCE(crng_init) < 2; @@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) { -@@ -2223,12 +2225,13 @@ u64 get_random_u64(void) +@@ -2253,12 +2255,13 @@ u64 get_random_u64(void) ret = batch->entropy_u64[batch->position++]; if (use_lock) read_unlock_irqrestore(&batched_entropy_reset_lock, flags); @@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> u32 get_random_u32(void) { u32 ret; -@@ -2243,7 +2246,7 @@ u32 get_random_u32(void) +@@ -2273,7 +2276,7 @@ u32 get_random_u32(void) warn_unseeded_randomness(&previous); use_lock = READ_ONCE(crng_init) < 2; @@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) { -@@ -2253,7 +2256,7 @@ u32 get_random_u32(void) +@@ -2283,7 +2286,7 @@ u32 get_random_u32(void) ret = batch->entropy_u32[batch->position++]; if (use_lock) read_unlock_irqrestore(&batched_entropy_reset_lock, flags); diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch index 9f2983aba2e5..ebab3ee7b681 100644 --- a/patches/random-make-it-work-on-rt.patch +++ b/patches/random-make-it-work-on-rt.patch @@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/drivers/char/random.c +++ b/drivers/char/random.c -@@ -1215,28 +1215,27 @@ static __u32 get_reg(struct fast_pool *f +@@ -1229,28 +1229,27 @@ static __u32 get_reg(struct fast_pool *f return *ptr; } @@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static int hv_ce_set_next_event(unsigned long delta, --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c -@@ -972,6 +972,8 @@ static void vmbus_isr(void) +@@ -991,6 +991,8 @@ static void vmbus_isr(void) void *page_addr = hv_cpu->synic_event_page; struct hv_message *msg; union hv_synic_event_flags *event; @@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> bool handled = false; if (unlikely(page_addr == NULL)) -@@ -1015,7 +1017,7 @@ static void vmbus_isr(void) +@@ -1034,7 +1036,7 @@ static void vmbus_isr(void) tasklet_schedule(&hv_cpu->msg_dpc); } @@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); } - + /* --- 
a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -70,6 +70,7 @@ struct irq_desc { @@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> note_interrupt(desc, retval); --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c -@@ -1065,6 +1065,12 @@ static int irq_thread(void *data) +@@ -1072,6 +1072,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch index ad05bca5869c..e186175bf504 100644 --- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch +++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -58,6 +58,13 @@ +@@ -61,6 +61,13 @@ #include <linux/trace_events.h> #include <linux/suspend.h> #include <linux/ftrace.h> @@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #include "tree.h" #include "rcu.h" -@@ -2819,18 +2826,17 @@ static void +@@ -2879,18 +2886,17 @@ static void /* * Do RCU core processing for the current CPU. */ @@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, -@@ -2842,18 +2848,105 @@ static void invoke_rcu_callbacks(struct +@@ -2902,18 +2908,105 @@ static void invoke_rcu_callbacks(struct { if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; @@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Handle any core-RCU processing required by a call_rcu() invocation. -@@ -4122,7 +4215,6 @@ void __init rcu_init(void) +@@ -4179,7 +4272,6 @@ void __init rcu_init(void) if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); @@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * We don't need protection against CPU-hotplug here because --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h -@@ -432,12 +432,10 @@ extern struct rcu_state rcu_preempt_stat +@@ -423,12 +423,10 @@ extern struct rcu_state rcu_preempt_stat int rcu_dynticks_snap(struct rcu_dynticks *rdtp); @@ -193,8 +193,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #ifndef RCU_TREE_NONCORE -@@ -457,8 +455,8 @@ void call_rcu(struct rcu_head *head, rcu - static void __init __rcu_init_preempt(void); +@@ -451,8 +449,8 @@ static void dump_blkd_tasks(struct rcu_s + int ncheck); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); @@ -248,7 +248,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool __read_mostly rcu_nocb_poll; /* Offload kthread are to poll. 
*/ -@@ -956,18 +930,21 @@ void exit_rcu(void) +@@ -1027,18 +1001,21 @@ dump_blkd_tasks(struct rcu_state *rsp, s #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ @@ -278,7 +278,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1106,23 +1083,6 @@ static void rcu_initiate_boost(struct rc +@@ -1177,23 +1154,6 @@ static void rcu_initiate_boost(struct rc } /* @@ -302,7 +302,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. */ -@@ -1176,67 +1136,6 @@ static int rcu_spawn_one_boost_kthread(s +@@ -1247,67 +1207,6 @@ static int rcu_spawn_one_boost_kthread(s return 0; } @@ -370,7 +370,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still -@@ -1267,26 +1166,12 @@ static void rcu_boost_kthread_setaffinit +@@ -1338,26 +1237,12 @@ static void rcu_boost_kthread_setaffinit free_cpumask_var(cm); } @@ -397,7 +397,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } -@@ -1309,11 +1194,6 @@ static void rcu_initiate_boost(struct rc +@@ -1380,11 +1265,6 @@ static void rcu_initiate_boost(struct rc raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch index 185a12ec5f65..a07b73e88c2b 100644 --- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch +++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch @@ -26,12 +26,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- include/linux/rcupdate.h | 19 +++++++++++++++++++ include/linux/rcutree.h | 8 ++++++++ - kernel/rcu/rcu.h | 14 +++++++++++--- + kernel/rcu/rcu.h | 11 +++++++++-- kernel/rcu/rcutorture.c | 7 +++++++ - kernel/rcu/tree.c | 24 ++++++++++++++++++++++++ + kernel/rcu/tree.c | 22 ++++++++++++++++++++++ kernel/rcu/tree.h | 2 ++ kernel/rcu/update.c | 2 ++ - 7 files changed, 73 insertions(+), 3 deletions(-) + 7 files changed, 69 insertions(+), 2 deletions(-) --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void call_rcu_sched(struct rcu_head *head, rcu_callback_t func); void synchronize_sched(void); void rcu_barrier_tasks(void); -@@ -264,7 +268,14 @@ extern struct lockdep_map rcu_sched_lock +@@ -263,7 +267,14 @@ extern struct lockdep_map rcu_sched_lock extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); @@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> int rcu_read_lock_sched_held(void); #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ -@@ -665,10 +676,14 @@ static inline void rcu_read_unlock(void) +@@ -663,10 +674,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /* -@@ -678,10 +693,14 @@ static inline void rcu_read_lock_bh(void +@@ -676,10 +691,14 @@ static inline void rcu_read_lock_bh(void */ static inline void rcu_read_unlock_bh(void) { @@ -120,33 +120,29 @@ Signed-off-by: Thomas Gleixner 
<tglx@linutronix.de> unsigned long get_state_synchronize_rcu(void); --- a/kernel/rcu/rcu.h +++ b/kernel/rcu/rcu.h -@@ -478,20 +478,28 @@ static inline void show_rcu_gp_kthreads( - extern unsigned long rcutorture_testseq; - extern unsigned long rcutorture_vernum; - unsigned long rcu_batches_started(void); --unsigned long rcu_batches_started_bh(void); - unsigned long rcu_batches_started_sched(void); - unsigned long rcu_batches_completed(void); --unsigned long rcu_batches_completed_bh(void); - unsigned long rcu_batches_completed_sched(void); +@@ -528,7 +528,6 @@ static inline void show_rcu_gp_kthreads( + static inline int rcu_get_gp_kthreads_prio(void) { return 0; } + #else /* #ifdef CONFIG_TINY_RCU */ + unsigned long rcu_get_gp_seq(void); +-unsigned long rcu_bh_get_gp_seq(void); + unsigned long rcu_sched_get_gp_seq(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); - unsigned long srcu_batches_completed(struct srcu_struct *sp); +@@ -536,10 +535,18 @@ unsigned long srcu_batches_completed(str void show_rcu_gp_kthreads(void); + int rcu_get_gp_kthreads_prio(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); extern struct workqueue_struct *rcu_gp_wq; extern struct workqueue_struct *rcu_par_gp_wq; + -+#ifndef CONFIG_PREEMPT_RT_FULL -+void rcu_bh_force_quiescent_state(void); -+unsigned long rcu_batches_started_bh(void); -+unsigned long rcu_batches_completed_bh(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define rcu_bh_get_gp_seq rcu_get_gp_seq ++#define rcu_bh_force_quiescent_state rcu_force_quiescent_state +#else -+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+# define rcu_batches_completed_bh rcu_batches_completed -+# define rcu_batches_started_bh rcu_batches_completed ++unsigned long rcu_bh_get_gp_seq(void); ++void rcu_bh_force_quiescent_state(void); +#endif + #endif /* #else #ifdef CONFIG_TINY_RCU */ @@ -154,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_RCU_NOCB_CPU --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c -@@ -413,6 +413,7 @@ static struct rcu_torture_ops rcu_ops = +@@ -434,6 +434,7 @@ static struct rcu_torture_ops rcu_ops = .name = "rcu" }; @@ -162,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Definitions for rcu_bh torture testing. */ -@@ -452,6 +453,12 @@ static struct rcu_torture_ops rcu_bh_ops +@@ -475,6 +476,12 @@ static struct rcu_torture_ops rcu_bh_ops .name = "rcu_bh" }; @@ -177,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * The names includes "busted", and they really means it! --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c -@@ -243,6 +243,7 @@ void rcu_sched_qs(void) +@@ -244,6 +244,7 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } @@ -185,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void rcu_bh_qs(void) { RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!"); -@@ -253,6 +254,7 @@ void rcu_bh_qs(void) +@@ -254,6 +255,7 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } @@ -193,37 +189,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Steal a bit from the bottom of ->dynticks for idle entry/exit -@@ -549,11 +551,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc - /* - * Return the number of RCU BH batches started thus far for debug & stats. 
- */ -+#ifndef CONFIG_PREEMPT_RT_FULL - unsigned long rcu_batches_started_bh(void) - { - return rcu_bh_state.gpnum; - } - EXPORT_SYMBOL_GPL(rcu_batches_started_bh); -+#endif - - /* - * Return the number of RCU batches completed thus far for debug & stats. -@@ -573,6 +577,7 @@ unsigned long rcu_batches_completed_sche +@@ -568,6 +570,7 @@ unsigned long rcu_sched_get_gp_seq(void) } - EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq); +#ifndef CONFIG_PREEMPT_RT_FULL /* - * Return the number of RCU BH batches completed thus far for debug & stats. + * Return the number of RCU-bh GPs completed thus far for debug & stats. */ -@@ -581,6 +586,7 @@ unsigned long rcu_batches_completed_bh(v - return rcu_bh_state.completed; +@@ -576,6 +579,7 @@ unsigned long rcu_bh_get_gp_seq(void) + return READ_ONCE(rcu_bh_state.gp_seq); } - EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); + EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq); +#endif /* * Return the number of RCU expedited batches completed thus far for -@@ -604,6 +610,7 @@ unsigned long rcu_exp_batches_completed_ +@@ -599,6 +603,7 @@ unsigned long rcu_exp_batches_completed_ } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); @@ -231,7 +213,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Force a quiescent state. */ -@@ -622,6 +629,13 @@ void rcu_bh_force_quiescent_state(void) +@@ -617,6 +622,13 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); @@ -245,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Force a quiescent state for RCU-sched. */ -@@ -672,9 +686,11 @@ void rcutorture_get_gp_data(enum rcutort +@@ -674,9 +686,11 @@ void rcutorture_get_gp_data(enum rcutort case RCU_FLAVOR: rsp = rcu_state_p; break; @@ -257,7 +239,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; -@@ -2986,6 +3002,7 @@ void call_rcu_sched(struct rcu_head *hea +@@ -3040,6 +3054,7 @@ void call_rcu_sched(struct rcu_head *hea } EXPORT_SYMBOL_GPL(call_rcu_sched); @@ -265,7 +247,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. -@@ -3013,6 +3030,7 @@ void call_rcu_bh(struct rcu_head *head, +@@ -3067,6 +3082,7 @@ void call_rcu_bh(struct rcu_head *head, __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); @@ -273,7 +255,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Queue an RCU callback for lazy invocation after a grace period. -@@ -3098,6 +3116,7 @@ void synchronize_sched(void) +@@ -3152,6 +3168,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); @@ -281,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * -@@ -3124,6 +3143,7 @@ void synchronize_rcu_bh(void) +@@ -3178,6 +3195,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); @@ -289,7 +271,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * get_state_synchronize_rcu - Snapshot current RCU state -@@ -3457,6 +3477,7 @@ static void _rcu_barrier(struct rcu_stat +@@ -3485,6 +3503,7 @@ static void _rcu_barrier(struct rcu_stat mutex_unlock(&rsp->barrier_mutex); } @@ -297,7 +279,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. 
*/ -@@ -3465,6 +3486,7 @@ void rcu_barrier_bh(void) +@@ -3493,6 +3512,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); @@ -305,7 +287,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. -@@ -4081,7 +4103,9 @@ void __init rcu_init(void) +@@ -4140,7 +4160,9 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); @@ -317,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> rcu_dump_rcu_node_tree(&rcu_sched_state); --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h -@@ -422,7 +422,9 @@ extern struct list_head rcu_struct_flavo +@@ -413,7 +413,9 @@ extern struct list_head rcu_struct_flavo */ extern struct rcu_state rcu_sched_state; diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index c87657adf043..0be872f90c11 100644 --- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7156,6 +7156,47 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7133,6 +7133,47 @@ const u32 sched_prio_to_wmult[40] = { #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) @@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> void migrate_disable(void) { struct task_struct *p = current; -@@ -7179,10 +7220,9 @@ void migrate_disable(void) +@@ -7156,10 +7197,9 @@ void migrate_disable(void) } preempt_disable(); @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_enable(); } -@@ -7214,9 +7254,8 @@ void migrate_enable(void) +@@ -7191,9 +7231,8 @@ void migrate_enable(void) preempt_disable(); diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch index cb97d4d3d7b9..f96cff930944 100644 --- a/patches/rt-introduce-cpu-chill.patch +++ b/patches/rt-introduce-cpu-chill.patch @@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #endif /* defined(_LINUX_DELAY_H) */ --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c -@@ -1895,6 +1895,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct +@@ -1894,6 +1894,27 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct } #endif diff --git a/patches/rt-preempt-base-config.patch b/patches/rt-preempt-base-config.patch index 3f962fb7497c..dd7d86d8b12f 100644 --- a/patches/rt-preempt-base-config.patch +++ b/patches/rt-preempt-base-config.patch @@ -8,8 +8,8 @@ substitutions for testing. Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- - kernel/Kconfig.preempt | 19 +++++++++++++++++-- - 1 file changed, 17 insertions(+), 2 deletions(-) + kernel/Kconfig.preempt | 21 ++++++++++++++++++--- + 1 file changed, 18 insertions(+), 3 deletions(-) --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -24,19 +24,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> choice prompt "Preemption Model" -@@ -33,9 +40,9 @@ config PREEMPT_VOLUNTARY +@@ -34,10 +41,10 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. 
-config PREEMPT +config PREEMPT__LL bool "Preemptible Kernel (Low-Latency Desktop)" + depends on !ARCH_NO_PREEMPT - select PREEMPT_COUNT + select PREEMPT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making -@@ -52,6 +59,14 @@ config PREEMPT +@@ -54,7 +61,15 @@ config PREEMPT embedded system with latency requirements in the milliseconds range. @@ -51,3 +52,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> endchoice config PREEMPT_COUNT +- bool +\ No newline at end of file ++ bool diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch index 64fbdf67259a..f6358fa79664 100644 --- a/patches/rtmutex-add-sleeping-lock-implementation.patch +++ b/patches/rtmutex-add-sleeping-lock-implementation.patch @@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/include/linux/kernel.h +++ b/include/linux/kernel.h -@@ -226,6 +226,9 @@ extern int _cond_resched(void); +@@ -259,6 +259,9 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) @@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> # define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void ___might_sleep(const char *file, int line, -@@ -233,6 +236,7 @@ extern int _cond_resched(void); +@@ -266,6 +269,7 @@ extern int _cond_resched(void); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) @@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * set_special_state() should be used for those states when the blocking task * can not use the regular condition based wait-loop. In that case we must -@@ -908,6 +915,7 @@ struct task_struct { +@@ -914,6 +921,7 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; @@ -381,7 +381,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +#endif --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -894,6 +894,7 @@ static struct task_struct *dup_task_stru +@@ -895,6 +895,7 @@ static struct task_struct *dup_task_stru tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; @@ -1129,7 +1129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> # include "rtmutex-debug.h" --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -407,9 +407,15 @@ static bool set_nr_if_polling(struct tas +@@ -401,9 +401,15 @@ static bool set_nr_if_polling(struct tas #endif #endif @@ -1147,7 +1147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Atomically grab the task, if ->wake_q is !nil already it means -@@ -431,24 +437,32 @@ void wake_q_add(struct wake_q_head *head +@@ -425,24 +431,32 @@ void wake_q_add(struct wake_q_head *head head->lastp = &node->next; } @@ -1174,8 +1174,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> + else + task->wake_q.next = NULL; /* - * wake_up_process() implies a wmb() to pair with the queueing - * in wake_q_add() so as not to miss wakeups. + * wake_up_process() executes a full barrier, which pairs with + * the queueing in wake_q_add() so as not to miss wakeups. 
*/ - wake_up_process(task); + if (sleeper) diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch index dd3f0f2103e6..d3f292def978 100644 --- a/patches/rtmutex-annotate-sleeping-lock-context.patch +++ b/patches/rtmutex-annotate-sleeping-lock-context.patch @@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #endif #ifdef CONFIG_PREEMPT_RCU -@@ -1800,6 +1809,23 @@ static __always_inline bool need_resched +@@ -1792,6 +1801,23 @@ static __always_inline bool need_resched return unlikely(tif_need_resched()); } @@ -222,7 +222,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h -@@ -325,9 +325,13 @@ static void rcu_preempt_note_context_swi +@@ -337,9 +337,13 @@ static void rcu_preempt_note_context_swi struct task_struct *t = current; struct rcu_data *rdp; struct rcu_node *rnp; @@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7325,4 +7325,49 @@ void migrate_enable(void) +@@ -7302,4 +7302,49 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); diff --git a/patches/rtmutex-wire-up-RT-s-locking.patch b/patches/rtmutex-wire-up-RT-s-locking.patch index 9c95a0a27133..0040b7ecfac3 100644 --- a/patches/rtmutex-wire-up-RT-s-locking.patch +++ b/patches/rtmutex-wire-up-RT-s-locking.patch @@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h -@@ -279,7 +279,11 @@ static inline void do_raw_spin_unlock(ra +@@ -298,7 +298,11 @@ static inline void do_raw_spin_unlock(ra }) /* Include rwlock functions */ @@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -290,6 +294,10 @@ static inline void do_raw_spin_unlock(ra +@@ -309,6 +313,10 @@ static inline void do_raw_spin_unlock(ra # include <linux/spinlock_api_up.h> #endif @@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ -@@ -410,6 +418,8 @@ static __always_inline int spin_is_conte +@@ -429,6 +437,8 @@ static __always_inline int spin_is_conte #define assert_spin_locked(lock) assert_raw_spin_locked(&(lock)->rlock) diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch index 4d3f3b83a717..d63e678a061c 100644 --- a/patches/rtmutex_dont_include_rcu.patch +++ b/patches/rtmutex_dont_include_rcu.patch @@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) -@@ -373,54 +374,6 @@ static inline void rcu_preempt_sleep_che +@@ -372,54 +373,6 @@ static inline void rcu_preempt_sleep_che }) /** diff --git a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch index 070b32a3a09c..cc06ce835c6c 100644 --- a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch +++ b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -926,7 +926,7 @@ static inline bool is_cpu_allowed(struct +@@ 
-903,7 +903,7 @@ static inline bool is_cpu_allowed(struct if (!cpumask_test_cpu(cpu, p->cpus_ptr)) return false; diff --git a/patches/sched-core-Remove-get_cpu-from-sched_fork.patch b/patches/sched-core-Remove-get_cpu-from-sched_fork.patch deleted file mode 100644 index 5f2c036af9a8..000000000000 --- a/patches/sched-core-Remove-get_cpu-from-sched_fork.patch +++ /dev/null @@ -1,87 +0,0 @@ -From: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Date: Fri, 6 Jul 2018 15:06:15 +0200 -Subject: [PATCH] sched/core: Remove get_cpu() from sched_fork() - -[ Upstream commit af0fffd9300b97d8875aa745bc78e2f6fdb3c1f0 ] - -get_cpu() disables preemption for the entire sched_fork() function. -This get_cpu() was introduced in commit: - - dd41f596cda0 ("sched: cfs core code") - -... which also invoked sched_balance_self() and this function -required preemption do be off. - -Today, sched_balance_self() seems to be moved to ->task_fork callback -which is invoked while the ->pi_lock is held. - -set_load_weight() could invoke reweight_task() which then via $callchain -might end up in smp_processor_id() but since `update_load' is false -this won't happen. - -I didn't find any this_cpu*() or similar usage during the initialisation -of the task_struct. - -The `cpu' value (from get_cpu()) is only used later in __set_task_cpu() -while the ->pi_lock lock is held. - -Based on this it is possible to remove get_cpu() and use -smp_processor_id() for the `cpu' variable without breaking anything. - -Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> -Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> -Cc: Linus Torvalds <torvalds@linux-foundation.org> -Cc: Peter Zijlstra <peterz@infradead.org> -Cc: Thomas Gleixner <tglx@linutronix.de> -Link: http://lkml.kernel.org/r/20180706130615.g2ex2kmfu5kcvlq6@linutronix.de -Signed-off-by: Ingo Molnar <mingo@kernel.org> ---- - kernel/sched/core.c | 13 ++++--------- - 1 file changed, 4 insertions(+), 9 deletions(-) - ---- a/kernel/sched/core.c -+++ b/kernel/sched/core.c -@@ -2317,7 +2317,6 @@ static inline void init_schedstats(void) - int sched_fork(unsigned long clone_flags, struct task_struct *p) - { - unsigned long flags; -- int cpu = get_cpu(); - - __sched_fork(clone_flags, p); - /* -@@ -2353,14 +2352,12 @@ int sched_fork(unsigned long clone_flags - p->sched_reset_on_fork = 0; - } - -- if (dl_prio(p->prio)) { -- put_cpu(); -+ if (dl_prio(p->prio)) - return -EAGAIN; -- } else if (rt_prio(p->prio)) { -+ else if (rt_prio(p->prio)) - p->sched_class = &rt_sched_class; -- } else { -+ else - p->sched_class = &fair_sched_class; -- } - - init_entity_runnable_average(&p->se); - -@@ -2376,7 +2373,7 @@ int sched_fork(unsigned long clone_flags - * We're setting the CPU for the first time, we don't migrate, - * so use __set_task_cpu(). 
- */ -- __set_task_cpu(p, cpu); -+ __set_task_cpu(p, smp_processor_id()); - if (p->sched_class->task_fork) - p->sched_class->task_fork(p); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); -@@ -2393,8 +2390,6 @@ int sched_fork(unsigned long clone_flags - plist_node_init(&p->pushable_tasks, MAX_PRIO); - RB_CLEAR_NODE(&p->pushable_dl_tasks); - #endif -- -- put_cpu(); - return 0; - } - diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch index 73100683a4cd..b416b66f1b14 100644 --- a/patches/sched-delay-put-task.patch +++ b/patches/sched-delay-put-task.patch @@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1173,6 +1173,9 @@ struct task_struct { +@@ -1185,6 +1185,9 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif @@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -670,7 +670,9 @@ static inline void put_signal_struct(str +@@ -671,7 +671,9 @@ static inline void put_signal_struct(str if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); -@@ -687,7 +689,18 @@ void __put_task_struct(struct task_struc +@@ -688,7 +690,18 @@ void __put_task_struct(struct task_struc if (!profile_handoff_task(tsk)) free_task(tsk); } diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch index f28ca14d354f..c4e678fbc3d7 100644 --- a/patches/sched-disable-rt-group-sched-on-rt.patch +++ b/patches/sched-disable-rt-group-sched-on-rt.patch @@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/init/Kconfig +++ b/init/Kconfig -@@ -741,6 +741,7 @@ config CFS_BANDWIDTH +@@ -776,6 +776,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch index 0f21cd5606da..ba1947d2aff5 100644 --- a/patches/sched-limit-nr-migrate.patch +++ b/patches/sched-limit-nr-migrate.patch @@ -12,15 +12,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -42,7 +42,11 @@ const_debug unsigned int sysctl_sched_fe +@@ -44,7 +44,11 @@ const_debug unsigned int sysctl_sched_fe * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ -+#ifndef CONFIG_PREEMPT_RT_FULL - const_debug unsigned int sysctl_sched_nr_migrate = 32; -+#else ++#ifdef CONFIG_PREEMPT_RT_FULL +const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#else + const_debug unsigned int sysctl_sched_nr_migrate = 32; +#endif /* - * period over which we average the RT time consumption, measured + * period over which we measure -rt task CPU usage in us. 
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch index 1378bd3ba12c..ae58e0520646 100644 --- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch +++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h -@@ -74,6 +74,11 @@ void synchronize_rcu(void); +@@ -73,6 +73,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) @@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #else /* #ifdef CONFIG_PREEMPT_RCU */ -@@ -99,6 +104,8 @@ static inline int rcu_preempt_depth(void +@@ -98,6 +103,8 @@ static inline int rcu_preempt_depth(void return 0; } @@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Internal to kernel */ --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6182,7 +6182,7 @@ void __init sched_init(void) +@@ -6149,7 +6149,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { diff --git a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch index 1b119a9a8a48..f61bc0f6120c 100644 --- a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch +++ b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch @@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1035,6 +1035,7 @@ int __migrate_disabled(struct task_struc +@@ -1012,6 +1012,7 @@ int __migrate_disabled(struct task_struc { return p->migrate_disable; } diff --git a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch index 2d2120ec40c5..5dbfdae7e323 100644 --- a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch +++ b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch @@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #endif --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1053,7 +1053,7 @@ void set_cpus_allowed_common(struct task +@@ -1030,7 +1030,7 @@ void set_cpus_allowed_common(struct task p->nr_cpus_allowed = cpumask_weight(new_mask); } @@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> int __migrate_disabled(struct task_struct *p) { return p->migrate_disable; -@@ -1093,7 +1093,7 @@ static void __do_set_cpus_allowed_tail(s +@@ -1070,7 +1070,7 @@ static void __do_set_cpus_allowed_tail(s void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { @@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (__migrate_disabled(p)) { lockdep_assert_held(&p->pi_lock); -@@ -1166,7 +1166,7 @@ static int __set_cpus_allowed_ptr(struct +@@ -1143,7 +1143,7 @@ static int __set_cpus_allowed_ptr(struct if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) goto out; @@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (__migrate_disabled(p)) { p->migrate_disable_update = 1; goto out; -@@ -7181,7 +7181,7 @@ const u32 sched_prio_to_wmult[40] = { +@@ -7158,7 +7158,7 @@ const u32 sched_prio_to_wmult[40] = { #undef 
CREATE_TRACE_POINTS @@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> static inline void update_nr_migratory(struct task_struct *p, long delta) -@@ -7329,45 +7329,44 @@ EXPORT_SYMBOL(migrate_enable); +@@ -7306,45 +7306,44 @@ EXPORT_SYMBOL(migrate_enable); #elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE) void migrate_disable(void) { @@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> #endif --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c -@@ -979,7 +979,7 @@ void proc_sched_show_task(struct task_st +@@ -978,7 +978,7 @@ void proc_sched_show_task(struct task_st P(dl.runtime); P(dl.deadline); } diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch index da5079f3a077..f19de139845f 100644 --- a/patches/sched-mmdrop-delayed.patch +++ b/patches/sched-mmdrop-delayed.patch @@ -23,15 +23,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> -@@ -483,6 +484,9 @@ struct mm_struct { - bool tlb_flush_batched; +@@ -482,6 +483,9 @@ struct mm_struct { + bool tlb_flush_batched; #endif - struct uprobes_state uprobes_state; + struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head delayed_drop; ++ struct rcu_head delayed_drop; +#endif #ifdef CONFIG_HUGETLB_PAGE - atomic_long_t hugetlb_usage; + atomic_long_t hugetlb_usage; #endif --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * @mm: The address space to pin. --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -636,6 +636,19 @@ void __mmdrop(struct mm_struct *mm) +@@ -637,6 +637,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> struct mm_struct *mm; --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2753,9 +2753,13 @@ static struct rq *finish_task_switch(str +@@ -2728,9 +2728,13 @@ static struct rq *finish_task_switch(str * provided by mmdrop(), * - a sync_core for SYNC_CORE. */ @@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) -@@ -5581,6 +5585,8 @@ void sched_setnuma(struct task_struct *p +@@ -5558,6 +5562,8 @@ void sched_setnuma(struct task_struct *p #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. -@@ -5596,7 +5602,11 @@ void idle_task_exit(void) +@@ -5573,7 +5579,11 @@ void idle_task_exit(void) current->active_mm = &init_mm; finish_arch_post_lock_switch(); } @@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /* -@@ -5913,6 +5923,10 @@ int sched_cpu_dying(unsigned int cpu) +@@ -5882,6 +5892,10 @@ int sched_cpu_dying(unsigned int cpu) update_max_interval(); nohz_balance_exit_idle(rq); hrtick_clear(rq); diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch index 16a715a4f9e4..1c303e10aaf7 100644 --- a/patches/sched-rt-mutex-wakeup.patch +++ b/patches/sched-rt-mutex-wakeup.patch @@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * This begins the randomizable portion of task_struct. 
Only -@@ -1611,6 +1613,7 @@ extern struct task_struct *find_get_task +@@ -1603,6 +1605,7 @@ extern struct task_struct *find_get_task extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); @@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_SMP --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2023,8 +2023,27 @@ try_to_wake_up(struct task_struct *p, un +@@ -1998,8 +1998,27 @@ try_to_wake_up(struct task_struct *p, un */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> trace_sched_waking(p); -@@ -2188,6 +2207,18 @@ int wake_up_process(struct task_struct * +@@ -2163,6 +2182,18 @@ int wake_up_process(struct task_struct * } EXPORT_SYMBOL(wake_up_process); @@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> return try_to_wake_up(p, state, 0); --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -1418,6 +1418,7 @@ static inline int task_on_rq_migrating(s +@@ -1446,6 +1446,7 @@ static inline int task_on_rq_migrating(s #define WF_SYNC 0x01 /* Waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* Child wakeup after fork */ #define WF_MIGRATED 0x4 /* Internal use, task got migrated */ diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index 61eb5a91c096..f53b321ce5f2 100644 --- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3520,8 +3520,10 @@ static void __sched notrace __schedule(b +@@ -3497,8 +3497,10 @@ static void __sched notrace __schedule(b * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. 
diff --git a/patches/series b/patches/series index 2485ed7972a6..e86026944b8e 100644 --- a/patches/series +++ b/patches/series @@ -6,31 +6,6 @@ # UPSTREAM merged ############################################################ -#misc -SCSI-libsas-remove-irq-save-in-sas_ata_qc_issue.patch -SCSI-qla2xxx-remove-irq-save-in-qla2x00_poll.patch -cgroup-tracing-Move-taking-of-spin-lock-out-of-trace.patch -sched-core-Remove-get_cpu-from-sched_fork.patch -random-Remove-preempt-disabled-region.patch -iommu-amd-drop-irqs_disabled-warn_on.patch -# atomic_dec_and_lock_irqsave / refcount_dec_and_lock_irqsave -0002-drivers-md-raid5-Use-irqsave-variant-of-atomic_dec_a.patch -0003-drivers-md-raid5-Do-not-disable-irq-on-release_inact.patch -0001-bdi-use-refcount_t-for-reference-counting-instead-at.patch -0002-userns-use-refcount_t-for-reference-counting-instead.patch -0005-bdi-Use-irqsave-variant-of-refcount_dec_and_lock.patch -0006-userns-Use-irqsave-variant-of-refcount_dec_and_lock.patch -libata-remove-ata_sff_data_xfer_noirq.patch -ntfs-avoid-disabling-interrupts-during-kmap_atomic.patch -# mm/workingset -0001-mm-workingset-remove-local_irq_disable-from-count_sh.patch -0002-mm-workingset-make-shadow_lru_isolate-use-locking-su.patch -0001-mm-list_lru-use-list_lru_walk_one-in-list_lru_walk_n.patch -0002-mm-list_lru-Move-locking-from-__list_lru_walk_one-to.patch -0003-mm-list_lru-Pass-struct-list_lru_node-as-an-argument.patch -0004-mm-list_lru-Introduce-list_lru_shrink_walk_irq.patch -irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch - ############################################################ # POSTED by others ############################################################ @@ -57,6 +32,7 @@ arm-unwind-use_raw_lock.patch cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch fscache-initialize-cookie-hash-table-raw-spinlocks.patch Drivers-hv-vmbus-include-header-for-get_irq_regs.patch +percpu-include-irqflags.h-for-raw_local_irq_save.patch ############################################################ # Ready for posting @@ -353,9 +329,6 @@ printk-rt-aware.patch kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch HACK-printk-drop-the-logbuf_lock-more-often.patch -# POWERC -powerpc-ps3-device-init.c-adapt-to-completions-using.patch - # ARM ARM-enable-irq-in-translation-section-permission-fau.patch genirq-update-irq_set_irqchip_state-documentation.patch diff --git a/patches/signal-revert-ptrace-preempt-magic.patch b/patches/signal-revert-ptrace-preempt-magic.patch index 729bfda8c303..2dd895a1c0b0 100644 --- a/patches/signal-revert-ptrace-preempt-magic.patch +++ b/patches/signal-revert-ptrace-preempt-magic.patch @@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/signal.c +++ b/kernel/signal.c -@@ -2007,15 +2007,7 @@ static void ptrace_stop(int exit_code, i +@@ -2052,15 +2052,7 @@ static void ptrace_stop(int exit_code, i if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch index 4dbdfd666b20..35422669f1b7 100644 --- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch +++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch @@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -889,6 +889,8 @@ struct task_struct { +@@ -895,6 +895,8 @@ struct task_struct { /* Signal handlers: */ struct 
signal_struct *signal; struct sighand_struct *sighand; @@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static inline int valid_signal(unsigned long sig) --- a/kernel/exit.c +++ b/kernel/exit.c -@@ -159,7 +159,7 @@ static void __exit_signal(struct task_st +@@ -160,7 +160,7 @@ static void __exit_signal(struct task_st * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. */ @@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -1760,6 +1760,7 @@ static __latent_entropy struct task_stru +@@ -1802,6 +1802,7 @@ static __latent_entropy struct task_stru spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); @@ -67,8 +67,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/fs.h> #include <linux/tty.h> #include <linux/binfmts.h> -@@ -362,13 +363,30 @@ static bool task_participate_group_stop( - return false; +@@ -388,13 +389,30 @@ void task_join_group_stop(struct task_st + } } +static inline struct sigqueue *get_task_cache(struct task_struct *t) @@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> { struct sigqueue *q = NULL; struct user_struct *user; -@@ -385,7 +403,10 @@ static struct sigqueue * +@@ -411,7 +429,10 @@ static struct sigqueue * if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { @@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } else { print_dropped_signal(sig); } -@@ -402,6 +423,13 @@ static struct sigqueue * +@@ -428,6 +449,13 @@ static struct sigqueue * return q; } @@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) -@@ -411,6 +439,21 @@ static void __sigqueue_free(struct sigqu +@@ -437,6 +465,21 @@ static void __sigqueue_free(struct sigqu kmem_cache_free(sigqueue_cachep, q); } @@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; -@@ -424,6 +467,21 @@ void flush_sigqueue(struct sigpending *q +@@ -450,6 +493,21 @@ void flush_sigqueue(struct sigpending *q } /* @@ -169,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * Flush all pending signals for this kthread. */ void flush_signals(struct task_struct *t) -@@ -544,7 +602,7 @@ static void collect_signal(int sig, stru +@@ -572,7 +630,7 @@ static void collect_signal(int sig, stru (info->si_code == SI_TIMER) && (info->si_sys_private); @@ -178,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } else { /* * Ok, it wasn't in the queue. 
This must be -@@ -581,6 +639,8 @@ int dequeue_signal(struct task_struct *t +@@ -609,6 +667,8 @@ int dequeue_signal(struct task_struct *t bool resched_timer = false; int signr; @@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ -@@ -1664,7 +1724,8 @@ EXPORT_SYMBOL(kill_pid); +@@ -1705,7 +1765,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch index 531e3c3260fa..fa90be782684 100644 --- a/patches/skbufhead-raw-lock.patch +++ b/patches/skbufhead-raw-lock.patch @@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h -@@ -2923,6 +2923,7 @@ struct softnet_data { +@@ -2967,6 +2967,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; @@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> }; struct sk_buff; -@@ -1666,6 +1667,12 @@ static inline void skb_queue_head_init(s +@@ -1675,6 +1676,12 @@ static inline void skb_queue_head_init(s __skb_queue_head_init(list); } @@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> { --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -219,14 +219,14 @@ static inline struct hlist_head *dev_ind +@@ -217,14 +217,14 @@ static inline struct hlist_head *dev_ind static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS @@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif } -@@ -4831,7 +4831,7 @@ static void flush_backlog(struct work_st +@@ -5239,7 +5239,7 @@ static void flush_backlog(struct work_st skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); @@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> input_queue_head_incr(sd); } } -@@ -4841,11 +4841,14 @@ static void flush_backlog(struct work_st +@@ -5249,11 +5249,14 @@ static void flush_backlog(struct work_st skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); @@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static void flush_all_backlogs(void) -@@ -5393,7 +5396,9 @@ static int process_backlog(struct napi_s +@@ -5827,7 +5830,9 @@ static int process_backlog(struct napi_s while (again) { struct sk_buff *skb; @@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); -@@ -5401,9 +5406,9 @@ static int process_backlog(struct napi_s +@@ -5835,9 +5840,9 @@ static int process_backlog(struct napi_s if (++work >= quota) return work; @@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* -@@ -5843,13 +5848,21 @@ static __latent_entropy void net_rx_acti +@@ -6299,13 +6304,21 @@ static __latent_entropy void net_rx_acti unsigned long time_limit = jiffies + usecs_to_jiffies(netdev_budget_usecs); int budget = netdev_budget; @@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> for (;;) { struct napi_struct *n; -@@ -8814,10 +8827,13 @@ static int dev_cpu_dead(unsigned int old +@@ -9291,10 +9304,13 @@ static int dev_cpu_dead(unsigned int old netif_rx_ni(skb); input_queue_head_incr(oldsd); } @@ -148,7 +148,7 @@ Signed-off-by: Thomas 
Gleixner <tglx@linutronix.de> return 0; } -@@ -9123,8 +9139,9 @@ static int __init net_dev_init(void) +@@ -9603,8 +9619,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch index b79cc6c13cf7..94e14bea5b42 100644 --- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch +++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch @@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/init/Kconfig +++ b/init/Kconfig -@@ -1655,7 +1655,7 @@ config SLAB_FREELIST_HARDENED +@@ -1692,7 +1692,7 @@ config SLAB_FREELIST_HARDENED config SLUB_CPU_PARTIAL default y diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch index c1ea8babe259..7ff4785d749c 100644 --- a/patches/slub-enable-irqs-for-no-wait.patch +++ b/patches/slub-enable-irqs-for-no-wait.patch @@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/mm/slub.c +++ b/mm/slub.c -@@ -1572,10 +1572,17 @@ static struct page *allocate_slab(struct +@@ -1570,10 +1570,17 @@ static struct page *allocate_slab(struct void *start, *p; int idx, order; bool shuffle; @@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> local_irq_enable(); flags |= s->allocflags; -@@ -1634,7 +1641,7 @@ static struct page *allocate_slab(struct +@@ -1632,7 +1639,7 @@ static struct page *allocate_slab(struct page->frozen = 1; out: diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch index 6603614eb0fc..0160ba746f7e 100644 --- a/patches/softirq-disable-softirq-stacks-for-rt.patch +++ b/patches/softirq-disable-softirq-stacks-for-rt.patch @@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c -@@ -745,6 +745,7 @@ void irq_ctx_init(void) +@@ -766,6 +766,7 @@ void irq_ctx_init(void) } } @@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void do_softirq_own_stack(void) { struct thread_info *curtp, *irqtp; -@@ -762,6 +763,7 @@ void do_softirq_own_stack(void) +@@ -783,6 +784,7 @@ void do_softirq_own_stack(void) if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); } @@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> { --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S -@@ -41,6 +41,7 @@ +@@ -42,6 +42,7 @@ * We store the saved ksp_limit in the unused part * of the STACK_FRAME_OVERHEAD */ @@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) -@@ -57,6 +58,7 @@ +@@ -58,6 +59,7 @@ stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr @@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S -@@ -31,6 +31,7 @@ +@@ -32,6 +32,7 @@ .text @@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) -@@ -41,6 +42,7 @@ +@@ -42,6 +43,7 @@ ld r0,16(r1) mtlr r0 blr @@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void fixup_irqs(void) --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S -@@ -1038,6 +1038,7 @@ EXPORT_SYMBOL(native_load_gs_index) +@@ -1039,6 +1039,7 @@ EXPORT_SYMBOL(native_load_gs_index) jmp 2b .previous @@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* 
Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp -@@ -1048,6 +1049,7 @@ ENTRY(do_softirq_own_stack) +@@ -1049,6 +1050,7 @@ ENTRY(do_softirq_own_stack) leaveq ret ENDPROC(do_softirq_own_stack) diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch index d42297146937..72f8d80b5669 100644 --- a/patches/softirq-preempt-fix-3-re.patch +++ b/patches/softirq-preempt-fix-3-re.patch @@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } --- a/net/core/dev.c +++ b/net/core/dev.c -@@ -2527,6 +2527,7 @@ static void __netif_reschedule(struct Qd +@@ -2707,6 +2707,7 @@ static void __netif_reschedule(struct Qd sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } void __netif_schedule(struct Qdisc *q) -@@ -2589,6 +2590,7 @@ void __dev_kfree_skb_irq(struct sk_buff +@@ -2769,6 +2770,7 @@ void __dev_kfree_skb_irq(struct sk_buff __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); @@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } EXPORT_SYMBOL(__dev_kfree_skb_irq); -@@ -4008,6 +4010,7 @@ static int enqueue_to_backlog(struct sk_ +@@ -4241,6 +4243,7 @@ static int enqueue_to_backlog(struct sk_ rps_unlock(sd); local_irq_restore(flags); @@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); -@@ -5355,12 +5358,14 @@ static void net_rps_action_and_irq_enabl +@@ -5789,12 +5792,14 @@ static void net_rps_action_and_irq_enabl sd->rps_ipi_list = NULL; local_irq_enable(); @@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -5438,6 +5443,7 @@ void __napi_schedule(struct napi_struct +@@ -5872,6 +5877,7 @@ void __napi_schedule(struct napi_struct local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); @@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } EXPORT_SYMBOL(__napi_schedule); -@@ -8796,6 +8802,7 @@ static int dev_cpu_dead(unsigned int old +@@ -9273,6 +9279,7 @@ static int dev_cpu_dead(unsigned int old raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch index 7a9308b932b2..afd0bcff2fcb 100644 --- a/patches/softirq-split-locks.patch +++ b/patches/softirq-split-locks.patch @@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET))) --- a/include/linux/sched.h +++ b/include/linux/sched.h -@@ -1177,6 +1177,8 @@ struct task_struct { +@@ -1189,6 +1189,8 @@ struct task_struct { #endif #ifdef CONFIG_PREEMPT_RT_BASE struct rcu_head put_rcu; @@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; -@@ -1394,6 +1396,7 @@ extern struct pid *cad_pid; +@@ -1386,6 +1388,7 @@ extern struct pid *cad_pid; /* * Per process flags */ diff --git a/patches/srcu-replace-local_irqsave-with-a-locallock.patch b/patches/srcu-replace-local_irqsave-with-a-locallock.patch index d48841781d58..5f44929fbf84 100644 --- a/patches/srcu-replace-local_irqsave-with-a-locallock.patch +++ b/patches/srcu-replace-local_irqsave-with-a-locallock.patch @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej 
diff --git a/patches/srcu-replace-local_irqsave-with-a-locallock.patch b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
index d48841781d58..5f44929fbf84 100644
--- a/patches/srcu-replace-local_irqsave-with-a-locallock.patch
+++ b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/rcu/srcutree.c
 +++ b/kernel/rcu/srcutree.c
-@@ -37,6 +37,7 @@
+@@ -39,6 +39,7 @@
 #include <linux/module.h>
 #include <linux/srcu.h>
 #include <linux/cpu.h>
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-@@ -752,6 +753,8 @@ static void srcu_flip(struct srcu_struct
+@@ -758,6 +759,8 @@ static void srcu_flip(struct srcu_struct
  * negligible when amoritized over that time period, and the extra latency
  * of a needlessly non-expedited grace period is similarly negligible.
  */
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 static bool srcu_might_be_idle(struct srcu_struct *sp)
 {
 	unsigned long curseq;
-@@ -760,13 +763,13 @@ static bool srcu_might_be_idle(struct sr
+@@ -766,13 +769,13 @@ static bool srcu_might_be_idle(struct sr
 	unsigned long t;
 
 	/* If the local srcu_data structure has callbacks, not idle. */
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 
 	/*
 	 * No local callbacks, so probabalistically probe global state.
-@@ -844,7 +847,7 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -850,7 +853,7 @@ void __call_srcu(struct srcu_struct *sp,
 		return;
 	}
 	rhp->func = func;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 	sdp = this_cpu_ptr(sp->sda);
 	spin_lock_rcu_node(sdp);
 	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-@@ -860,7 +863,8 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -866,7 +869,8 @@ void __call_srcu(struct srcu_struct *sp,
 		sdp->srcu_gp_seq_needed_exp = s;
 		needexp = true;
 	}
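The locallock conversion above follows a fixed pattern: local_irq_save() becomes a per-CPU local lock, which is a sleeping lock on RT and collapses back to plain IRQ disabling on !RT. A minimal sketch under the RT locallock API, assuming the lock is named sda_lock as in earlier revisions of this patch (call sites illustrative):

#include <linux/locallock.h>

static DEFINE_LOCAL_IRQ_LOCK(sda_lock);

	unsigned long flags;

	local_lock_irqsave(sda_lock, flags);	/* was: local_irq_save(flags) */
	sdp = this_cpu_ptr(sp->sda);
	/* ... enqueue the SRCU callback on the per-CPU list ... */
	local_unlock_irqrestore(sda_lock, flags);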
diff --git a/patches/srcu-use-cpu_online-instead-custom-check.patch b/patches/srcu-use-cpu_online-instead-custom-check.patch
index 5751f93d1840..0b7924b7cd08 100644
--- a/patches/srcu-use-cpu_online-instead-custom-check.patch
+++ b/patches/srcu-use-cpu_online-instead-custom-check.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/rcu/srcutree.c
 +++ b/kernel/rcu/srcutree.c
-@@ -36,6 +36,7 @@
+@@ -38,6 +38,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/srcu.h>
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 #include "rcu.h"
 #include "rcu_segcblist.h"
 
-@@ -456,21 +457,6 @@ static void srcu_gp_start(struct srcu_st
+@@ -459,21 +460,6 @@ static void srcu_gp_start(struct srcu_st
 }
 
 /*
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  * Place the workqueue handler on the specified CPU if online, otherwise
  * just run it whereever.  This is useful for placing workqueue handlers
  * that are to invoke the specified CPU's callbacks.
-@@ -481,12 +467,12 @@ static bool srcu_queue_delayed_work_on(i
+@@ -484,12 +470,12 @@ static bool srcu_queue_delayed_work_on(i
 {
 	bool ret;
 
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/rcu/tree.c
 +++ b/kernel/rcu/tree.c
-@@ -3732,8 +3732,6 @@ int rcutree_online_cpu(unsigned int cpu)
+@@ -3767,8 +3767,6 @@ int rcutree_online_cpu(unsigned int cpu)
 		rnp->ffmask |= rdp->grpmask;
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	}
 	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
 		return 0; /* Too early in boot for scheduler work. */
 	sync_sched_exp_online_cleanup(cpu);
-@@ -3761,8 +3759,6 @@ int rcutree_offline_cpu(unsigned int cpu
+@@ -3796,8 +3794,6 @@ int rcutree_offline_cpu(unsigned int cpu
 	}
 	rcutree_affinity_setting(cpu, cpu);
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index 67afb9fa5096..6ab104ae7598 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/kernel/time/timekeeping.c
 +++ b/kernel/time/timekeeping.c
-@@ -2362,8 +2362,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2415,8 +2415,10 @@ EXPORT_SYMBOL(hardpps);
  */
 void xtime_update(unsigned long ticks)
 {
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 3a5cb4f945b2..e8682c83ae64 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 # define del_timer_sync(t)	del_timer(t)
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -503,11 +503,14 @@ void resched_cpu(int cpu)
+@@ -497,11 +497,14 @@ void resched_cpu(int cpu)
  */
 int get_nohz_timer_target(void)
 {
 
 	rcu_read_lock();
 	for_each_domain(cpu, sd) {
-@@ -526,6 +529,8 @@ int get_nohz_timer_target(void)
+@@ -520,6 +523,8 @@ int get_nohz_timer_target(void)
 	cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
 unlock:
 	rcu_read_unlock();
 +
@@ -91,8 +91,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 +		return;
 +
 +	base = get_timer_base(tf);
 +	swait_event_exclusive(base->wait_for_running_timer,
 +			      base->running_timer != timer);
 +}
 +
 +# define wakeup_timer_waiters(b)	swake_up_all(&(b)->wait_for_running_timer)
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 697364e0ac60..f883ac294576 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/drivers/tty/serial/8250/8250_port.c
 +++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3221,10 +3221,8 @@ void serial8250_console_write(struct uar
+@@ -3239,10 +3239,8 @@ void serial8250_console_write(struct uar
 
 	serial8250_rpm_get(up);
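On the timers patch above: del_timer_sync() on RT may be called while the timer callback runs in a preemptible softirq thread, so instead of spinning the deleter sleeps on a per-base simple waitqueue. The swait_event_exclusive() call is quoted from the hunk; the surrounding helper is a schematic reconstruction, not the literal patch body:

static void wait_for_running_timer(struct timer_list *timer)
{
	struct timer_base *base = get_timer_base(timer->flags);

	/* sleep until the callback that is currently running has finished */
	swait_event_exclusive(base->wait_for_running_timer,
			      base->running_timer != timer);
}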
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index beea5a401a82..c4921b8ed32b 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/net/core/dev.c
 +++ b/net/core/dev.c
-@@ -4232,7 +4232,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4465,7 +4465,7 @@ static int netif_rx_internal(struct sk_b
 	struct rps_dev_flow voidflow, *rflow = &voidflow;
 	int cpu;
 
 	rcu_read_lock();
 	cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -4242,14 +4242,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4475,14 +4475,14 @@ static int netif_rx_internal(struct sk_b
 	ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
 	rcu_read_unlock();
diff --git a/patches/work-simple-Simple-work-queue-implemenation.patch b/patches/work-simple-Simple-work-queue-implemenation.patch
index 67b845a29302..3aa8f7b5a665 100644
--- a/patches/work-simple-Simple-work-queue-implemenation.patch
+++ b/patches/work-simple-Simple-work-queue-implemenation.patch
@@ -51,7 +51,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
 -obj-y += wait.o wait_bit.o swait.o completion.o
 +obj-y += wait.o wait_bit.o swait.o swork.o completion.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
  obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
 --- /dev/null
 +++ b/kernel/sched/swork.c
@@ -104,8 +104,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
 +	struct sworker *worker = arg;
 +
 +	for (;;) {
 +		swait_event_interruptible_exclusive(worker->wq,
 +					swork_readable(worker));
 +		if (kthread_should_stop())
 +			break;
 +
@@ -175,7 +175,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
 +	list_add_tail(&sev->item, &glob_worker->events);
 +	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
 +
 +	swake_up_one(&glob_worker->wq);
 +	return true;
 +}
 +EXPORT_SYMBOL_GPL(swork_queue);
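Many of the textual changes above simply track the upstream swait rename that landed for v4.19: swake_up() became swake_up_one() and the swait_event*() macros grew an _exclusive suffix, making the one-wakeup/one-exclusive-waiter pairing explicit. A self-contained sketch of the new API (all names here are illustrative, not from the queue):

#include <linux/kthread.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(ex_wq);
static bool ex_ready;

static int ex_thread(void *arg)
{
	for (;;) {
		/* sleep as an exclusive waiter until ex_ready is set */
		swait_event_interruptible_exclusive(ex_wq, READ_ONCE(ex_ready));
		if (kthread_should_stop())
			break;
		/* ... consume one queued event ... */
	}
	return 0;
}

static void ex_kick(void)
{
	WRITE_ONCE(ex_ready, true);
	swake_up_one(&ex_wq);	/* wakes exactly one exclusive waiter */
}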
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index 5771ad67d29b..b976f3198b6e 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -1723,10 +1723,6 @@ static inline void ttwu_activate(struct
+@@ -1703,10 +1703,6 @@ static inline void ttwu_activate(struct
 {
 	activate_task(rq, p, en_flags);
 	p->on_rq = TASK_ON_RQ_QUEUED;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 }
 
 /*
-@@ -2167,56 +2163,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2143,56 +2139,6 @@ try_to_wake_up(struct task_struct *p, un
 }
 
 /**
  * wake_up_process - Wake up a specific process
  * @p: The process to be woken up.
  *
-@@ -3542,21 +3488,6 @@ static void __sched notrace __schedule(b
+@@ -3519,21 +3465,6 @@ static void __sched notrace __schedule(b
 			atomic_inc(&rq->nr_iowait);
 			delayacct_blkio_start();
 		}
 		switch_count = &prev->nvcsw;
 	}
-@@ -3616,6 +3547,20 @@ static inline void sched_submit_work(str
+@@ -3593,6 +3524,20 @@ static inline void sched_submit_work(str
 {
 	if (!tsk->state || tsk_is_pi_blocked(tsk))
 		return;
 	/*
 	 * If we are going to sleep and we have plugged IO queued,
 	 * make sure to submit it to avoid deadlocks.
-@@ -3624,6 +3569,12 @@ static inline void sched_submit_work(str
+@@ -3601,6 +3546,12 @@ static inline void sched_submit_work(str
 		blk_schedule_flush_plug(tsk);
 	}
 
 asmlinkage __visible void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
-@@ -3634,6 +3585,7 @@ asmlinkage __visible void __sched schedu
+@@ -3611,6 +3562,7 @@ asmlinkage __visible void __sched schedu
 		__schedule(false);
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index ebc2c54051ff..942f2b1d6dbf 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
 --- a/kernel/sched/core.c
 +++ b/kernel/sched/core.c
-@@ -3591,9 +3591,8 @@ void __noreturn do_task_dead(void)
+@@ -3568,9 +3568,8 @@ void __noreturn do_task_dead(void)
 
 static inline void sched_submit_work(struct task_struct *tsk)
 {
 	/*
 	 * If a worker went to sleep, notify and ask workqueue whether
 	 * it wants to wake up a task to maintain concurrency.
-@@ -3607,6 +3606,9 @@ static inline void sched_submit_work(str
+@@ -3584,6 +3583,9 @@ static inline void sched_submit_work(str
 		preempt_enable_no_resched();
 	}
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 11f5372a4efa..22fc8a75478d 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/kvm/x86.c
 +++ b/arch/x86/kvm/x86.c
-@@ -6605,6 +6605,13 @@ int kvm_arch_init(void *opaque)
+@@ -6690,6 +6690,13 @@ int kvm_arch_init(void *opaque)
 		goto out;
 	}
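The x86-kvm patch above only shifts a hunk; its body is not visible in this context-only diff. From memory of the RT queue, what it adds to kvm_arch_init() is roughly the following check, refusing to load KVM on RT when the TSC is not constant (exact message and error code may differ):

#ifdef CONFIG_PREEMPT_RT_FULL
	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
		printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
		return -EOPNOTSUPP;
	}
#endif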
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index d13edf92d5b7..0ef3d80ef781 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -17,12 +17,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -178,6 +178,7 @@ config X86
+@@ -180,6 +180,7 @@ config X86
 	select HAVE_HARDLOCKUP_DETECTOR_PERF	if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 +	select HAVE_PREEMPT_LAZY
-	select HAVE_RCU_TABLE_FREE
+	select HAVE_RCU_TABLE_FREE		if PARAVIRT
 	select HAVE_RCU_TABLE_INVALIDATE	if HAVE_RCU_TABLE_FREE
 	select HAVE_REGS_AND_STACK_ACCESS_API
 --- a/arch/x86/entry/common.c
 +++ b/arch/x86/entry/common.c
 #ifdef ARCH_RT_DELAYS_SIGNAL_SEND
 --- a/arch/x86/entry/entry_32.S
 +++ b/arch/x86/entry/entry_32.S
-@@ -350,8 +350,25 @@ END(ret_from_exception)
+@@ -764,8 +764,25 @@ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 .Lneed_resched:
 +	# preempt count == 0 + NEED_RS set?
 	cmpl	$0, PER_CPU_VAR(__preempt_count)
 +#ifndef CONFIG_PREEMPT_LAZY
-	jnz	restore_all
+	jnz	restore_all_kernel
 +#else
 +	jz	test_int_off
 +
 +	# atleast preempt count == 0 ?
 +	cmpl	$_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
 +
 +	testl	$_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
 +	jz	restore_all
 +test_int_off:
 +#endif
 	testl	$X86_EFLAGS_IF, PT_EFLAGS(%esp)	# interrupts off (exception path) ?
-	jz	restore_all
+	jz	restore_all_kernel
 	call	preempt_schedule_irq
 --- a/arch/x86/entry/entry_64.S
 +++ b/arch/x86/entry/entry_64.S
-@@ -704,7 +704,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
+@@ -705,7 +705,23 @@ GLOBAL(swapgs_restore_regs_and_return_to
 	btl	$9, EFLAGS(%rsp)		/* were interrupts off? */
 	jnc	1f
 0:	cmpl	$0, PER_CPU_VAR(__preempt_count)
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index 2bc450e30145..0fe3da4e8a56 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 --- a/arch/x86/Kconfig
 +++ b/arch/x86/Kconfig
-@@ -262,8 +262,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -264,8 +264,11 @@ config ARCH_MAY_HAVE_PC_FDC
 	def_bool y
 	depends on ISA_DMA_API