diff options
author | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-03-23 21:59:07 +0100 |
---|---|---|
committer | Sebastian Andrzej Siewior <bigeasy@linutronix.de> | 2018-03-23 21:59:07 +0100 |
commit | 8a2b387bda61abe4c07f1e3b660223c5784be942 (patch) | |
tree | e8628f4085638040ece4ee1436fefbd04eb74adc | |
parent | 1e708fa30b6273f9d509adf51643023840de7a46 (diff) | |
download | linux-rt-8a2b387bda61abe4c07f1e3b660223c5784be942.tar.gz |
[ANNOUNCE] v4.14.29-rt25v4.14.29-rt25-patches
Dear RT folks!
I'm pleased to announce the v4.14.29-rt25 patch set.
Changes since v4.14.29-rt24:
- There are checks for disabled interrupts in the target code which do
not work on RT. Reported by Arnaldo Carvalho de Melo.
Known issues
- A warning triggered in "rcu_note_context_switch" originated from
SyS_timer_gettime(). The issue was always there; it is now
visible. Reported by Grygorii Strashko and Daniel Wagner.
The delta patch against v4.14.29-rt24 is appended below and can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/incr/patch-4.14.29-rt24-rt25.patch.xz
You can get this release via the git tree at:
git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.14.29-rt25
The RT patch against v4.14.29 can be found here:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patch-4.14.29-rt25.patch.xz
The split quilt queue is available at:
https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.14/older/patches-4.14.29-rt25.tar.xz
Sebastian
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
33 files changed, 156 insertions, 95 deletions
diff --git a/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch b/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch index 927cc0fa7d2d..cf7687a21e86 100644 --- a/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch +++ b/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch @@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; -@@ -1418,7 +1418,7 @@ static void mac80211_hwsim_stop(struct i +@@ -1423,7 +1423,7 @@ static void mac80211_hwsim_stop(struct i { struct mac80211_hwsim_data *data = hw->priv; data->started = false; @@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> wiphy_debug(hw->wiphy, "%s\n", __func__); } -@@ -1541,14 +1541,12 @@ static enum hrtimer_restart +@@ -1546,14 +1546,12 @@ static enum hrtimer_restart mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_data *data = @@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, -@@ -1560,11 +1558,9 @@ mac80211_hwsim_beacon(struct hrtimer *ti +@@ -1565,11 +1563,9 @@ mac80211_hwsim_beacon(struct hrtimer *ti data->bcn_delta = 0; } @@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static const char * const hwsim_chanwidths[] = { -@@ -1638,15 +1634,15 @@ static int mac80211_hwsim_config(struct +@@ -1643,15 +1639,15 @@ static int mac80211_hwsim_config(struct mutex_unlock(&data->mutex); if (!data->started || !data->beacon_int) @@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } return 0; -@@ -1709,7 +1705,7 @@ static void mac80211_hwsim_bss_info_chan +@@ -1714,7 +1710,7 @@ static void mac80211_hwsim_bss_info_chan info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && @@ -98,7 
+98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; -@@ -1717,9 +1713,9 @@ static void mac80211_hwsim_bss_info_chan +@@ -1722,9 +1718,9 @@ static void mac80211_hwsim_bss_info_chan tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); @@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( -@@ -1728,7 +1724,7 @@ static void mac80211_hwsim_bss_info_chan +@@ -1733,7 +1729,7 @@ static void mac80211_hwsim_bss_info_chan wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { @@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> data->beacon_int = 0; } } -@@ -2720,9 +2716,9 @@ static int mac80211_hwsim_new_radio(stru +@@ -2725,9 +2721,9 @@ static int mac80211_hwsim_new_radio(stru data->debugfs, data, &hwsim_simulate_radar); diff --git a/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch index 86486d57504e..126376ba282b 100644 --- a/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch +++ b/patches/RCU-skip-the-schedule-in-RCU-section-warning-on-UP-t.patch @@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> WARN_ON_ONCE(!preempt && t->rcu_read_lock_nesting > 0 && !mg_counter); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -7019,4 +7019,49 @@ void migrate_enable(void) +@@ -7020,4 +7020,49 @@ void migrate_enable(void) preempt_enable(); } EXPORT_SYMBOL(migrate_enable); diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch index 0b915a8cef33..d6b48f1faa2a 100644 --- a/patches/add_migrate_disable.patch +++ b/patches/add_migrate_disable.patch @@ -79,7 +79,7 @@ Subject: kernel/sched/core: add 
migrate_disable() * boot command line: --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1022,7 +1022,15 @@ void set_cpus_allowed_common(struct task +@@ -1023,7 +1023,15 @@ void set_cpus_allowed_common(struct task p->nr_cpus_allowed = cpumask_weight(new_mask); } @@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable() { struct rq *rq = task_rq(p); bool queued, running; -@@ -1051,6 +1059,20 @@ void do_set_cpus_allowed(struct task_str +@@ -1052,6 +1060,20 @@ void do_set_cpus_allowed(struct task_str set_curr_task(rq, p); } @@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable() /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on -@@ -1109,9 +1131,16 @@ static int __set_cpus_allowed_ptr(struct +@@ -1110,9 +1132,16 @@ static int __set_cpus_allowed_ptr(struct } /* Can the task run on the task's current CPU? If so, we're done */ @@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable() dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); if (task_running(rq, p) || p->state == TASK_WAKING) { struct migration_arg arg = { p, dest_cpu }; -@@ -6759,3 +6788,100 @@ const u32 sched_prio_to_wmult[40] = { +@@ -6760,3 +6789,100 @@ const u32 sched_prio_to_wmult[40] = { /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717, /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153, }; diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch index 30a21eacfc7c..b47f83d8b70d 100644 --- a/patches/completion-use-simple-wait-queues.patch +++ b/patches/completion-use-simple-wait-queues.patch @@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> break; --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c -@@ -1610,7 +1610,7 @@ static void ffs_data_put(struct ffs_data +@@ -1609,7 +1609,7 @@ static void ffs_data_put(struct ffs_data pr_info("%s(): freeing\n", __func__); 
ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || @@ -290,7 +290,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> EXPORT_SYMBOL(completion_done); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6816,7 +6816,10 @@ void migrate_disable(void) +@@ -6817,7 +6817,10 @@ void migrate_disable(void) return; } #ifdef CONFIG_SCHED_DEBUG @@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #endif if (p->migrate_disable) { -@@ -6846,7 +6849,10 @@ void migrate_enable(void) +@@ -6847,7 +6850,10 @@ void migrate_enable(void) } #ifdef CONFIG_SCHED_DEBUG diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch index cd94d0a92fe3..e617510f33d6 100644 --- a/patches/cond-resched-softirq-rt.patch +++ b/patches/cond-resched-softirq-rt.patch @@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> { --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -4945,6 +4945,7 @@ int __cond_resched_lock(spinlock_t *lock +@@ -4946,6 +4946,7 @@ int __cond_resched_lock(spinlock_t *lock } EXPORT_SYMBOL(__cond_resched_lock); @@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); -@@ -4958,6 +4959,7 @@ int __sched __cond_resched_softirq(void) +@@ -4959,6 +4960,7 @@ int __sched __cond_resched_softirq(void) return 0; } EXPORT_SYMBOL(__cond_resched_softirq); diff --git a/patches/fs-dcache-bringt-back-explicit-INIT_HLIST_BL_HEAD-in.patch b/patches/fs-dcache-bringt-back-explicit-INIT_HLIST_BL_HEAD-in.patch index 574dd4c6aee9..f9165c560e69 100644 --- a/patches/fs-dcache-bringt-back-explicit-INIT_HLIST_BL_HEAD-in.patch +++ b/patches/fs-dcache-bringt-back-explicit-INIT_HLIST_BL_HEAD-in.patch @@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -3576,6 +3576,8 @@ static int __init set_dhash_entries(char +@@ -3581,6 +3581,8 @@ static int __init set_dhash_entries(char 
static void __init dcache_init_early(void) { @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* If hashes are distributed across NUMA nodes, defer * hash allocation until vmalloc space is available. */ -@@ -3592,10 +3594,14 @@ static void __init dcache_init_early(voi +@@ -3597,10 +3599,14 @@ static void __init dcache_init_early(voi &d_hash_mask, 0, 0); @@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * A constructor could be added for stable state like the lists, * but it is probably not worth it because of the cache nature -@@ -3618,6 +3624,10 @@ static void __init dcache_init(void) +@@ -3623,6 +3629,10 @@ static void __init dcache_init(void) &d_hash_mask, 0, 0); diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch index 68808ecef1a1..b7ecd0e976ca 100644 --- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch +++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2405,9 +2405,10 @@ EXPORT_SYMBOL(d_rehash); +@@ -2410,9 +2410,10 @@ EXPORT_SYMBOL(d_rehash); static inline unsigned start_dir_add(struct inode *dir) { @@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return n; cpu_relax(); } -@@ -2415,7 +2416,8 @@ static inline unsigned start_dir_add(str +@@ -2420,7 +2421,8 @@ static inline unsigned start_dir_add(str static inline void end_dir_add(struct inode *dir, unsigned n) { @@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> } static void d_wait_lookup(struct dentry *dentry) -@@ -2448,7 +2450,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2453,7 +2455,7 @@ struct dentry *d_alloc_parallel(struct d retry: rcu_read_lock(); @@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior 
<bigeasy@linutronix.de> r_seq = read_seqbegin(&rename_lock); dentry = __d_lookup_rcu(parent, name, &d_seq); if (unlikely(dentry)) { -@@ -2470,7 +2472,7 @@ struct dentry *d_alloc_parallel(struct d +@@ -2475,7 +2477,7 @@ struct dentry *d_alloc_parallel(struct d goto retry; } hlist_bl_lock(b); diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch index 8f93cfbc89ed..25edf498947a 100644 --- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch +++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch @@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #include <linux/slab.h> #include <linux/init.h> #include <linux/hash.h> -@@ -784,6 +785,8 @@ static inline bool fast_dput(struct dent +@@ -789,6 +790,8 @@ static inline bool fast_dput(struct dent */ void dput(struct dentry *dentry) { @@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (unlikely(!dentry)) return; -@@ -820,9 +823,18 @@ void dput(struct dentry *dentry) +@@ -825,9 +828,18 @@ void dput(struct dentry *dentry) return; kill_it: @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> goto repeat; } } -@@ -2360,7 +2372,7 @@ void d_delete(struct dentry * dentry) +@@ -2365,7 +2377,7 @@ void d_delete(struct dentry * dentry) if (dentry->d_lockref.count == 1) { if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch index 3ed590283e72..9742763a8449 100644 --- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch +++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch @@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/fs/dcache.c +++ b/fs/dcache.c -@@ -2434,21 +2434,24 @@ static inline void end_dir_add(struct in +@@ -2439,21 +2439,24 @@ static inline void end_dir_add(struct in static void 
d_wait_lookup(struct dentry *dentry) { @@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); -@@ -2557,7 +2560,7 @@ void __d_lookup_done(struct dentry *dent +@@ -2562,7 +2565,7 @@ void __d_lookup_done(struct dentry *dent hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); @@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* --- a/fs/namei.c +++ b/fs/namei.c -@@ -1628,7 +1628,7 @@ static struct dentry *lookup_slow(const +@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const { struct dentry *dentry = ERR_PTR(-ENOENT), *old; struct inode *inode = dir->d_inode; @@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> inode_lock_shared(inode); /* Don't go there if it's already dead */ -@@ -3101,7 +3101,7 @@ static int lookup_open(struct nameidata +@@ -3099,7 +3099,7 @@ static int lookup_open(struct nameidata struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch index 5034b3586616..824a043105d4 100644 --- a/patches/futex-requeue-pi-fix.patch +++ b/patches/futex-requeue-pi-fix.patch @@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /* -@@ -1742,6 +1743,35 @@ int __rt_mutex_start_proxy_lock(struct r +@@ -1743,6 +1744,35 @@ int __rt_mutex_start_proxy_lock(struct r if (try_to_take_rt_mutex(lock, task, NULL)) return 1; diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch index bd8b9f91d82a..c41794c2aaef 100644 --- a/patches/hotplug-light-get-online-cpus.patch +++ b/patches/hotplug-light-get-online-cpus.patch @@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void cpus_read_lock(void) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ 
-6914,6 +6914,7 @@ void migrate_disable(void) +@@ -6915,6 +6915,7 @@ void migrate_disable(void) } preempt_disable(); @@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> migrate_disable_update_cpus_allowed(p); p->migrate_disable = 1; -@@ -6979,12 +6980,15 @@ void migrate_enable(void) +@@ -6980,12 +6981,15 @@ void migrate_enable(void) arg.task = p; arg.dest_cpu = dest_cpu; diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch index 2ddec9c9b276..8a8a338c84f4 100644 --- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch +++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch @@ -344,7 +344,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * parent) --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -959,7 +959,7 @@ static struct rq *__migrate_task(struct +@@ -960,7 +960,7 @@ static struct rq *__migrate_task(struct } /* Affinity changed (again). */ @@ -353,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return rq; update_rq_clock(rq); -@@ -987,7 +987,7 @@ static int migration_cpu_stop(void *data +@@ -988,7 +988,7 @@ static int migration_cpu_stop(void *data local_irq_disable(); /* * We need to explicitly wake pending tasks before running @@ -362,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test. 
*/ sched_ttwu_pending(); -@@ -1018,7 +1018,7 @@ static int migration_cpu_stop(void *data +@@ -1019,7 +1019,7 @@ static int migration_cpu_stop(void *data */ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) { @@ -371,7 +371,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> p->nr_cpus_allowed = cpumask_weight(new_mask); } -@@ -1088,7 +1088,7 @@ static int __set_cpus_allowed_ptr(struct +@@ -1089,7 +1089,7 @@ static int __set_cpus_allowed_ptr(struct goto out; } @@ -380,7 +380,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto out; if (!cpumask_intersects(new_mask, cpu_valid_mask)) { -@@ -1249,10 +1249,10 @@ static int migrate_swap_stop(void *data) +@@ -1250,10 +1250,10 @@ static int migrate_swap_stop(void *data) if (task_cpu(arg->src_task) != arg->src_cpu) goto unlock; @@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto unlock; __migrate_swap_task(arg->src_task, arg->dst_cpu); -@@ -1293,10 +1293,10 @@ int migrate_swap(struct task_struct *cur +@@ -1294,10 +1294,10 @@ int migrate_swap(struct task_struct *cur if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu)) goto out; @@ -406,7 +406,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> goto out; trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu); -@@ -1440,7 +1440,7 @@ void kick_process(struct task_struct *p) +@@ -1441,7 +1441,7 @@ void kick_process(struct task_struct *p) EXPORT_SYMBOL_GPL(kick_process); /* @@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * * A few notes on cpu_active vs cpu_online: * -@@ -1480,14 +1480,14 @@ static int select_fallback_rq(int cpu, s +@@ -1481,14 +1481,14 @@ static int select_fallback_rq(int cpu, s for_each_cpu(dest_cpu, nodemask) { if (!cpu_active(dest_cpu)) continue; @@ -432,7 +432,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu)) 
continue; if (!cpu_online(dest_cpu)) -@@ -1532,7 +1532,7 @@ static int select_fallback_rq(int cpu, s +@@ -1533,7 +1533,7 @@ static int select_fallback_rq(int cpu, s } /* @@ -441,7 +441,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ static inline int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) -@@ -1542,11 +1542,11 @@ int select_task_rq(struct task_struct *p +@@ -1543,11 +1543,11 @@ int select_task_rq(struct task_struct *p if (p->nr_cpus_allowed > 1) cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); else @@ -455,7 +455,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * CPU. * * Since this is common to all placement strategies, this lives here. -@@ -1554,7 +1554,7 @@ int select_task_rq(struct task_struct *p +@@ -1555,7 +1555,7 @@ int select_task_rq(struct task_struct *p * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ @@ -464,7 +464,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); -@@ -2444,7 +2444,7 @@ void wake_up_new_task(struct task_struct +@@ -2445,7 +2445,7 @@ void wake_up_new_task(struct task_struct #ifdef CONFIG_SMP /* * Fork balancing, do it here and not earlier because: @@ -473,7 +473,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> * - any previously selected CPU might disappear through hotplug * * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq, -@@ -4161,7 +4161,7 @@ static int __sched_setscheduler(struct t +@@ -4162,7 +4162,7 @@ static int __sched_setscheduler(struct t * the entire root_domain to become SCHED_DEADLINE. We * will also fail if there's no bandwidth available. 
*/ @@ -482,7 +482,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> rq->rd->dl_bw.bw == 0) { task_rq_unlock(rq, p, &rf); return -EPERM; -@@ -4755,7 +4755,7 @@ long sched_getaffinity(pid_t pid, struct +@@ -4756,7 +4756,7 @@ long sched_getaffinity(pid_t pid, struct goto out_unlock; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -491,7 +491,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> raw_spin_unlock_irqrestore(&p->pi_lock, flags); out_unlock: -@@ -5320,7 +5320,7 @@ int task_can_attach(struct task_struct * +@@ -5321,7 +5321,7 @@ int task_can_attach(struct task_struct * * allowed nodes is unnecessary. Thus, cpusets are not * applicable for such threads. This prevents checking for * success of set_cpus_allowed_ptr() on all attached tasks @@ -500,7 +500,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> */ if (p->flags & PF_NO_SETAFFINITY) { ret = -EINVAL; -@@ -5347,7 +5347,7 @@ int migrate_task_to(struct task_struct * +@@ -5348,7 +5348,7 @@ int migrate_task_to(struct task_struct * if (curr_cpu == target_cpu) return 0; @@ -509,7 +509,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return -EINVAL; /* TODO: This is not properly updating schedstats */ -@@ -5484,7 +5484,7 @@ static void migrate_tasks(struct rq *dea +@@ -5485,7 +5485,7 @@ static void migrate_tasks(struct rq *dea put_prev_task(rq, next); /* diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch index 94b0c11eef28..be104163a798 100644 --- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch +++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch @@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> security_task_free(tsk); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2705,15 +2705,6 @@ static struct rq *finish_task_switch(str +@@ -2706,15 +2706,6 @@ static struct 
rq *finish_task_switch(str if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); diff --git a/patches/localversion.patch b/patches/localversion.patch index 619fa3040f83..ad0785c83de1 100644 --- a/patches/localversion.patch +++ b/patches/localversion.patch @@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ -+-rt24 ++-rt25 diff --git a/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch b/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch index 3cbe9b669004..a149a5a800db 100644 --- a/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch +++ b/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch @@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c -@@ -1765,7 +1765,6 @@ int __rt_mutex_start_proxy_lock(struct r +@@ -1766,7 +1766,6 @@ int __rt_mutex_start_proxy_lock(struct r raw_spin_lock(&task->pi_lock); if (task->pi_blocked_on) { raw_spin_unlock(&task->pi_lock); diff --git a/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch index c295c4008c61..810fe38374a8 100644 --- a/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch +++ b/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch @@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c -@@ -2272,6 +2272,14 @@ void rt_mutex_init_proxy_locked(struct r +@@ -2273,6 +2273,14 @@ void rt_mutex_init_proxy_locked(struct r struct task_struct *proxy_owner) { __rt_mutex_init(lock, NULL, NULL); diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch index 5c3f1213ca97..ec91b73dd0a2 100644 --- a/patches/preempt-lazy-support.patch +++ b/patches/preempt-lazy-support.patch @@ -282,7 +282,7 @@ 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); -@@ -2444,6 +2486,9 @@ int sched_fork(unsigned long clone_flags +@@ -2445,6 +2487,9 @@ int sched_fork(unsigned long clone_flags p->on_cpu = 0; #endif init_task_preempt_count(p); @@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -3361,6 +3406,7 @@ static void __sched notrace __schedule(b +@@ -3362,6 +3407,7 @@ static void __sched notrace __schedule(b next = pick_next_task(rq, prev, &rf); clear_tsk_need_resched(prev); @@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> clear_preempt_need_resched(); if (likely(prev != next)) { -@@ -3551,6 +3597,30 @@ static void __sched notrace preempt_sche +@@ -3552,6 +3598,30 @@ static void __sched notrace preempt_sche } while (need_resched()); } @@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption -@@ -3565,7 +3635,8 @@ asmlinkage __visible void __sched notrac +@@ -3566,7 +3636,8 @@ asmlinkage __visible void __sched notrac */ if (likely(!preemptible())) return; @@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); -@@ -3592,6 +3663,9 @@ asmlinkage __visible void __sched notrac +@@ -3593,6 +3664,9 @@ asmlinkage __visible void __sched notrac if (likely(!preemptible())) return; @@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> do { /* * Because the function tracer can trace preempt_count_sub() -@@ -5331,7 +5405,9 @@ void init_idle(struct task_struct *idle, +@@ -5332,7 +5406,9 @@ void init_idle(struct task_struct *idle, /* Set the preempt count _outside_ the spinlocks! 
*/ init_idle_preempt_count(idle, cpu); @@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * The idle tasks have their own, simple scheduling class: */ -@@ -6887,6 +6963,7 @@ void migrate_disable(void) +@@ -6888,6 +6964,7 @@ void migrate_disable(void) } preempt_disable(); @@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> pin_current_cpu(); migrate_disable_update_cpus_allowed(p); -@@ -6954,6 +7031,7 @@ void migrate_enable(void) +@@ -6955,6 +7032,7 @@ void migrate_enable(void) arg.dest_cpu = dest_cpu; unpin_current_cpu(); @@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> preempt_enable(); stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); tlb_migrate_finish(p->mm); -@@ -6962,6 +7040,7 @@ void migrate_enable(void) +@@ -6963,6 +7041,7 @@ void migrate_enable(void) } } unpin_current_cpu(); diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch index fb5b064c4713..04f4b2dde53c 100644 --- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch +++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch @@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> spin_unlock_irq(&task->sighand->siglock); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1358,6 +1358,18 @@ int migrate_swap(struct task_struct *cur +@@ -1359,6 +1359,18 @@ int migrate_swap(struct task_struct *cur return ret; } @@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * wait_task_inactive - wait for a thread to unschedule. * -@@ -1402,7 +1414,7 @@ unsigned long wait_task_inactive(struct +@@ -1403,7 +1415,7 @@ unsigned long wait_task_inactive(struct * is actually now running somewhere else! 
*/ while (task_running(rq, p)) { @@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> return 0; cpu_relax(); } -@@ -1417,7 +1429,8 @@ unsigned long wait_task_inactive(struct +@@ -1418,7 +1430,8 @@ unsigned long wait_task_inactive(struct running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch index 6dbb90f1fd62..e5441486d3c2 100644 --- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch +++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch @@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6850,6 +6850,47 @@ const u32 sched_prio_to_wmult[40] = { +@@ -6851,6 +6851,47 @@ const u32 sched_prio_to_wmult[40] = { #if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP) @@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> void migrate_disable(void) { struct task_struct *p = current; -@@ -6873,10 +6914,9 @@ void migrate_disable(void) +@@ -6874,10 +6915,9 @@ void migrate_disable(void) } preempt_disable(); @@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> preempt_enable(); } -@@ -6908,9 +6948,8 @@ void migrate_enable(void) +@@ -6909,9 +6949,8 @@ void migrate_enable(void) preempt_disable(); diff --git a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch index 4ebc55416c10..1673b3cfdb46 100644 --- a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch +++ b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch @@ -21,7 +21,7 @@ Cc: stable-rt@vger.kernel.org --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c -@@ -1728,7 +1728,7 @@ int __rt_mutex_start_proxy_lock(struct r +@@ -1729,7 +1729,7 @@ int 
__rt_mutex_start_proxy_lock(struct r ret = 0; } diff --git a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch index 9aee46801404..612a8b8b1e71 100644 --- a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch +++ b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch @@ -282,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> rt_mutex_slowlock); if (ret) mutex_release(&lock->dep_map, 1, _RET_IP_); -@@ -2266,7 +2414,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m +@@ -2267,7 +2415,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m raw_spin_lock_irq(&lock->wait_lock); /* sleep on the mutex */ set_current_state(TASK_INTERRUPTIBLE); @@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> /* * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might * have to fix that up. -@@ -2350,3 +2498,99 @@ bool rt_mutex_cleanup_proxy_lock(struct +@@ -2351,3 +2499,99 @@ bool rt_mutex_cleanup_proxy_lock(struct return cleanup; } diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch index f505d221ebde..93f554941463 100644 --- a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch +++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch @@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2206,7 +2206,7 @@ EXPORT_SYMBOL(wake_up_process); +@@ -2207,7 +2207,7 @@ EXPORT_SYMBOL(wake_up_process); */ int wake_up_lock_sleeper(struct task_struct *p) { diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch index 8d9175cbc9c8..20068ac44695 100644 --- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch +++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch @@ -36,7 +36,7 @@ 
Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* Internal to kernel */ --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -6049,7 +6049,7 @@ void __init sched_init(void) +@@ -6050,7 +6050,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch index f4e3e295f320..d6192782efcc 100644 --- a/patches/sched-mmdrop-delayed.patch +++ b/patches/sched-mmdrop-delayed.patch @@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> VM_BUG_ON(atomic_read(&mm->mm_users)); --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2695,8 +2695,12 @@ static struct rq *finish_task_switch(str +@@ -2696,8 +2696,12 @@ static struct rq *finish_task_switch(str finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); @@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); -@@ -5433,6 +5437,8 @@ void sched_setnuma(struct task_struct *p +@@ -5434,6 +5438,8 @@ void sched_setnuma(struct task_struct *p #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU @@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * Ensure that the idle task is using init_mm right before its CPU goes * offline. 
-@@ -5447,7 +5453,12 @@ void idle_task_exit(void) +@@ -5448,7 +5454,12 @@ void idle_task_exit(void) switch_mm(mm, &init_mm, current); finish_arch_post_lock_switch(); } @@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /* -@@ -5750,6 +5761,10 @@ int sched_cpu_dying(unsigned int cpu) +@@ -5751,6 +5762,10 @@ int sched_cpu_dying(unsigned int cpu) update_max_interval(); nohz_balance_exit_idle(cpu); hrtick_clear(rq); diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch index 5e5735f43be2..16e061834480 100644 --- a/patches/sched-rt-mutex-wakeup.patch +++ b/patches/sched-rt-mutex-wakeup.patch @@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> #ifdef CONFIG_SMP --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2015,8 +2015,25 @@ try_to_wake_up(struct task_struct *p, un +@@ -2016,8 +2016,25 @@ try_to_wake_up(struct task_struct *p, un */ raw_spin_lock_irqsave(&p->pi_lock, flags); smp_mb__after_spinlock(); @@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> trace_sched_waking(p); -@@ -2180,6 +2197,18 @@ int wake_up_process(struct task_struct * +@@ -2181,6 +2198,18 @@ int wake_up_process(struct task_struct * } EXPORT_SYMBOL(wake_up_process); diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch index ba04c81fd85c..eaf5e7294227 100644 --- a/patches/sched-ttwu-ensure-success-return-is-correct.patch +++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch @@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -2022,8 +2022,10 @@ try_to_wake_up(struct task_struct *p, un +@@ -2023,8 +2023,10 @@ try_to_wake_up(struct task_struct *p, un * if the wakeup condition is true. 
*/ if (!(wake_flags & WF_LOCK_SLEEPER)) { diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch index aaa80817e8c5..fd96248b4745 100644 --- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch +++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch @@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3387,8 +3387,10 @@ static void __sched notrace __schedule(b +@@ -3388,8 +3388,10 @@ static void __sched notrace __schedule(b * If a worker went to sleep, notify and ask workqueue * whether it wants to wake up a task to maintain * concurrency. diff --git a/patches/series b/patches/series index 1a4f56f00e87..c6d1b07bedc2 100644 --- a/patches/series +++ b/patches/series @@ -108,6 +108,7 @@ kernel-SRCU-provide-a-static-initializer.patch ############################################################ # Submitted on LKML ############################################################ +target-drop-spin_lock_assert-irqs_disabled-combo-che.patch ############################################################ # Submitted to net-dev diff --git a/patches/target-drop-spin_lock_assert-irqs_disabled-combo-che.patch b/patches/target-drop-spin_lock_assert-irqs_disabled-combo-che.patch new file mode 100644 index 000000000000..27ae736e1f18 --- /dev/null +++ b/patches/target-drop-spin_lock_assert-irqs_disabled-combo-che.patch @@ -0,0 +1,60 @@ +From: "bigeasy@linutronix.de" <bigeasy@linutronix.de> +Date: Fri, 23 Mar 2018 18:17:36 +0100 +Subject: [PATCH] target: drop spin_lock_assert() + irqs_disabled() combo + checks + +There are a few functions which check for if the lock is held +(spin_lock_assert()) and the interrupts are disabled (irqs_disabled()). +>From looking at the code, each function is static, the caller is near by +and does spin_lock_irq|safe(). 
As Linus puts it: + +|It's not like this is some function that is exported to random users, +|and we should check that the calling convention is right. +| +|This looks like "it may have been useful during coding to document +|things, but it's not useful long-term". + +Remove those checks. + +Reviewed-by: Bart Van Assche <bart.vanassche@wdc.com> +Reported-by: Arnaldo Carvalho de Melo <acme@kernel.org> +Suggested-by: Linus Torvalds <torvalds@linux-foundation.org> +Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> +--- + drivers/target/target_core_tmr.c | 2 -- + drivers/target/target_core_transport.c | 6 ------ + 2 files changed, 8 deletions(-) + +--- a/drivers/target/target_core_tmr.c ++++ b/drivers/target/target_core_tmr.c +@@ -114,8 +114,6 @@ static bool __target_check_io_state(stru + { + struct se_session *sess = se_cmd->se_sess; + +- assert_spin_locked(&sess->sess_cmd_lock); +- WARN_ON_ONCE(!irqs_disabled()); + /* + * If command already reached CMD_T_COMPLETE state within + * target_complete_cmd() or CMD_T_FABRIC_STOP due to shutdown, +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -2966,9 +2966,6 @@ static bool + __acquires(&cmd->t_state_lock) + { + +- assert_spin_locked(&cmd->t_state_lock); +- WARN_ON_ONCE(!irqs_disabled()); +- + if (fabric_stop) + cmd->transport_state |= CMD_T_FABRIC_STOP; + +@@ -3238,9 +3235,6 @@ static int __transport_check_aborted_sta + { + int ret; + +- assert_spin_locked(&cmd->t_state_lock); +- WARN_ON_ONCE(!irqs_disabled()); +- + if (!(cmd->transport_state & CMD_T_ABORTED)) + return 0; + /* diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch index 71e266faa00b..f6d247e5f17f 100644 --- a/patches/timers-prepare-for-full-preemption.patch +++ b/patches/timers-prepare-for-full-preemption.patch @@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> # define del_timer_sync(t) del_timer(t) --- 
a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -522,11 +522,14 @@ void resched_cpu(int cpu) +@@ -523,11 +523,14 @@ void resched_cpu(int cpu) */ int get_nohz_timer_target(void) { @@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> rcu_read_lock(); for_each_domain(cpu, sd) { -@@ -545,6 +548,8 @@ int get_nohz_timer_target(void) +@@ -546,6 +549,8 @@ int get_nohz_timer_target(void) cpu = housekeeping_any_cpu(); unlock: rcu_read_unlock(); diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch index 36d8712714e6..d52134f1eec8 100644 --- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch +++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch @@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3604,7 +3604,16 @@ asmlinkage __visible void __sched notrac +@@ -3605,7 +3605,16 @@ asmlinkage __visible void __sched notrac * an infinite recursion. 
*/ prev_ctx = exception_enter(); diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch index 1a30c9a7139c..f9790ce06a4d 100644 --- a/patches/workqueue-distangle-from-rq-lock.patch +++ b/patches/workqueue-distangle-from-rq-lock.patch @@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -1715,10 +1715,6 @@ static inline void ttwu_activate(struct +@@ -1716,10 +1716,6 @@ static inline void ttwu_activate(struct { activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; @@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } /* -@@ -2159,56 +2155,6 @@ try_to_wake_up(struct task_struct *p, un +@@ -2160,56 +2156,6 @@ try_to_wake_up(struct task_struct *p, un } /** @@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> * wake_up_process - Wake up a specific process * @p: The process to be woken up. * -@@ -3409,21 +3355,6 @@ static void __sched notrace __schedule(b +@@ -3410,21 +3356,6 @@ static void __sched notrace __schedule(b atomic_inc(&rq->nr_iowait); delayacct_blkio_start(); } @@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> } switch_count = &prev->nvcsw; } -@@ -3499,6 +3430,14 @@ static inline void sched_submit_work(str +@@ -3500,6 +3431,14 @@ static inline void sched_submit_work(str { if (!tsk->state || tsk_is_pi_blocked(tsk)) return; @@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. 
-@@ -3507,6 +3446,12 @@ static inline void sched_submit_work(str +@@ -3508,6 +3447,12 @@ static inline void sched_submit_work(str blk_schedule_flush_plug(tsk); } @@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de> asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; -@@ -3517,6 +3462,7 @@ asmlinkage __visible void __sched schedu +@@ -3518,6 +3463,7 @@ asmlinkage __visible void __sched schedu __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch index 2553ec7920f0..74d79e9a1abe 100644 --- a/patches/workqueue-prevent-deadlock-stall.patch +++ b/patches/workqueue-prevent-deadlock-stall.patch @@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org> --- a/kernel/sched/core.c +++ b/kernel/sched/core.c -@@ -3474,9 +3474,8 @@ void __noreturn do_task_dead(void) +@@ -3475,9 +3475,8 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { @@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org> /* * If a worker went to sleep, notify and ask workqueue whether * it wants to wake up a task to maintain concurrency. -@@ -3484,6 +3483,10 @@ static inline void sched_submit_work(str +@@ -3485,6 +3484,10 @@ static inline void sched_submit_work(str if (tsk->flags & PF_WQ_WORKER) wq_worker_sleeping(tsk); |