author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2020-01-20 11:44:45 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2020-01-20 11:44:45 +0100
commit     17e1f681bcd0a15ae81acc473d39726f4c42c6ca (patch)
tree       007f369c4781f29c455dadc4d5f4fab8eb860012
parent     fce891d958aaa0f15dbf1c9914318866ca316901 (diff)
download   linux-rt-17e1f681bcd0a15ae81acc473d39726f4c42c6ca.tar.gz
[ANNOUNCE] v5.4.13-rt7   (tag: v5.4.13-rt7-patches)
Dear RT folks!

I'm pleased to announce the v5.4.13-rt7 patch set.

Changes since v5.4.13-rt6:

  - Remove the early state change to STATE_RUNNING in do_nanosleep(); it is
    no longer needed.

  - Remove the memory allocation in on_each_cpu_cond_mask() which may lead
    to a "sleeping-while-atomic" warning. Reported by Scott Wood.

Known issues
  - None

The delta patch against v5.4.13-rt6 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/incr/patch-5.4.13-rt6-rt7.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.4.13-rt7

The RT patch against v5.4.13 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patch-5.4.13-rt7.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.4/older/patches-5.4.13-rt7.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
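As a rough illustration of the on_each_cpu_cond_mask() change above, the new
allocation-free path looks like this (a simplified sketch drawn from the smp-*
patches in the queue below, not verbatim kernel/smp.c): instead of building a
temporary cpumask with GFP flags, the conditional function is passed down to
smp_call_function_many_cond() and evaluated per CPU, and the local CPU is
handled directly with interrupts disabled.

/*
 * Sketch only: kernel/smp.c context with the three smp-* patches applied,
 * details and the kernel-doc comment trimmed.
 */
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
                           void *info, bool wait, const struct cpumask *mask)
{
        int cpu = get_cpu();            /* no cpumask allocation any more */

        /* remote CPUs: cond_func() is evaluated inside the cross call */
        smp_call_function_many_cond(mask, func, info, wait, cond_func);

        /* local CPU: run func() directly with interrupts disabled */
        if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
                unsigned long flags;

                local_irq_save(flags);
                func(info);
                local_irq_restore(flags);
        }
        put_cpu();
}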
-rw-r--r--  patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch | 2
-rw-r--r--  patches/0011-printk_safe-remove-printk-safe-code.patch | 4
-rw-r--r--  patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch | 4
-rw-r--r--  patches/Use-CONFIG_PREEMPTION.patch | 4
-rw-r--r--  patches/add_migrate_disable.patch | 2
-rw-r--r--  patches/arch-arm64-Add-lazy-preempt-support.patch | 2
-rw-r--r--  patches/arm-preempt-lazy-support.patch | 2
-rw-r--r--  patches/arm-remove-printk_nmi_.patch | 2
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch | 4
-rw-r--r--  patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch | 44
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/mm-compaction-Disable-compact_unevictable_allowed-on.patch | 2
-rw-r--r--  patches/preempt-lazy-support.patch | 8
-rw-r--r--  patches/series | 11
-rw-r--r--  patches/smp-Add-a-smp_cond_func_t-argument-to-smp_call_funct.patch | 139
-rw-r--r--  patches/smp-Remove-allocation-mask-from-on_each_cpu_cond.patch | 127
-rw-r--r--  patches/smp-Use-smp_cond_func_t-as-type-for-the-conditional-.patch | 99
17 files changed, 392 insertions(+), 66 deletions(-)
diff --git a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index a58ef462ea5f..8524e8f20d4a 100644
--- a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ LIST_HEAD(tofree);
+ int cpu;
+
- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
+ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+ for_each_online_cpu(cpu) {
+ struct slub_free_list *f;
+
diff --git a/patches/0011-printk_safe-remove-printk-safe-code.patch b/patches/0011-printk_safe-remove-printk-safe-code.patch
index 4be8cb82dc6f..a93615fb613c 100644
--- a/patches/0011-printk_safe-remove-printk-safe-code.patch
+++ b/patches/0011-printk_safe-remove-printk-safe-code.patch
@@ -666,7 +666,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -8916,7 +8916,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8927,7 +8927,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -674,7 +674,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -8993,7 +8992,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9004,7 +9003,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index a503b934ea44..3cd319baa1ea 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2956,7 +2956,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2965,7 +2965,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
-@@ -2998,7 +2998,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -3007,7 +3007,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
diff --git a/patches/Use-CONFIG_PREEMPTION.patch b/patches/Use-CONFIG_PREEMPTION.patch
index d1c0574a3e12..e94104e71163 100644
--- a/patches/Use-CONFIG_PREEMPTION.patch
+++ b/patches/Use-CONFIG_PREEMPTION.patch
@@ -796,7 +796,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bne r4, r0, restore_all
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
-@@ -81,7 +81,7 @@ config STACK_GROWSUP
+@@ -82,7 +82,7 @@ config STACK_GROWSUP
config GENERIC_LOCKBREAK
bool
default y
@@ -1412,7 +1412,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3654,6 +3654,8 @@ print_trace_header(struct seq_file *m, s
+@@ -3660,6 +3660,8 @@ print_trace_header(struct seq_file *m, s
"desktop",
#elif defined(CONFIG_PREEMPT)
"preempt",
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index 6221c3720850..38bc3bd6361d 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
-@@ -222,6 +222,9 @@ static inline int get_boot_cpu_id(void)
+@@ -221,6 +221,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); __smp_processor_id(); })
#define put_cpu() preempt_enable()
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 72b4076320f8..bb262195d915 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -21,7 +21,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -158,6 +158,7 @@ config ARM64
+@@ -159,6 +159,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index 7055d9e10867..17ca580ad0c0 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -102,6 +102,7 @@ config ARM
+@@ -103,6 +103,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
diff --git a/patches/arm-remove-printk_nmi_.patch b/patches/arm-remove-printk_nmi_.patch
index c197e73ad908..616948ff1680 100644
--- a/patches/arm-remove-printk_nmi_.patch
+++ b/patches/arm-remove-printk_nmi_.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -678,11 +678,9 @@ void handle_IPI(int ipinr, struct pt_reg
+@@ -682,11 +682,9 @@ void handle_IPI(int ipinr, struct pt_reg
break;
case IPI_CPU_BACKTRACE:
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index 03374e6fbb99..6f4347b259e9 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2348,6 +2348,8 @@ tracing_generic_entry_update(struct trac
+@@ -2354,6 +2354,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3578,9 +3580,10 @@ static void print_lat_help_header(struct
+@@ -3584,9 +3586,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
diff --git a/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch b/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
deleted file mode 100644
index 814f6fb29d7e..000000000000
--- a/patches/hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
+++ /dev/null
@@ -1,44 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 6 Dec 2018 10:15:13 +0100
-Subject: [PATCH] hrtimer: move state change before hrtimer_cancel in
- do_nanosleep()
-
-There is a small window between setting t->task to NULL and waking the
-task up (which would set TASK_RUNNING). So the timer would fire, run and
-set ->task to NULL while the other side/do_nanosleep() wouldn't enter
-freezable_schedule(). After all we are preemptible here (in
-do_nanosleep() and on the timer wake up path) and on KVM/virt the
-virt-CPU might get preempted.
-So do_nanosleep() wouldn't enter freezable_schedule() but cancel the
-timer which is still running and wait for it via
-hrtimer_wait_for_timer(). Then wait_event()/might_sleep() would complain
-that it is invoked with state != TASK_RUNNING.
-This isn't a problem since it would be reset to TASK_RUNNING later
-anyway and we don't rely on the previous state.
-
-Move the state update to TASK_RUNNING before hrtimer_cancel() so there
-are no complaints from might_sleep() about wrong state.
-
-Cc: stable-rt@vger.kernel.org
-Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -1873,12 +1873,12 @@ static int __sched do_nanosleep(struct h
- if (likely(t->task))
- freezable_schedule();
-
-+ __set_current_state(TASK_RUNNING);
- hrtimer_cancel(&t->timer);
- mode = HRTIMER_MODE_ABS;
-
- } while (t->task && !signal_pending(current));
-
-- __set_current_state(TASK_RUNNING);
-
- if (!t->task)
- return 0;
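
For context on the dropped patch above: the do_nanosleep() loop that now keeps
its mainline shape looks roughly like this (a simplified sketch; the two lines
at the top of the loop are recalled from v5.4's kernel/time/hrtimer.c and are
not part of the quoted hunk).

        /* Sketch of do_nanosleep(), kernel/time/hrtimer.c, simplified. */
        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_sleeper_start_expires(t, mode);

                if (likely(t->task))
                        freezable_schedule();

                /* the dropped -rt patch set TASK_RUNNING here, before the cancel */
                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;

        } while (t->task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);      /* mainline placement, kept in -rt7 */

        if (!t->task)
                return 0;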
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4c1841b6475d..bbb08330835d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt7
diff --git a/patches/mm-compaction-Disable-compact_unevictable_allowed-on.patch b/patches/mm-compaction-Disable-compact_unevictable_allowed-on.patch
index 3c0c7588c40a..4d0605cf5345 100644
--- a/patches/mm-compaction-Disable-compact_unevictable_allowed-on.patch
+++ b/patches/mm-compaction-Disable-compact_unevictable_allowed-on.patch
@@ -5,7 +5,7 @@ Subject: [PATCH] mm/compaction: Disable compact_unevictable_allowed on RT
Since commit
5bbe3547aa3ba ("mm: allow compaction of unevictable pages")
-it is allowed to examine mlocked pages for pages to compact by default.
+it is allowed to examine mlocked pages and compact them by default.
On -RT even minor pagefaults are problematic because it may take a few
100us to resolve them and until then the task is blocked.
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 15f31426df25..2df65a4894a1 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -483,7 +483,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2335,6 +2335,7 @@ tracing_generic_entry_update(struct trac
+@@ -2341,6 +2341,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -491,7 +491,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->type = type;
entry->flags =
-@@ -2346,7 +2347,8 @@ tracing_generic_entry_update(struct trac
+@@ -2352,7 +2353,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -501,7 +501,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3575,15 +3577,17 @@ unsigned long trace_total_entries(struct
+@@ -3581,15 +3583,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -528,7 +528,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3619,11 +3623,12 @@ static void print_func_help_header_irq(s
+@@ -3625,11 +3629,12 @@ static void print_func_help_header_irq(s
seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space);
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
diff --git a/patches/series b/patches/series
index 892ab591e902..e0641ef2503f 100644
--- a/patches/series
+++ b/patches/series
@@ -101,8 +101,16 @@ thermal-x86_pkg_temp-make-pkg_temp_lock-a-raw-spinlo.patch
perf-core-Add-SRCU-annotation-for-pmus-list-walk.patch
# 20191219170834.4tah3prf2gdothz4@linutronix.de
+# in akpm
kmemleak-Turn-kmemleak_lock-and-object-lock-to-raw_s.patch
+# 20200117090137.1205765-2-bigeasy@linutronix.de
+smp-Use-smp_cond_func_t-as-type-for-the-conditional-.patch
+# 20200117144106.hptwafmclfoyptdq@linutronix.de
+smp-Add-a-smp_cond_func_t-argument-to-smp_call_funct.patch
+# 20200117090137.1205765-4-bigeasy@linutronix.de
+smp-Remove-allocation-mask-from-on_each_cpu_cond.patch
+
############################################################
# Ready for posting
############################################################
@@ -278,9 +286,6 @@ completion-use-simple-wait-queues.patch
# Check whether schedule_hrtimeout() could be hard always
hrtimer-Allow-raw-wakeups-during-boot.patch
-# Check, Changelog ...
-hrtimer-move-state-change-before-hrtimer_cancel-in-d.patch
-
# POSIX-CPU-TIMERS
posix-timers-thread-posix-cpu-timers-on-rt.patch
posix-timers-expiry-lock.patch
diff --git a/patches/smp-Add-a-smp_cond_func_t-argument-to-smp_call_funct.patch b/patches/smp-Add-a-smp_cond_func_t-argument-to-smp_call_funct.patch
new file mode 100644
index 000000000000..0dc396cf4a0c
--- /dev/null
+++ b/patches/smp-Add-a-smp_cond_func_t-argument-to-smp_call_funct.patch
@@ -0,0 +1,139 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 16 Jan 2020 12:14:38 +0100
+Subject: [PATCH] smp: Add a smp_cond_func_t argument to
+ smp_call_function_many()
+
+on_each_cpu_cond_mask() allocates a new CPU mask. The newly allocated
+mask is a subset of the provided mask based on the conditional function.
+This memory allocation could be avoided by extending
+smp_call_function_many() with the conditional function and performing the
+remote function call based on the mask and the conditional function.
+
+Rename smp_call_function_many() to smp_call_function_many_cond() and add
+the smp_cond_func_t argument. If smp_cond_func_t is provided then it is
+used before invoking the function.
+Provide smp_call_function_many() with cond_func set to NULL.
+Let on_each_cpu_cond_mask() use smp_call_function_many_cond().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/smp.c | 77 +++++++++++++++++++++++++++--------------------------------
+ 1 file changed, 36 insertions(+), 41 deletions(-)
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -395,22 +395,9 @@ int smp_call_function_any(const struct c
+ }
+ EXPORT_SYMBOL_GPL(smp_call_function_any);
+
+-/**
+- * smp_call_function_many(): Run a function on a set of other CPUs.
+- * @mask: The set of cpus to run on (only runs on online subset).
+- * @func: The function to run. This must be fast and non-blocking.
+- * @info: An arbitrary pointer to pass to the function.
+- * @wait: If true, wait (atomically) until function has completed
+- * on other CPUs.
+- *
+- * If @wait is true, then returns once @func has returned.
+- *
+- * You must not call this function with disabled interrupts or from a
+- * hardware interrupt handler or from a bottom half handler. Preemption
+- * must be disabled when calling this function.
+- */
+-void smp_call_function_many(const struct cpumask *mask,
+- smp_call_func_t func, void *info, bool wait)
++static void smp_call_function_many_cond(const struct cpumask *mask,
++ smp_call_func_t func, void *info,
++ bool wait, smp_cond_func_t cond_func)
+ {
+ struct call_function_data *cfd;
+ int cpu, next_cpu, this_cpu = smp_processor_id();
+@@ -448,7 +435,8 @@ void smp_call_function_many(const struct
+
+ /* Fastpath: do that cpu by itself. */
+ if (next_cpu >= nr_cpu_ids) {
+- smp_call_function_single(cpu, func, info, wait);
++ if (!cond_func || cond_func(cpu, info))
++ smp_call_function_single(cpu, func, info, wait);
+ return;
+ }
+
+@@ -465,6 +453,9 @@ void smp_call_function_many(const struct
+ for_each_cpu(cpu, cfd->cpumask) {
+ call_single_data_t *csd = per_cpu_ptr(cfd->csd, cpu);
+
++ if (cond_func && !cond_func(cpu, info))
++ continue;
++
+ csd_lock(csd);
+ if (wait)
+ csd->flags |= CSD_FLAG_SYNCHRONOUS;
+@@ -486,6 +477,26 @@ void smp_call_function_many(const struct
+ }
+ }
+ }
++
++/**
++ * smp_call_function_many(): Run a function on a set of other CPUs.
++ * @mask: The set of cpus to run on (only runs on online subset).
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @wait: If true, wait (atomically) until function has completed
++ * on other CPUs.
++ *
++ * If @wait is true, then returns once @func has returned.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler. Preemption
++ * must be disabled when calling this function.
++ */
++void smp_call_function_many(const struct cpumask *mask,
++ smp_call_func_t func, void *info, bool wait)
++{
++ smp_call_function_many_cond(mask, func, info, wait, NULL);
++}
+ EXPORT_SYMBOL(smp_call_function_many);
+
+ /**
+@@ -684,33 +695,17 @@ void on_each_cpu_cond_mask(smp_cond_func
+ void *info, bool wait, gfp_t gfp_flags,
+ const struct cpumask *mask)
+ {
+- cpumask_var_t cpus;
+- int cpu, ret;
++ int cpu = get_cpu();
+
+- might_sleep_if(gfpflags_allow_blocking(gfp_flags));
++ smp_call_function_many_cond(mask, func, info, wait, cond_func);
++ if (cpumask_test_cpu(cpu, mask) && cond_func(cpu, info)) {
++ unsigned long flags;
+
+- if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
+- preempt_disable();
+- for_each_cpu(cpu, mask)
+- if (cond_func(cpu, info))
+- __cpumask_set_cpu(cpu, cpus);
+- on_each_cpu_mask(cpus, func, info, wait);
+- preempt_enable();
+- free_cpumask_var(cpus);
+- } else {
+- /*
+- * No free cpumask, bother. No matter, we'll
+- * just have to IPI them one by one.
+- */
+- preempt_disable();
+- for_each_cpu(cpu, mask)
+- if (cond_func(cpu, info)) {
+- ret = smp_call_function_single(cpu, func,
+- info, wait);
+- WARN_ON_ONCE(ret);
+- }
+- preempt_enable();
++ local_irq_save(flags);
++ func(info);
++ local_irq_restore(flags);
+ }
++ put_cpu();
+ }
+ EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
diff --git a/patches/smp-Remove-allocation-mask-from-on_each_cpu_cond.patch b/patches/smp-Remove-allocation-mask-from-on_each_cpu_cond.patch
new file mode 100644
index 000000000000..aabbe648e636
--- /dev/null
+++ b/patches/smp-Remove-allocation-mask-from-on_each_cpu_cond.patch
@@ -0,0 +1,127 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 16 Jan 2020 13:13:41 +0100
+Subject: [PATCH] smp: Remove allocation mask from on_each_cpu_cond.*()
+
+The allocation mask is no longer used by on_each_cpu_cond() and
+on_each_cpu_cond_mask() and can be removed.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/mm/tlb.c | 2 +-
+ fs/buffer.c | 2 +-
+ include/linux/smp.h | 5 ++---
+ kernel/smp.c | 13 +++----------
+ kernel/up.c | 7 +++----
+ mm/slub.c | 2 +-
+ 6 files changed, 11 insertions(+), 20 deletions(-)
+
+--- a/arch/x86/mm/tlb.c
++++ b/arch/x86/mm/tlb.c
+@@ -708,7 +708,7 @@ void native_flush_tlb_others(const struc
+ (void *)info, 1);
+ else
+ on_each_cpu_cond_mask(tlb_is_not_lazy, flush_tlb_func_remote,
+- (void *)info, 1, GFP_ATOMIC, cpumask);
++ (void *)info, 1, cpumask);
+ }
+
+ /*
+--- a/fs/buffer.c
++++ b/fs/buffer.c
+@@ -1387,7 +1387,7 @@ static bool has_bh_in_lru(int cpu, void
+
+ void invalidate_bh_lrus(void)
+ {
+- on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1, GFP_KERNEL);
++ on_each_cpu_cond(has_bh_in_lru, invalidate_bh_lru, NULL, 1);
+ }
+ EXPORT_SYMBOL_GPL(invalidate_bh_lrus);
+
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -51,11 +51,10 @@ void on_each_cpu_mask(const struct cpuma
+ * processor.
+ */
+ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags);
++ void *info, bool wait);
+
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags,
+- const struct cpumask *mask);
++ void *info, bool wait, const struct cpumask *mask);
+
+ int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -679,11 +679,6 @@ EXPORT_SYMBOL(on_each_cpu_mask);
+ * @info: An arbitrary pointer to pass to both functions.
+ * @wait: If true, wait (atomically) until function has
+ * completed on other CPUs.
+- * @gfp_flags: GFP flags to use when allocating the cpumask
+- * used internally by the function.
+- *
+- * The function might sleep if the GFP flags indicates a non
+- * atomic allocation is allowed.
+ *
+ * Preemption is disabled to protect against CPUs going offline but not online.
+ * CPUs going online during the call will not be seen or sent an IPI.
+@@ -692,8 +687,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags,
+- const struct cpumask *mask)
++ void *info, bool wait, const struct cpumask *mask)
+ {
+ int cpu = get_cpu();
+
+@@ -710,10 +704,9 @@ void on_each_cpu_cond_mask(smp_cond_func
+ EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags)
++ void *info, bool wait)
+ {
+- on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
+- cpu_online_mask);
++ on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
+ }
+ EXPORT_SYMBOL(on_each_cpu_cond);
+
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -69,8 +69,7 @@ EXPORT_SYMBOL(on_each_cpu_mask);
+ * same condtions in UP and SMP.
+ */
+ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags,
+- const struct cpumask *mask)
++ void *info, bool wait, const struct cpumask *mask)
+ {
+ unsigned long flags;
+
+@@ -85,9 +84,9 @@ void on_each_cpu_cond_mask(smp_cond_func
+ EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+ void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+- void *info, bool wait, gfp_t gfp_flags)
++ void *info, bool wait)
+ {
+- on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
++ on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
+ }
+ EXPORT_SYMBOL(on_each_cpu_cond);
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2340,7 +2340,7 @@ static bool has_cpu_slab(int cpu, void *
+
+ static void flush_all(struct kmem_cache *s)
+ {
+- on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
++ on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1);
+ }
+
+ /*
diff --git a/patches/smp-Use-smp_cond_func_t-as-type-for-the-conditional-.patch b/patches/smp-Use-smp_cond_func_t-as-type-for-the-conditional-.patch
new file mode 100644
index 000000000000..293d2901d233
--- /dev/null
+++ b/patches/smp-Use-smp_cond_func_t-as-type-for-the-conditional-.patch
@@ -0,0 +1,99 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 16 Jan 2020 12:00:31 +0100
+Subject: [PATCH] smp: Use smp_cond_func_t as type for the conditional
+ function
+
+Use a typedef for the conditional function instead of defining it each time in
+the function prototype.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/smp.h | 14 +++++++-------
+ kernel/smp.c | 11 +++++------
+ kernel/up.c | 11 +++++------
+ 3 files changed, 17 insertions(+), 19 deletions(-)
+
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -15,6 +15,7 @@
+ #include <linux/llist.h>
+
+ typedef void (*smp_call_func_t)(void *info);
++typedef bool (*smp_cond_func_t)(int cpu, void *info);
+ struct __call_single_data {
+ struct llist_node llist;
+ smp_call_func_t func;
+@@ -49,13 +50,12 @@ void on_each_cpu_mask(const struct cpuma
+ * cond_func returns a positive value. This may include the local
+ * processor.
+ */
+-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags);
+-
+-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags, const struct cpumask *mask);
++void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags);
++
++void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags,
++ const struct cpumask *mask);
+
+ int smp_call_function_single_async(int cpu, call_single_data_t *csd);
+
+--- a/kernel/smp.c
++++ b/kernel/smp.c
+@@ -680,9 +680,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
+ * You must not call this function with disabled interrupts or
+ * from a hardware interrupt handler or from a bottom half handler.
+ */
+-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags, const struct cpumask *mask)
++void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags,
++ const struct cpumask *mask)
+ {
+ cpumask_var_t cpus;
+ int cpu, ret;
+@@ -714,9 +714,8 @@ void on_each_cpu_cond_mask(bool (*cond_f
+ }
+ EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags)
++void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags)
+ {
+ on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags,
+ cpu_online_mask);
+--- a/kernel/up.c
++++ b/kernel/up.c
+@@ -68,9 +68,9 @@ EXPORT_SYMBOL(on_each_cpu_mask);
+ * Preemption is disabled here to make sure the cond_func is called under the
+ * same condtions in UP and SMP.
+ */
+-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags, const struct cpumask *mask)
++void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags,
++ const struct cpumask *mask)
+ {
+ unsigned long flags;
+
+@@ -84,9 +84,8 @@ void on_each_cpu_cond_mask(bool (*cond_f
+ }
+ EXPORT_SYMBOL(on_each_cpu_cond_mask);
+
+-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
+- smp_call_func_t func, void *info, bool wait,
+- gfp_t gfp_flags)
++void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
++ void *info, bool wait, gfp_t gfp_flags)
+ {
+ on_each_cpu_cond_mask(cond_func, func, info, wait, gfp_flags, NULL);
+ }