 patches/irqwork-push_most_work_into_softirq_context.patch | 72 +++++------
 patches/localversion.patch                                |  2 +-
 patches/random-avoid-preempt_disable-ed-section.patch     | 74 -----------
 patches/series                                            |  1 -
 4 files changed, 31 insertions(+), 118 deletions(-)
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index d6893f871c19..237485ad3875 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -22,12 +22,12 @@ Mike Galbraith,
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/irq_work.h | 8 ++++++
- kernel/irq_work.c | 60 ++++++++++++++++++++++++++++++++++++-----------
+ kernel/irq_work.c | 59 +++++++++++++++++++++++++++++++++++++----------
kernel/rcu/tree.c | 1
kernel/sched/topology.c | 1
kernel/time/tick-sched.c | 1
kernel/time/timer.c | 2 +
- 6 files changed, 60 insertions(+), 13 deletions(-)
+ 6 files changed, 60 insertions(+), 12 deletions(-)
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -61,52 +61,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/processor.h>
-@@ -64,6 +65,8 @@ void __weak arch_irq_work_raise(void)
- */
- bool irq_work_queue_on(struct irq_work *work, int cpu)
- {
-+ struct llist_head *list;
-+
- /* All work should have been flushed before going offline */
- WARN_ON_ONCE(cpu_is_offline(cpu));
-
-@@ -76,7 +79,12 @@ bool irq_work_queue_on(struct irq_work *
- if (!irq_work_claim(work))
- return false;
-
-- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
-+ list = &per_cpu(lazy_list, cpu);
-+ else
-+ list = &per_cpu(raised_list, cpu);
-+
-+ if (llist_add(&work->llnode, list))
- arch_send_call_function_single_ipi(cpu);
-
- #else /* #ifdef CONFIG_SMP */
-@@ -89,6 +97,9 @@ bool irq_work_queue_on(struct irq_work *
- /* Enqueue the irq work @work on the current CPU */
- bool irq_work_queue(struct irq_work *work)
+@@ -59,13 +60,19 @@ void __weak arch_irq_work_raise(void)
+ /* Enqueue on current CPU, work must already be claimed and preempt disabled */
+ static void __irq_work_queue_local(struct irq_work *work)
{
+ struct llist_head *list;
+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+
- /* Only queue if not already pending */
- if (!irq_work_claim(work))
- return false;
-@@ -96,13 +107,15 @@ bool irq_work_queue(struct irq_work *wor
- /* Queue the entry and raise the IPI if needed. */
- preempt_disable();
-
-- /* If the work is "lazy", handle it from next tick if any */
++ lazy_work = work->flags & IRQ_WORK_LAZY;
++
+ /* If the work is "lazy", handle it from next tick if any */
- if (work->flags & IRQ_WORK_LAZY) {
- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
- tick_nohz_tick_stopped())
- arch_irq_work_raise();
- } else {
- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-+ lazy_work = work->flags & IRQ_WORK_LAZY;
-+
+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+ list = this_cpu_ptr(&lazy_list);
+ else
@@ -116,8 +86,26 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!lazy_work || tick_nohz_tick_stopped())
arch_irq_work_raise();
}
+ }
+@@ -107,9 +114,16 @@ bool irq_work_queue_on(struct irq_work *
-@@ -119,9 +132,8 @@ bool irq_work_needs_cpu(void)
+ preempt_disable();
+ if (cpu != smp_processor_id()) {
++ struct llist_head *list;
++
+ /* Arch remote IPI send/receive backend aren't NMI safe */
+ WARN_ON_ONCE(in_nmi());
+- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
++ list = &per_cpu(lazy_list, cpu);
++ else
++ list = &per_cpu(raised_list, cpu);
++
++ if (llist_add(&work->llnode, list))
+ arch_send_call_function_single_ipi(cpu);
+ } else {
+ __irq_work_queue_local(work);
+@@ -128,9 +142,8 @@ bool irq_work_needs_cpu(void)
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
@@ -129,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -135,8 +147,12 @@ static void irq_work_run_list(struct lli
+@@ -144,8 +157,12 @@ static void irq_work_run_list(struct lli
struct llist_node *llnode;
unsigned long flags;
@@ -143,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (llist_empty(list))
return;
-@@ -168,7 +184,16 @@ static void irq_work_run_list(struct lli
+@@ -177,7 +194,16 @@ static void irq_work_run_list(struct lli
void irq_work_run(void)
{
irq_work_run_list(this_cpu_ptr(&raised_list));
@@ -161,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(irq_work_run);
-@@ -178,8 +203,17 @@ void irq_work_tick(void)
+@@ -187,8 +213,17 @@ void irq_work_tick(void)
if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
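
[Editor's note: the refresh above re-diffs the RT rework against an upstream base that now factors local queueing into __irq_work_queue_local(); the routing rule itself is unchanged. A minimal user-space sketch of that rule follows — pick_list() and the flag bit values are illustrative, not the kernel's:]

/*
 * Sketch (not kernel code): on PREEMPT_RT_FULL, only work flagged
 * IRQ_WORK_HARD_IRQ stays on raised_list (run from hard interrupt
 * context); lazy work, and on RT all other work, goes to lazy_list
 * (run from softirq / the tick).
 */
#include <stdbool.h>
#include <stdio.h>

#define IRQ_WORK_LAZY     (1u << 0)   /* illustrative bit values */
#define IRQ_WORK_HARD_IRQ (1u << 1)

enum target_list { RAISED_LIST, LAZY_LIST };

static enum target_list pick_list(unsigned int flags, bool rt_full)
{
	bool lazy_work = flags & IRQ_WORK_LAZY;

	/* Mirrors: lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)) */
	if (lazy_work || (rt_full && !(flags & IRQ_WORK_HARD_IRQ)))
		return LAZY_LIST;
	return RAISED_LIST;
}

int main(void)
{
	printf("plain work, RT:    %s\n",
	       pick_list(0, true) == LAZY_LIST ? "lazy_list" : "raised_list");
	printf("hard-IRQ work, RT: %s\n",
	       pick_list(IRQ_WORK_HARD_IRQ, true) == LAZY_LIST ? "lazy_list" : "raised_list");
	printf("plain work, !RT:   %s\n",
	       pick_list(0, false) == LAZY_LIST ? "lazy_list" : "raised_list");
	return 0;
}
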
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 58842b503a27..12bd473a33f5 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt11
++-rt12
diff --git a/patches/random-avoid-preempt_disable-ed-section.patch b/patches/random-avoid-preempt_disable-ed-section.patch
deleted file mode 100644
index 11b8ccf9dd1f..000000000000
--- a/patches/random-avoid-preempt_disable-ed-section.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 12 May 2017 15:46:17 +0200
-Subject: [PATCH] random: avoid preempt_disable()ed section
-
-extract_crng() will use sleeping locks while in a preempt_disable()
-section due to get_cpu_var().
-Work around it with local_locks.
-
-Cc: stable-rt@vger.kernel.org # where it applies to
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/char/random.c | 11 +++++++----
- 1 file changed, 7 insertions(+), 4 deletions(-)
-
---- a/drivers/char/random.c
-+++ b/drivers/char/random.c
-@@ -265,6 +265,7 @@
- #include <linux/syscalls.h>
- #include <linux/completion.h>
- #include <linux/uuid.h>
-+#include <linux/locallock.h>
- #include <crypto/chacha.h>
-
- #include <asm/processor.h>
-@@ -2222,6 +2223,7 @@ static rwlock_t batched_entropy_reset_lo
- * at any point prior.
- */
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
- u64 get_random_u64(void)
- {
- u64 ret;
-@@ -2242,7 +2244,7 @@ u64 get_random_u64(void)
- warn_unseeded_randomness(&previous);
-
- use_lock = READ_ONCE(crng_init) < 2;
-- batch = &get_cpu_var(batched_entropy_u64);
-+ batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
-@@ -2252,12 +2254,13 @@ u64 get_random_u64(void)
- ret = batch->entropy_u64[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-- put_cpu_var(batched_entropy_u64);
-+ put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
- return ret;
- }
- EXPORT_SYMBOL(get_random_u64);
-
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
- u32 get_random_u32(void)
- {
- u32 ret;
-@@ -2272,7 +2275,7 @@ u32 get_random_u32(void)
- warn_unseeded_randomness(&previous);
-
- use_lock = READ_ONCE(crng_init) < 2;
-- batch = &get_cpu_var(batched_entropy_u32);
-+ batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
- if (use_lock)
- read_lock_irqsave(&batched_entropy_reset_lock, flags);
- if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
-@@ -2282,7 +2285,7 @@ u32 get_random_u32(void)
- ret = batch->entropy_u32[batch->position++];
- if (use_lock)
- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
-- put_cpu_var(batched_entropy_u32);
-+ put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
- return ret;
- }
- EXPORT_SYMBOL(get_random_u32);
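
[Editor's note: the deleted patch's message above explains what it worked around — get_cpu_var() disables preemption, but the batch refill path (extract_crng()) may take sleeping locks, so the patch guarded the per-CPU batch with a local lock instead. Below is a rough single-threaded stand-in for that pattern, using one pthread mutex in place of the per-CPU local lock; none of this is the kernel API, and the refill data is fake:]

/*
 * Sketch of the get_locked_var()/put_locked_var() pattern: a lock that on
 * RT is a sleeping lock replaces preemption disabling, so blocking in the
 * refill path is safe.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_SIZE 8

struct batched_entropy {
	uint64_t entropy_u64[BATCH_SIZE];
	unsigned int position;
};

/* Stand-in for DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock) + per-CPU data. */
static pthread_mutex_t batched_entropy_u64_lock = PTHREAD_MUTEX_INITIALIZER;
static struct batched_entropy batched_entropy_u64;

static uint64_t get_random_u64_sketch(void)
{
	uint64_t ret;

	/* get_locked_var(): take the lock instead of disabling preemption */
	pthread_mutex_lock(&batched_entropy_u64_lock);
	if (batched_entropy_u64.position % BATCH_SIZE == 0) {
		/* Refill point: in the kernel this is extract_crng(), which may
		 * sleep -- fine under a mutex, not fine inside a
		 * preempt_disable()ed section on RT. Fake values here. */
		for (unsigned int i = 0; i < BATCH_SIZE; i++)
			batched_entropy_u64.entropy_u64[i] = 0x1234abcdULL + i;
	}
	ret = batched_entropy_u64.entropy_u64[batched_entropy_u64.position % BATCH_SIZE];
	batched_entropy_u64.position++;
	/* put_locked_var() */
	pthread_mutex_unlock(&batched_entropy_u64_lock);
	return ret;
}

int main(void)
{
	printf("sample: 0x%llx\n", (unsigned long long)get_random_u64_sketch());
	return 0;
}
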
diff --git a/patches/series b/patches/series
index 092998614c87..2e468df0da12 100644
--- a/patches/series
+++ b/patches/series
@@ -430,7 +430,6 @@ panic-disable-random-on-rt.patch
x86-stackprot-no-random-on-rt.patch
powerpc-stackprotector-work-around-stack-guard-init-.patch
random-make-it-work-on-rt.patch
-random-avoid-preempt_disable-ed-section.patch
# HOTPLUG
cpu-hotplug--Implement-CPU-pinning.patch