author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2021-02-03 18:49:40 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2021-02-03 18:49:40 +0100
commit     c7c611f9bd063cc82d62e055e1fbf05d79759680 (patch)
tree       427e0b42e3307704ea4a42869c4e04d447d18f68
parent     a3b5a7aa8819ab15fd619655c94a62bb605bc74b (diff)
download   linux-rt-5.10.12-rt26-patches.tar.gz

[ANNOUNCE] v5.10.12-rt26    (tag: v5.10.12-rt26-patches)
Dear RT folks!

I'm pleased to announce the v5.10.12-rt26 patch set.

Changes since v5.10.12-rt25:

  - Updated the "tracing: Merge irqflags + preempt counter." patch to the
    version Steven posted for upstream inclusion.

  - Updated the work-in-progress softirq patch. One difference is that
    tasklet_disable() now sleeps if the tasklet is running, instead of
    busy-spinning until it is done. Drivers which invoke the function in
    atomic context on !RT have been converted.

Known issues:

  - kdb/kgdb can easily deadlock.

  - kmsg dumpers expecting not to be called in parallel can clobber their
    temp buffer.

  - netconsole triggers WARN.

The delta patch against v5.10.12-rt25 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/incr/patch-5.10.12-rt25-rt26.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.10.12-rt26

The RT patch against v5.10.12 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patch-5.10.12-rt26.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.12-rt26.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
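To make the tasklet_disable() change above concrete, here is a minimal sketch of
how the two disable helpers are meant to be used after this series. The driver
structure and functions are hypothetical; only tasklet_disable(),
tasklet_disable_in_atomic() and tasklet_enable() come from the patches below.

    /* Hypothetical driver fragment, for illustration only. */
    static void example_teardown(struct example_dev *dev)
    {
            /*
             * Process context: with this series tasklet_disable() may now
             * sleep (wait_var_event()) while the tasklet is still running.
             */
            tasklet_disable(&dev->rx_tasklet);
            /* ... free resources ... */
            tasklet_enable(&dev->rx_tasklet);
    }

    static irqreturn_t example_irq(int irq, void *data)
    {
            struct example_dev *dev = data;

            /*
             * Atomic context: the new helper keeps the old busy-wait
             * behaviour, which is what the converted drivers now use.
             */
            tasklet_disable_in_atomic(&dev->rx_tasklet);
            /* ... reprogram hardware ... */
            tasklet_enable(&dev->rx_tasklet);
            return IRQ_HANDLED;
    }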
-rw-r--r--  patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch |   6
-rw-r--r--  patches/0001-printk-inline-log_output-log_store-in-vprintk_store.patch |   4
-rw-r--r--  patches/0001-softirq-Replace-barrier-with-cpu_relax-in-tasklet_un.patch (renamed from patches/softirq_Replace_barrier_with_cpu_relax_in_tasklet_unlock_wait_.patch) |  11
-rw-r--r--  patches/0001-tracing-Merge-irqflags-preempt-counter.patch |  94
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch |  16
-rw-r--r--  patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch |   2
-rw-r--r--  patches/0002-printk-remove-logbuf_lock-writer-protection-of-ringb.patch |  10
-rw-r--r--  patches/0002-tasklets-Use-static-inlines-for-stub-implementations.patch (renamed from patches/tasklets_Use_static_inlines_for_stub_implementations.patch) |   8
-rw-r--r--  patches/0002-tracing-Inline-tracing_gen_ctx_flags.patch | 173
-rw-r--r--  patches/0003-printk-use-seqcount_latch-for-clear_seq.patch |  12
-rw-r--r--  patches/0003-tasklets-Provide-tasklet_disable_in_atomic.patch |  60
-rw-r--r--  patches/0003-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch (renamed from patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch) |  10
-rw-r--r--  patches/0004-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch |  75
-rw-r--r--  patches/0004-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch (renamed from patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch) |   8
-rw-r--r--  patches/0005-printk-add-syslog_lock.patch |  18
-rw-r--r--  patches/0005-tasklets-Replace-spin-wait-in-tasklet_kill.patch |  66
-rw-r--r--  patches/0006-define-CONSOLE_LOG_MAX-in-printk.h.patch |  10
-rw-r--r--  patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch |   2
-rw-r--r--  patches/0006-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch (renamed from patches/tasklets_Prevent_kill_unlock_wait_deadlock_on_RT.patch) |  71
-rw-r--r--  patches/0007-printk-track-limit-recursion.patch |  12
-rw-r--r--  patches/0007-softirq-Add-RT-specific-softirq-accounting.patch (renamed from patches/softirq_Add_RT_specific_softirq_accounting.patch) |  10
-rw-r--r--  patches/0008-irqtime-Make-accounting-correct-on-RT.patch |  46
-rw-r--r--  patches/0008-printk-invoke-dumper-using-a-copy-of-the-iterator.patch |   4
-rw-r--r--  patches/0009-printk-remove-logbuf_lock.patch |  46
-rw-r--r--  patches/0009-softirq-Move-various-protections-into-inline-helpers.patch (renamed from patches/softirq_Move_various_protections_into_inline_helpers.patch) |  18
-rw-r--r--  patches/0010-softirq-Make-softirq-control-and-processing-RT-aware.patch (renamed from patches/softirq_Make_softirq_control_and_processing_RT_aware.patch) |  18
-rw-r--r--  patches/0011-printk-refactor-kmsg_dump_get_buffer.patch |   4
-rw-r--r--  patches/0011-tick-sched-Prevent-false-positive-softirq-pending-wa.patch (renamed from patches/tick_sched_Prevent_false_positive_softirq_pending_warnings_on_RT.patch) |  13
-rw-r--r--  patches/0012-console-add-write_atomic-interface.patch |   2
-rw-r--r--  patches/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch |   2
-rw-r--r--  patches/0012-rcu-Prevent-false-positive-softirq-warning-on-RT.patch (renamed from patches/rcu_Prevent_false_positive_softirq_warning_on_RT.patch) |  10
-rw-r--r--  patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch |   2
-rw-r--r--  patches/0013-net-jme-Replace-link-change-tasklet-with-a-worker.patch |  78
-rw-r--r--  patches/0014-net-arcnet-Fix-RESET-flag-handling.patch | 287
-rw-r--r--  patches/0014-printk-relocate-printk_delay-and-vprintk_default.patch |   6
-rw-r--r--  patches/0015-net-sundance-Use-tasklet_disable_in_atomic.patch |  25
-rw-r--r--  patches/0015-printk-combine-boot_delay_msec-into-printk_delay.patch |   4
-rw-r--r--  patches/0016-ath9k-Use-tasklet_disable_in_atomic.patch |  32
-rw-r--r--  patches/0016-printk-change-console_seq-to-atomic64_t.patch |  16
-rw-r--r--  patches/0017-PCI-hv-Use-tasklet_disable_in_atomic.patch |  39
-rw-r--r--  patches/0017-printk-introduce-kernel-sync-mode.patch |  22
-rw-r--r--  patches/0018-atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch |  28
-rw-r--r--  patches/0018-printk-move-console-printing-to-kthreads.patch |  32
-rw-r--r--  patches/0019-firewire-ohci-Use-tasklet_disable_in_atomic-in-the-t.patch |  45
-rw-r--r--  patches/0019-printk-remove-deferred-printing.patch |  12
-rw-r--r--  patches/0020-printk-add-console-handover.patch |   6
-rw-r--r--  patches/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch |   8
-rw-r--r--  patches/0021-printk-add-pr_flush.patch |   4
-rw-r--r--  patches/arch-arm64-Add-lazy-preempt-support.patch |   2
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch |  12
-rw-r--r--  patches/irqtime-Use-irq_count-instead-of-preempt_count.patch |  32
-rw-r--r--  patches/localversion.patch |   2
-rw-r--r--  patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch |  10
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch |  12
-rw-r--r--  patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch |   2
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch |  26
-rw-r--r--  patches/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch |   2
-rw-r--r--  patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch |   4
-rw-r--r--  patches/preempt-lazy-support.patch |  41
-rw-r--r--  patches/series |  36
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch |   2
-rw-r--r--  patches/skbufhead-raw-lock.patch |   2
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch |   2
-rw-r--r--  patches/x86-fpu-Make-kernel-FPU-protection-RT-friendly.patch |   2
-rw-r--r--  patches/x86-fpu-Simplify-fpregs_-un-lock.patch |   2
65 files changed, 1297 insertions, 381 deletions
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 9a900e3f4313..050b6daf1f49 100644
--- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2937,13 +2943,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2938,13 +2944,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2959,14 +2970,21 @@ static void drain_pages_zone(unsigned in
+@@ -2960,14 +2971,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3195,7 +3213,10 @@ static void free_unref_page_commit(struc
+@@ -3196,7 +3214,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/0001-printk-inline-log_output-log_store-in-vprintk_store.patch b/patches/0001-printk-inline-log_output-log_store-in-vprintk_store.patch
index f7f8530692ab..4a89b866d07b 100644
--- a/patches/0001-printk-inline-log_output-log_store-in-vprintk_store.patch
+++ b/patches/0001-printk-inline-log_output-log_store-in-vprintk_store.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int dmesg_restrict = IS_ENABLED(CONFIG_SECURITY_DMESG_RESTRICT);
static int syslog_action_restricted(int type)
-@@ -1907,44 +1861,28 @@ static inline u32 printk_caller_id(void)
+@@ -1925,44 +1879,28 @@ static inline u32 printk_caller_id(void)
0x80000000 + raw_smp_processor_id();
}
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The printf needs to come first; we need the syslog
-@@ -1983,7 +1921,58 @@ int vprintk_store(int facility, int leve
+@@ -2001,7 +1939,58 @@ int vprintk_store(int facility, int leve
if (dev_info)
lflags |= LOG_NEWLINE;
diff --git a/patches/softirq_Replace_barrier_with_cpu_relax_in_tasklet_unlock_wait_.patch b/patches/0001-softirq-Replace-barrier-with-cpu_relax-in-tasklet_un.patch
index 6c5bfbcdcd55..72cfd21db800 100644
--- a/patches/softirq_Replace_barrier_with_cpu_relax_in_tasklet_unlock_wait_.patch
+++ b/patches/0001-softirq-Replace-barrier-with-cpu_relax-in-tasklet_un.patch
@@ -1,14 +1,15 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: softirq: Replace barrier() with cpu_relax() in tasklet_unlock_wait()
-Date: Fri, 13 Nov 2020 15:02:24 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:58 +0100
+Subject: [PATCH 01/19] softirq: Replace barrier() with cpu_relax() in
+ tasklet_unlock_wait()
A barrier() in a tight loop which waits for something to happen on a remote
CPU is a pointless exercise. Replace it with cpu_relax() which allows HT
siblings to make progress.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20201204170805.430113367@linutronix.de
---
include/linux/interrupt.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
@@ -20,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-+ while (test_bit(TASKLET_STATE_RUN, &(t)->state))
++ while (test_bit(TASKLET_STATE_RUN, &t->state))
+ cpu_relax();
}
#else
diff --git a/patches/0001-tracing-Merge-irqflags-preempt-counter.patch b/patches/0001-tracing-Merge-irqflags-preempt-counter.patch
index cfb3017ce1d3..c7fc72b3939e 100644
--- a/patches/0001-tracing-Merge-irqflags-preempt-counter.patch
+++ b/patches/0001-tracing-Merge-irqflags-preempt-counter.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Jan 2021 15:58:53 +0100
-Subject: [PATCH 1/3] tracing: Merge irqflags + preempt counter.
+Date: Wed, 3 Feb 2021 11:05:23 -0500
+Subject: [PATCH 1/4] tracing: Merge irqflags + preempt counter.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
@@ -51,6 +51,10 @@ A build with all tracers enabled on x86-64 with and without the patch:
text shrank by 379 bytes, data remained constant.
+Link: https://lkml.kernel.org/r/20210125194511.3924915-2-bigeasy@linutronix.de
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/trace_events.h | 25 +++-
@@ -94,9 +98,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ entry->flags = trace_ctx >> 16;
+}
+
-+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags);
-+unsigned int tracing_gen_ctx_flags(void);
-+unsigned int tracing_gen_ctx_flags_dect(void);
++unsigned int tracing_gen_ctx_flags(unsigned long irqflags);
++unsigned int tracing_gen_ctx(void);
++unsigned int tracing_gen_ctx_dec(void);
+
struct trace_event_file;
@@ -134,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (blk_tracer) {
buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
-+ trace_ctx = _tracing_gen_ctx_flags(0);
++ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + len + cgid_len,
- 0, pc);
@@ -167,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
buffer = blk_tr->array_buffer.buffer;
- pc = preempt_count();
-+ trace_ctx = _tracing_gen_ctx_flags(0);
++ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
sizeof(*t) + pdu_len + cgid_len,
- 0, pc);
@@ -276,7 +280,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
alloc = sizeof(*entry) + size + 2; /* possible \n added */
- local_save_flags(irq_flags);
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
- event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
@@ -314,7 +318,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
- local_save_flags(irq_flags);
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
buffer = global_trace.array_buffer.buffer;
ring_buffer_nest_start(buffer);
@@ -340,7 +344,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-void
-tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
- unsigned long flags, int pc)
-+unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
++unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
{
- struct task_struct *tsk = current;
+ unsigned int trace_flags = 0;
@@ -381,7 +385,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return (trace_flags << 16) | (pc & 0xff);
+}
+
-+unsigned int tracing_gen_ctx_flags(void)
++unsigned int tracing_gen_ctx(void)
+{
+ unsigned long irqflags;
+
@@ -390,14 +394,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#else
+ irqflags = 0;
+#endif
-+ return _tracing_gen_ctx_flags(irqflags);
++ return tracing_gen_ctx_flags(irqflags);
+}
+
-+unsigned int tracing_gen_ctx_flags_dect(void)
++unsigned int tracing_gen_ctx_dec(void)
+{
+ unsigned int trace_ctx;
+
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
+
+ /*
+ * Subtract one from the preeption counter if preemption is enabled,
@@ -585,7 +589,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
- flags, skip, preempt_count(), NULL);
-+ tracing_gen_ctx_flags(), skip, NULL);
++ tracing_gen_ctx(), skip, NULL);
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
@@ -633,7 +637,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pause_graph_tracing();
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
tbuffer = get_trace_buf();
@@ -677,7 +681,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pause_graph_tracing();
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
@@ -724,7 +728,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
- irq_flags, preempt_count());
-+ tracing_gen_ctx_flags());
++ tracing_gen_ctx());
if (unlikely(!event))
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -748,7 +752,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
- irq_flags, preempt_count());
-+ tracing_gen_ctx_flags());
++ tracing_gen_ctx());
if (!event)
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
@@ -893,7 +897,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
- pc = preempt_count();
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
- sizeof(*entry), flags, pc);
@@ -912,7 +916,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(flags);
- tracing_generic_entry_update(entry, type, flags, pc);
-+ tracing_generic_entry_update(entry, type, tracing_gen_ctx_flags());
++ tracing_generic_entry_update(entry, type, tracing_gen_ctx());
}
NOKPROBE_SYMBOL(perf_trace_buf_update);
@@ -932,7 +936,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
- if (IS_ENABLED(CONFIG_PREEMPTION))
- fbuffer->pc--;
-+ fbuffer->trace_ctx = tracing_gen_ctx_flags_dect();
++ fbuffer->trace_ctx = tracing_gen_ctx_dec();
fbuffer->trace_file = trace_file;
fbuffer->event =
@@ -954,7 +958,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- int pc;
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
@@ -998,7 +1002,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- tracing_generic_entry_update(entry, call->event.type, irq_flags,
- preempt_count());
+ tracing_generic_entry_update(entry, call->event.type,
-+ tracing_gen_ctx_flags());
++ tracing_gen_ctx());
while ((len = parse_field(str, call, &field, &val)) > 0) {
if (is_function_field(field))
@@ -1018,7 +1022,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
bit = trace_test_and_set_recursion(TRACE_FTRACE_START, TRACE_FTRACE_MAX);
@@ -1052,7 +1056,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- pc = preempt_count();
- trace_function(tr, ip, parent_ip, flags, pc);
- __trace_stack(tr, flags, STACK_SKIP, pc);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ trace_function(tr, ip, parent_ip, trace_ctx);
+ __trace_stack(tr, trace_ctx, STACK_SKIP);
}
@@ -1068,7 +1072,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(flags);
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
- __trace_stack(tr, flags, FTRACE_STACK_SKIP, pc);
+ __trace_stack(tr, trace_ctx, FTRACE_STACK_SKIP);
@@ -1114,7 +1118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (likely(disabled == 1)) {
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
} else {
ret = 0;
@@ -1182,7 +1186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (likely(disabled == 1)) {
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
}
atomic_dec(&data->disabled);
@@ -1201,7 +1205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
event = trace_buffer_lock_reserve(buffer, TRACE_HWLAT, sizeof(*entry),
- flags, pc);
-+ tracing_gen_ctx_flags());
++ tracing_gen_ctx());
if (!event)
return;
entry = ring_buffer_event_data(event);
@@ -1217,7 +1221,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
- trace_function(tr, ip, parent_ip, flags, preempt_count());
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+
+ trace_function(tr, ip, parent_ip, trace_ctx);
@@ -1239,7 +1243,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- pc = preempt_count();
- ret = __trace_graph_entry(tr, trace, flags, pc);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ ret = __trace_graph_entry(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
@@ -1258,7 +1262,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- pc = preempt_count();
- __trace_graph_return(tr, trace, flags, pc);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+ __trace_graph_return(tr, trace, trace_ctx);
atomic_dec(&data->disabled);
}
@@ -1293,7 +1297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(flags);
-
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
if (!report_latency(tr, delta))
goto out;
@@ -1335,7 +1339,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(flags);
-
- __trace_function(tr, ip, parent_ip, flags, pc);
-+ __trace_function(tr, ip, parent_ip, tracing_gen_ctx_flags());
++ __trace_function(tr, ip, parent_ip, tracing_gen_ctx());
per_cpu(tracing_cpu, cpu) = 1;
@@ -1360,7 +1364,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(flags);
- __trace_function(tr, ip, parent_ip, flags, pc);
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
+ __trace_function(tr, ip, parent_ip, trace_ctx);
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
@@ -1445,7 +1449,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
-+ fbuffer.trace_ctx = tracing_gen_ctx_flags();
++ fbuffer.trace_ctx = tracing_gen_ctx();
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
@@ -1464,7 +1468,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(fbuffer.flags);
- fbuffer.pc = preempt_count();
-+ fbuffer.trace_ctx = tracing_gen_ctx_flags();
++ fbuffer.trace_ctx = tracing_gen_ctx();
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
@@ -1486,7 +1490,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- int pc = preempt_count();
+ unsigned int trace_ctx;
-+ trace_ctx = _tracing_gen_ctx_flags(0);
++ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
@@ -1509,7 +1513,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- int pc = preempt_count();
+ unsigned int trace_ctx;
-+ trace_ctx = _tracing_gen_ctx_flags(0);
++ trace_ctx = tracing_gen_ctx_flags(0);
event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
- sizeof(*entry), 0, pc);
+ sizeof(*entry), trace_ctx);
@@ -1541,7 +1545,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
- *pc = preempt_count();
-+ *trace_ctx = tracing_gen_ctx_flags();
++ *trace_ctx = tracing_gen_ctx();
preempt_disable_notrace();
cpu = raw_smp_processor_id();
@@ -1698,7 +1702,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
local_irq_save(flags);
-+ trace_ctx = _tracing_gen_ctx_flags(flags);
++ trace_ctx = tracing_gen_ctx_flags(flags);
+
arch_spin_lock(&wakeup_lock);
@@ -1736,7 +1740,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(disabled != 1))
goto out;
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
+
/* interrupts should be off from try_to_wake_up */
arch_spin_lock(&wakeup_lock);
@@ -1783,7 +1787,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(irq_flags);
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
@@ -1817,7 +1821,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- local_save_flags(irq_flags);
- pc = preempt_count();
-+ trace_ctx = tracing_gen_ctx_flags();
++ trace_ctx = tracing_gen_ctx();
buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer,
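The conversion pattern in the hunks above is repeated across many tracers, so
here is a condensed before/after sketch of a single call site. The surrounding
functions and variables are made up for illustration; only the
tracing_gen_ctx*() helpers and trace_buffer_lock_reserve() come from the patch.

    /* Before: irq flags and the preempt count travel as two arguments. */
    static void example_call_site_old(struct trace_buffer *buffer, int type, int size)
    {
            struct ring_buffer_event *event;
            unsigned long flags;
            int pc;

            local_save_flags(flags);
            pc = preempt_count();
            event = trace_buffer_lock_reserve(buffer, type, size, flags, pc);
            /* ... fill and commit the event ... */
    }

    /* After: both are packed into a single trace_ctx word. */
    static void example_call_site_new(struct trace_buffer *buffer, int type, int size)
    {
            struct ring_buffer_event *event;
            unsigned int trace_ctx = tracing_gen_ctx();

            event = trace_buffer_lock_reserve(buffer, type, size, trace_ctx);
            /* ... fill and commit the event ... */
    }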
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 4ea0a4e233ba..078ab9495525 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2954,7 +2969,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2955,7 +2970,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2984,7 +2999,7 @@ static void drain_pages_zone(unsigned in
+@@ -2985,7 +3000,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3183,7 +3198,8 @@ static bool free_unref_page_prepare(stru
+@@ -3184,7 +3199,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -3213,10 +3229,8 @@ static void free_unref_page_commit(struc
+@@ -3214,10 +3230,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3227,13 +3241,17 @@ void free_unref_page(struct page *page)
+@@ -3228,13 +3242,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3244,6 +3262,11 @@ void free_unref_page_list(struct list_he
+@@ -3245,6 +3263,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -3256,10 +3279,12 @@ void free_unref_page_list(struct list_he
+@@ -3257,10 +3280,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -3272,6 +3297,21 @@ void free_unref_page_list(struct list_he
+@@ -3273,6 +3298,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch b/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
index b317425feb04..72e8df0d4f00 100644
--- a/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
+++ b/patches/0002-locking-rtmutex-Remove-output-from-deadlock-detector.patch
@@ -252,7 +252,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
-@@ -1764,8 +1757,6 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1763,8 +1756,6 @@ int __rt_mutex_start_proxy_lock(struct r
ret = 0;
}
diff --git a/patches/0002-printk-remove-logbuf_lock-writer-protection-of-ringb.patch b/patches/0002-printk-remove-logbuf_lock-writer-protection-of-ringb.patch
index 16be2f592542..5a9980a1aee9 100644
--- a/patches/0002-printk-remove-logbuf_lock-writer-protection-of-ringb.patch
+++ b/patches/0002-printk-remove-logbuf_lock-writer-protection-of-ringb.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
-@@ -1861,18 +1861,90 @@ static inline u32 printk_caller_id(void)
+@@ -1879,18 +1879,90 @@ static inline u32 printk_caller_id(void)
0x80000000 + raw_smp_processor_id();
}
@@ -145,7 +145,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u16 text_len;
u64 ts_nsec;
-@@ -1885,35 +1957,21 @@ int vprintk_store(int facility, int leve
+@@ -1903,35 +1975,21 @@ int vprintk_store(int facility, int leve
ts_nsec = local_clock();
/*
@@ -196,7 +196,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (level == LOGLEVEL_DEFAULT)
level = default_message_loglevel;
-@@ -1922,9 +1980,10 @@ int vprintk_store(int facility, int leve
+@@ -1940,9 +1998,10 @@ int vprintk_store(int facility, int leve
lflags |= LOG_NEWLINE;
if (lflags & LOG_CONT) {
@@ -209,7 +209,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
r.info->text_len += text_len;
if (lflags & LOG_NEWLINE) {
-@@ -1943,18 +2002,18 @@ int vprintk_store(int facility, int leve
+@@ -1961,18 +2020,18 @@ int vprintk_store(int facility, int leve
* prb_reserve_in_last() and prb_reserve() purposely invalidate the
* structure when they fail.
*/
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (trunc_msg_len)
memcpy(&r.text_buf[text_len], trunc_msg, trunc_msg_len);
r.info->text_len = text_len + trunc_msg_len;
-@@ -1995,10 +2054,9 @@ asmlinkage int vprintk_emit(int facility
+@@ -2013,10 +2072,9 @@ asmlinkage int vprintk_emit(int facility
boot_delay_msec(level);
printk_delay();
diff --git a/patches/tasklets_Use_static_inlines_for_stub_implementations.patch b/patches/0002-tasklets-Use-static-inlines-for-stub-implementations.patch
index 6b92dba44d51..197d513cfc1a 100644
--- a/patches/tasklets_Use_static_inlines_for_stub_implementations.patch
+++ b/patches/0002-tasklets-Use-static-inlines-for-stub-implementations.patch
@@ -1,12 +1,12 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: tasklets: Use static inlines for stub implementations
-Date: Fri, 13 Nov 2020 15:02:25 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:59 +0100
+Subject: [PATCH 02/19] tasklets: Use static inlines for stub implementations
Inlines exist for a reason.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20201204170805.535920431@linutronix.de
---
include/linux/interrupt.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/patches/0002-tracing-Inline-tracing_gen_ctx_flags.patch b/patches/0002-tracing-Inline-tracing_gen_ctx_flags.patch
new file mode 100644
index 000000000000..d38378578051
--- /dev/null
+++ b/patches/0002-tracing-Inline-tracing_gen_ctx_flags.patch
@@ -0,0 +1,173 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 3 Feb 2021 11:05:24 -0500
+Subject: [PATCH 2/4] tracing: Inline tracing_gen_ctx_flags()
+
+Inline tracing_gen_ctx_flags(). This allows to have one ifdef
+CONFIG_TRACE_IRQFLAGS_SUPPORT.
+
+This requires to move `trace_flag_type' so tracing_gen_ctx_flags() can
+use it.
+
+Link: https://lkml.kernel.org/r/20210125194511.3924915-3-bigeasy@linutronix.de
+
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Link: https://lkml.kernel.org/r/20210125140323.6b1ff20c@gandalf.local.home
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/trace_events.h | 54 ++++++++++++++++++++++++++++++++++++++++---
+ kernel/trace/trace.c | 38 +-----------------------------
+ kernel/trace/trace.h | 19 ---------------
+ 3 files changed, 53 insertions(+), 58 deletions(-)
+
+--- a/include/linux/trace_events.h
++++ b/include/linux/trace_events.h
+@@ -160,9 +160,57 @@ static inline void tracing_generic_entry
+ entry->flags = trace_ctx >> 16;
+ }
+
+-unsigned int tracing_gen_ctx_flags(unsigned long irqflags);
+-unsigned int tracing_gen_ctx(void);
+-unsigned int tracing_gen_ctx_dec(void);
++unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
++
++enum trace_flag_type {
++ TRACE_FLAG_IRQS_OFF = 0x01,
++ TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
++ TRACE_FLAG_NEED_RESCHED = 0x04,
++ TRACE_FLAG_HARDIRQ = 0x08,
++ TRACE_FLAG_SOFTIRQ = 0x10,
++ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
++ TRACE_FLAG_NMI = 0x40,
++};
++
++#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
++static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
++{
++ unsigned int irq_status = irqs_disabled_flags(irqflags) ?
++ TRACE_FLAG_IRQS_OFF : 0;
++ return tracing_gen_ctx_irq_test(irq_status);
++}
++static inline unsigned int tracing_gen_ctx(void)
++{
++ unsigned long irqflags;
++
++ local_save_flags(irqflags);
++ return tracing_gen_ctx_flags(irqflags);
++}
++#else
++
++static inline unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
++{
++ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
++}
++static inline unsigned int tracing_gen_ctx(void)
++{
++ return tracing_gen_ctx_irq_test(TRACE_FLAG_IRQS_NOSUPPORT);
++}
++#endif
++
++static inline unsigned int tracing_gen_ctx_dec(void)
++{
++ unsigned int trace_ctx;
++
++ trace_ctx = tracing_gen_ctx();
++ /*
++ * Subtract one from the preeption counter if preemption is enabled,
++ * see trace_event_buffer_reserve()for details.
++ */
++ if (IS_ENABLED(CONFIG_PREEMPTION))
++ trace_ctx--;
++ return trace_ctx;
++}
+
+ struct trace_event_file;
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2578,20 +2578,13 @@ enum print_line_t trace_handle_return(st
+ }
+ EXPORT_SYMBOL_GPL(trace_handle_return);
+
+-unsigned int tracing_gen_ctx_flags(unsigned long irqflags)
++unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
+ {
+- unsigned int trace_flags = 0;
++ unsigned int trace_flags = irqs_status;
+ unsigned int pc;
+
+ pc = preempt_count();
+
+-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+- if (irqs_disabled_flags(irqflags))
+- trace_flags |= TRACE_FLAG_IRQS_OFF;
+-#else
+- trace_flags |= TRACE_FLAG_IRQS_NOSUPPORT;
+-#endif
+-
+ if (pc & NMI_MASK)
+ trace_flags |= TRACE_FLAG_NMI;
+ if (pc & HARDIRQ_MASK)
+@@ -2607,33 +2600,6 @@ unsigned int tracing_gen_ctx_flags(unsig
+ return (trace_flags << 16) | (pc & 0xff);
+ }
+
+-unsigned int tracing_gen_ctx(void)
+-{
+- unsigned long irqflags;
+-
+-#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
+- local_save_flags(irqflags);
+-#else
+- irqflags = 0;
+-#endif
+- return tracing_gen_ctx_flags(irqflags);
+-}
+-
+-unsigned int tracing_gen_ctx_dec(void)
+-{
+- unsigned int trace_ctx;
+-
+- trace_ctx = tracing_gen_ctx();
+-
+- /*
+- * Subtract one from the preeption counter if preemption is enabled,
+- * see trace_event_buffer_reserve()for details.
+- */
+- if (IS_ENABLED(CONFIG_PREEMPTION))
+- trace_ctx--;
+- return trace_ctx;
+-}
+-
+ struct ring_buffer_event *
+ trace_buffer_lock_reserve(struct trace_buffer *buffer,
+ int type,
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -136,25 +136,6 @@ struct kretprobe_trace_entry_head {
+ unsigned long ret_ip;
+ };
+
+-/*
+- * trace_flag_type is an enumeration that holds different
+- * states when a trace occurs. These are:
+- * IRQS_OFF - interrupts were disabled
+- * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
+- * NEED_RESCHED - reschedule is requested
+- * HARDIRQ - inside an interrupt handler
+- * SOFTIRQ - inside a softirq handler
+- */
+-enum trace_flag_type {
+- TRACE_FLAG_IRQS_OFF = 0x01,
+- TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
+- TRACE_FLAG_NEED_RESCHED = 0x04,
+- TRACE_FLAG_HARDIRQ = 0x08,
+- TRACE_FLAG_SOFTIRQ = 0x10,
+- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+- TRACE_FLAG_NMI = 0x40,
+-};
+-
+ #define TRACE_BUF_SIZE 1024
+
+ struct trace_array;
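For reference, the trace_ctx word built by tracing_gen_ctx_irq_test() above
packs the trace flags into the upper 16 bits and the preempt count into the low
byte, and tracing_generic_entry_update() unpacks it again. A small worked
sketch of that encoding (the helper names below are hypothetical; the bit
layout is the one used by the patch):

    /* trace_ctx layout: (trace_flags << 16) | (preempt_count & 0xff) */
    static inline unsigned int pack_trace_ctx(unsigned int trace_flags, unsigned int pc)
    {
            return (trace_flags << 16) | (pc & 0xff);
    }

    static inline unsigned int trace_ctx_flags(unsigned int trace_ctx)
    {
            return trace_ctx >> 16;    /* e.g. TRACE_FLAG_HARDIRQ | TRACE_FLAG_IRQS_OFF */
    }

    static inline unsigned int trace_ctx_preempt_count(unsigned int trace_ctx)
    {
            return trace_ctx & 0xff;   /* preemption depth when the event was recorded */
    }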
diff --git a/patches/0003-printk-use-seqcount_latch-for-clear_seq.patch b/patches/0003-printk-use-seqcount_latch-for-clear_seq.patch
index ecddf7c8cc3f..6f514aace938 100644
--- a/patches/0003-printk-use-seqcount_latch-for-clear_seq.patch
+++ b/patches/0003-printk-use-seqcount_latch-for-clear_seq.patch
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -1476,6 +1517,7 @@ static int syslog_print_all(char __user
+@@ -1494,6 +1535,7 @@ static int syslog_print_all(char __user
struct printk_info info;
unsigned int line_count;
struct printk_record r;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
char *text;
int len = 0;
u64 seq;
-@@ -1487,15 +1529,17 @@ static int syslog_print_all(char __user
+@@ -1505,15 +1547,17 @@ static int syslog_print_all(char __user
time = printk_time;
logbuf_lock_irq();
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (len <= size)
break;
len -= get_record_print_text_size(&info, line_count, true, time);
-@@ -1526,7 +1570,7 @@ static int syslog_print_all(char __user
+@@ -1544,7 +1588,7 @@ static int syslog_print_all(char __user
}
if (clear)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
kfree(text);
-@@ -1536,7 +1580,7 @@ static int syslog_print_all(char __user
+@@ -1554,7 +1598,7 @@ static int syslog_print_all(char __user
static void syslog_clear(void)
{
logbuf_lock_irq();
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
}
-@@ -3269,7 +3313,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3287,7 +3331,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
dumper->active = true;
logbuf_lock_irqsave(flags);
@@ -146,7 +146,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
dumper->next_seq = prb_next_seq(prb);
logbuf_unlock_irqrestore(flags);
-@@ -3476,7 +3520,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
+@@ -3494,7 +3538,7 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
*/
void kmsg_dump_rewind_nolock(struct kmsg_dumper *dumper)
{
diff --git a/patches/0003-tasklets-Provide-tasklet_disable_in_atomic.patch b/patches/0003-tasklets-Provide-tasklet_disable_in_atomic.patch
new file mode 100644
index 000000000000..03ac96b4942c
--- /dev/null
+++ b/patches/0003-tasklets-Provide-tasklet_disable_in_atomic.patch
@@ -0,0 +1,60 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 25 Jan 2021 11:45:00 +0100
+Subject: [PATCH 03/19] tasklets: Provide tasklet_disable_in_atomic()
+
+Replacing the spin wait loops in tasklet_unlock_wait() with
+wait_var_event() is not possible as a handful of tasklet_disable()
+invocations are happening in atomic context. All other invocations are in
+teardown paths which can sleep.
+
+Provide tasklet_disable_in_atomic() and tasklet_unlock_spin_wait() to
+convert the few atomic use cases over, which allows to change
+tasklet_disable() and tasklet_unlock_wait() in a later step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/interrupt.h | 22 ++++++++++++++++++++++
+ 1 file changed, 22 insertions(+)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -671,10 +671,21 @@ static inline void tasklet_unlock_wait(s
+ while (test_bit(TASKLET_STATE_RUN, &t->state))
+ cpu_relax();
+ }
++
++/*
++ * Do not use in new code. There is no real reason to invoke this from
++ * atomic contexts.
++ */
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
++{
++ while (test_bit(TASKLET_STATE_RUN, &t->state))
++ cpu_relax();
++}
+ #else
+ static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
+ static inline void tasklet_unlock(struct tasklet_struct *t) { }
+ static inline void tasklet_unlock_wait(struct tasklet_struct *t) { }
++static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t) { }
+ #endif
+
+ extern void __tasklet_schedule(struct tasklet_struct *t);
+@@ -699,6 +710,17 @@ static inline void tasklet_disable_nosyn
+ smp_mb__after_atomic();
+ }
+
++/*
++ * Do not use in new code. There is no real reason to invoke this from
++ * atomic contexts.
++ */
++static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
++{
++ tasklet_disable_nosync(t);
++ tasklet_unlock_spin_wait(t);
++ smp_mb();
++}
++
+ static inline void tasklet_disable(struct tasklet_struct *t)
+ {
+ tasklet_disable_nosync(t);
diff --git a/patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch b/patches/0003-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch
index 0cf23aad2ca6..788d07f1cdd8 100644
--- a/patches/0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch
+++ b/patches/0003-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Jan 2021 20:37:54 +0100
-Subject: [PATCH 2/3] tracing: Use in_serving_softirq() to deduct softirq
+Date: Wed, 3 Feb 2021 11:05:25 -0500
+Subject: [PATCH 3/4] tracing: Use in_serving_softirq() to deduct softirq
status.
PREEMPT_RT does not report "serving softirq" because the tracing core
@@ -18,6 +18,10 @@ The only difference I noticed by using in_serving_softirq() on
reading FLAG, jmp _tracing_gen_ctx_flags(). Without in_serving_softirq()
it inlined _tracing_gen_ctx_flags() into tracing_gen_ctx_flags().
+Link: https://lkml.kernel.org/r/20210125194511.3924915-4-bigeasy@linutronix.de
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/trace/trace.c | 3 +--
@@ -25,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2596,8 +2596,7 @@ unsigned int _tracing_gen_ctx_flags(unsi
+@@ -2589,8 +2589,7 @@ unsigned int tracing_gen_ctx_irq_test(un
trace_flags |= TRACE_FLAG_NMI;
if (pc & HARDIRQ_MASK)
trace_flags |= TRACE_FLAG_HARDIRQ;
diff --git a/patches/0004-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch b/patches/0004-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
new file mode 100644
index 000000000000..9b55f55a9f78
--- /dev/null
+++ b/patches/0004-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
@@ -0,0 +1,75 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 7 Dec 2020 12:39:58 +0100
+Subject: [PATCH 04/19] tasklets: Replace spin wait in tasklet_unlock_wait()
+
+tasklet_unlock_wait() spin waits for TASKLET_STATE_RUN to be cleared. This
+is wasting CPU cycles in a tight loop which is especially painful in a
+guest when the CPU running the tasklet is scheduled out.
+
+tasklet_unlock_wait() is invoked from tasklet_kill() and tasklet_disable()
+which are used in teardown paths and not performance critical at
+all. Replace the spin wait with wait_var_event().
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/interrupt.h | 13 ++-----------
+ kernel/softirq.c | 18 ++++++++++++++++++
+ 2 files changed, 20 insertions(+), 11 deletions(-)
+
+--- a/include/linux/interrupt.h
++++ b/include/linux/interrupt.h
+@@ -660,17 +660,8 @@ static inline int tasklet_trylock(struct
+ return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
+ }
+
+-static inline void tasklet_unlock(struct tasklet_struct *t)
+-{
+- smp_mb__before_atomic();
+- clear_bit(TASKLET_STATE_RUN, &(t)->state);
+-}
+-
+-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+-{
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
+- cpu_relax();
+-}
++void tasklet_unlock(struct tasklet_struct *t);
++void tasklet_unlock_wait(struct tasklet_struct *t);
+
+ /*
+ * Do not use in new code. There is no real reason to invoke this from
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -25,6 +25,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/tick.h>
+ #include <linux/irq.h>
++#include <linux/wait_bit.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -619,6 +620,23 @@ void tasklet_kill(struct tasklet_struct
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
++#ifdef CONFIG_SMP
++void tasklet_unlock(struct tasklet_struct *t)
++{
++ smp_mb__before_atomic();
++ clear_bit(TASKLET_STATE_RUN, &t->state);
++ smp_mb__after_atomic();
++ wake_up_var(&t->state);
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock);
++
++void tasklet_unlock_wait(struct tasklet_struct *t)
++{
++ wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
++}
++EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
++#endif
++
+ void __init softirq_init(void)
+ {
+ int cpu;
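The patch above pairs wake_up_var(&t->state) in tasklet_unlock() with
wait_var_event(&t->state, ...) in tasklet_unlock_wait(). As a minimal sketch of
that generic wait_var_event()/wake_up_var() pattern (the 'busy' flag and the
two functions below are hypothetical):

    #include <linux/wait_bit.h>

    static unsigned long busy;

    static void finish_work(void)
    {
            clear_bit(0, &busy);
            smp_mb__after_atomic();        /* order the clear before the wakeup */
            wake_up_var(&busy);            /* wake sleepers waiting on &busy */
    }

    static void wait_for_work(void)
    {
            /* Sleeps until the condition holds; rechecked after every wakeup. */
            wait_var_event(&busy, !test_bit(0, &busy));
    }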
diff --git a/patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch b/patches/0004-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch
index 10e686b4bf29..05e77c971d52 100644
--- a/patches/0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch
+++ b/patches/0004-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 12 Jan 2021 21:01:38 +0100
-Subject: [PATCH 3/3] tracing: Remove NULL check from current in
+Date: Wed, 3 Feb 2021 11:05:26 -0500
+Subject: [PATCH 4/4] tracing: Remove NULL check from current in
tracing_generic_entry_update().
I can't imagine when or why `current' would return a NULL pointer. This
@@ -11,6 +11,10 @@ but it doesn't give me hint why it was needed.
Assume `current' never returns a NULL pointer and remove the check.
+Link: https://lkml.kernel.org/r/20210125194511.3924915-5-bigeasy@linutronix.de
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/trace_events.h | 4 +---
diff --git a/patches/0005-printk-add-syslog_lock.patch b/patches/0005-printk-add-syslog_lock.patch
index aa80c498dfff..9064b82c7216 100644
--- a/patches/0005-printk-add-syslog_lock.patch
+++ b/patches/0005-printk-add-syslog_lock.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void latched_seq_write(struct latched_seq *ls, u64 val)
{
raw_write_seqcount_latch(&ls->latch);
-@@ -1462,7 +1466,9 @@ static int syslog_print(char __user *buf
+@@ -1480,7 +1484,9 @@ static int syslog_print(char __user *buf
size_t skip;
logbuf_lock_irq();
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
break;
}
-@@ -1492,6 +1498,7 @@ static int syslog_print(char __user *buf
+@@ -1510,6 +1516,7 @@ static int syslog_print(char __user *buf
syslog_partial += n;
} else
n = 0;
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
if (!n)
-@@ -1569,8 +1576,11 @@ static int syslog_print_all(char __user
+@@ -1587,8 +1594,11 @@ static int syslog_print_all(char __user
break;
}
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
kfree(text);
-@@ -1580,7 +1590,9 @@ static int syslog_print_all(char __user
+@@ -1598,7 +1608,9 @@ static int syslog_print_all(char __user
static void syslog_clear(void)
{
logbuf_lock_irq();
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
}
-@@ -1589,6 +1601,7 @@ int do_syslog(int type, char __user *buf
+@@ -1607,6 +1619,7 @@ int do_syslog(int type, char __user *buf
bool clear = false;
static int saved_console_loglevel = LOGLEVEL_DEFAULT;
int error;
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = check_syslog_permissions(type, source);
if (error)
-@@ -1606,8 +1619,14 @@ int do_syslog(int type, char __user *buf
+@@ -1624,8 +1637,14 @@ int do_syslog(int type, char __user *buf
return 0;
if (!access_ok(buf, len))
return -EFAULT;
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (error)
return error;
error = syslog_print(buf, len);
-@@ -1656,6 +1675,7 @@ int do_syslog(int type, char __user *buf
+@@ -1674,6 +1693,7 @@ int do_syslog(int type, char __user *buf
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
logbuf_lock_irq();
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (syslog_seq < prb_first_valid_seq(prb)) {
/* messages are gone, move to first one */
syslog_seq = prb_first_valid_seq(prb);
-@@ -1682,6 +1702,7 @@ int do_syslog(int type, char __user *buf
+@@ -1700,6 +1720,7 @@ int do_syslog(int type, char __user *buf
}
error -= syslog_partial;
}
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
logbuf_unlock_irq();
break;
/* Size of the log buffer */
-@@ -2925,7 +2946,12 @@ void register_console(struct console *ne
+@@ -2943,7 +2964,12 @@ void register_console(struct console *ne
*/
exclusive_console = newcon;
exclusive_console_stop_seq = console_seq;
diff --git a/patches/0005-tasklets-Replace-spin-wait-in-tasklet_kill.patch b/patches/0005-tasklets-Replace-spin-wait-in-tasklet_kill.patch
new file mode 100644
index 000000000000..9926169bdbeb
--- /dev/null
+++ b/patches/0005-tasklets-Replace-spin-wait-in-tasklet_kill.patch
@@ -0,0 +1,66 @@
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Mon, 7 Dec 2020 12:47:43 +0100
+Subject: [PATCH 05/19] tasklets: Replace spin wait in tasklet_kill()
+
+tasklet_kill() spin waits for TASKLET_STATE_SCHED to be cleared invoking
+yield() from inside the loop. yield() is an ill defined mechanism and the
+result might still be wasting CPU cycles in a tight loop which is
+especially painful in a guest when the CPU running the tasklet is scheduled
+out.
+
+tasklet_kill() is used in teardown paths and not performance critical at
+all. Replace the spin wait with wait_var_event().
+
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ kernel/softirq.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/kernel/softirq.c
++++ b/kernel/softirq.c
+@@ -530,6 +530,16 @@ void __tasklet_hi_schedule(struct taskle
+ }
+ EXPORT_SYMBOL(__tasklet_hi_schedule);
+
++static inline bool tasklet_clear_sched(struct tasklet_struct *t)
++{
++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
++ wake_up_var(&t->state);
++ return true;
++ }
++
++ return false;
++}
++
+ static void tasklet_action_common(struct softirq_action *a,
+ struct tasklet_head *tl_head,
+ unsigned int softirq_nr)
+@@ -549,8 +559,7 @@ static void tasklet_action_common(struct
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
+- &t->state))
++ if (!tasklet_clear_sched(t))
+ BUG();
+ if (t->use_callback)
+ t->callback(t);
+@@ -610,13 +619,11 @@ void tasklet_kill(struct tasklet_struct
+ if (in_interrupt())
+ pr_notice("Attempt to kill tasklet from interrupt\n");
+
+- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
+- do {
+- yield();
+- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
+- }
++ while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
++ wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
++
+ tasklet_unlock_wait(t);
+- clear_bit(TASKLET_STATE_SCHED, &t->state);
++ tasklet_clear_sched(t);
+ }
+ EXPORT_SYMBOL(tasklet_kill);
+
diff --git a/patches/0006-define-CONSOLE_LOG_MAX-in-printk.h.patch b/patches/0006-define-CONSOLE_LOG_MAX-in-printk.h.patch
index e7474f4cbcae..b737aa4a8c1b 100644
--- a/patches/0006-define-CONSOLE_LOG_MAX-in-printk.h.patch
+++ b/patches/0006-define-CONSOLE_LOG_MAX-in-printk.h.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define LOG_LEVEL(v) ((v) & 0x07)
#define LOG_FACILITY(v) ((v) >> 3 & 0xff)
-@@ -1455,11 +1455,11 @@ static int syslog_print(char __user *buf
+@@ -1473,11 +1473,11 @@ static int syslog_print(char __user *buf
char *text;
int len = 0;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (size > 0) {
size_t n;
-@@ -1530,7 +1530,7 @@ static int syslog_print_all(char __user
+@@ -1548,7 +1548,7 @@ static int syslog_print_all(char __user
u64 seq;
bool time;
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!text)
return -ENOMEM;
-@@ -1552,7 +1552,7 @@ static int syslog_print_all(char __user
+@@ -1570,7 +1570,7 @@ static int syslog_print_all(char __user
len -= get_record_print_text_size(&info, line_count, true, time);
}
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len = 0;
prb_for_each_record(seq, prb, seq, &r) {
-@@ -2194,8 +2194,6 @@ EXPORT_SYMBOL(printk);
+@@ -2212,8 +2212,6 @@ EXPORT_SYMBOL(printk);
#else /* CONFIG_PRINTK */
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define printk_time false
#define prb_read_valid(rb, seq, r) false
-@@ -2506,7 +2504,7 @@ static inline int can_use_console(void)
+@@ -2524,7 +2522,7 @@ static inline int can_use_console(void)
void console_unlock(void)
{
static char ext_text[CONSOLE_EXT_LOG_MAX];
diff --git a/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch b/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
index 26d919fdb2fc..b0ee73c83207 100644
--- a/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
+++ b/patches/0006-futex-Fix-bug-on-when-a-requeued-RT-task-times-out.patch
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1721,6 +1722,34 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1720,6 +1721,34 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
diff --git a/patches/tasklets_Prevent_kill_unlock_wait_deadlock_on_RT.patch b/patches/0006-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
index 029987bd40d4..71f39962e7e7 100644
--- a/patches/tasklets_Prevent_kill_unlock_wait_deadlock_on_RT.patch
+++ b/patches/0006-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
@@ -1,13 +1,11 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: tasklets: Prevent kill/unlock_wait deadlock on RT
-Date: Fri, 13 Nov 2020 15:02:26 +0100
+Date: Fri, 4 Dec 2020 18:02:00 +0100
+Subject: [PATCH 06/19] tasklets: Prevent tasklet_unlock_spin_wait() deadlock
+ on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-
-tasklet_kill() and tasklet_unlock_wait() spin and wait for the
-TASKLET_STATE_SCHED resp. TASKLET_STATE_RUN bit in the tasklet state to be
-cleared. This works on !RT nicely because the corresponding execution can
-only happen on a different CPU.
+tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_SCHED bit in
+the tasklet state to be cleared. This works on !RT nicely because the
+corresponding execution can only happen on a different CPU.
On RT softirq processing is preemptible, therefore a task preempting the
softirq processing thread can spin forever.
@@ -20,10 +18,11 @@ then the local_bh_disable()/enable() pair is just a waste of processor
cycles.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/interrupt.h | 8 ++------
- kernel/softirq.c | 38 +++++++++++++++++++++++++++++++++++++-
- 2 files changed, 39 insertions(+), 7 deletions(-)
+ include/linux/interrupt.h | 13 ++-----------
+ kernel/softirq.c | 26 ++++++++++++++++++++++++++
+ 2 files changed, 28 insertions(+), 11 deletions(-)
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -36,28 +35,36 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int tasklet_trylock(struct tasklet_struct *t)
{
return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
-@@ -666,11 +666,7 @@ static inline void tasklet_unlock(struct
- clear_bit(TASKLET_STATE_RUN, &(t)->state);
- }
+@@ -662,16 +662,7 @@ static inline int tasklet_trylock(struct
--static inline void tasklet_unlock_wait(struct tasklet_struct *t)
+ void tasklet_unlock(struct tasklet_struct *t);
+ void tasklet_unlock_wait(struct tasklet_struct *t);
+-
+-/*
+- * Do not use in new code. There is no real reason to invoke this from
+- * atomic contexts.
+- */
+-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-- while (test_bit(TASKLET_STATE_RUN, &(t)->state))
+- while (test_bit(TASKLET_STATE_RUN, &t->state))
- cpu_relax();
-}
-+void tasklet_unlock_wait(struct tasklet_struct *t);
++void tasklet_unlock_spin_wait(struct tasklet_struct *t);
#else
static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
static inline void tasklet_unlock(struct tasklet_struct *t) { }
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -818,6 +818,29 @@ void tasklet_init(struct tasklet_struct
+@@ -614,6 +614,32 @@ void tasklet_init(struct tasklet_struct
}
EXPORT_SYMBOL(tasklet_init);
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
-+
-+void tasklet_unlock_wait(struct tasklet_struct *t)
++/*
++ * Do not use in new code. There is no real reason to invoke this from
++ * atomic contexts.
++ */
++void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
@@ -75,31 +82,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ }
+ }
+}
-+EXPORT_SYMBOL(tasklet_unlock_wait);
++EXPORT_SYMBOL(tasklet_unlock_spin_wait);
+#endif
+
void tasklet_kill(struct tasklet_struct *t)
{
if (in_interrupt())
-@@ -825,7 +848,20 @@ void tasklet_kill(struct tasklet_struct
-
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do {
-- yield();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-+ /*
-+ * Prevent a live lock when current
-+ * preempted soft interrupt processing or
-+ * prevents ksoftirqd from running. If the
-+ * tasklet runs on a different CPU then
-+ * this has no effect other than doing the
-+ * BH disable/enable dance for nothing.
-+ */
-+ local_bh_disable();
-+ local_bh_enable();
-+ } else {
-+ yield();
-+ }
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
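For reference, the RT-aware wait loop that the hunks above converge on can be
condensed into the following sketch (reconstructed from the hunks; the
authoritative code is in kernel/softirq.c of the patched tree):

void tasklet_unlock_spin_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
			/*
			 * On RT the BH disable/enable pair blocks on the
			 * per-CPU softirq lock, letting the preempted
			 * softirq processing (or ksoftirqd) run to
			 * completion instead of live locking.
			 */
			local_bh_disable();
			local_bh_enable();
		} else {
			cpu_relax();
		}
	}
}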
diff --git a/patches/0007-printk-track-limit-recursion.patch b/patches/0007-printk-track-limit-recursion.patch
index d0c80b20dce5..baa31f730da1 100644
--- a/patches/0007-printk-track-limit-recursion.patch
+++ b/patches/0007-printk-track-limit-recursion.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1906,6 +1906,65 @@ static void call_console_drivers(const c
+@@ -1924,6 +1924,65 @@ static void call_console_drivers(const c
}
}
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int printk_delay_msec __read_mostly;
static inline void printk_delay(void)
-@@ -2006,11 +2065,13 @@ int vprintk_store(int facility, int leve
+@@ -2024,11 +2083,13 @@ int vprintk_store(int facility, int leve
struct prb_reserved_entry e;
enum log_flags lflags = 0;
struct printk_record r;
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u64 ts_nsec;
/*
-@@ -2021,6 +2082,9 @@ int vprintk_store(int facility, int leve
+@@ -2039,6 +2100,9 @@ int vprintk_store(int facility, int leve
*/
ts_nsec = local_clock();
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The sprintf needs to come first since the syslog prefix might be
* passed in as a parameter. An extra byte must be reserved so that
-@@ -2058,7 +2122,8 @@ int vprintk_store(int facility, int leve
+@@ -2076,7 +2140,8 @@ int vprintk_store(int facility, int leve
prb_commit(&e);
}
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -2074,7 +2139,7 @@ int vprintk_store(int facility, int leve
+@@ -2092,7 +2157,7 @@ int vprintk_store(int facility, int leve
prb_rec_init_wr(&r, reserve_size + trunc_msg_len);
if (!prb_reserve(&e, prb, &r))
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* fill message */
-@@ -2096,7 +2161,10 @@ int vprintk_store(int facility, int leve
+@@ -2114,7 +2179,10 @@ int vprintk_store(int facility, int leve
else
prb_final_commit(&e);
diff --git a/patches/softirq_Add_RT_specific_softirq_accounting.patch b/patches/0007-softirq-Add-RT-specific-softirq-accounting.patch
index 759cb05d3602..e03780c97da5 100644
--- a/patches/softirq_Add_RT_specific_softirq_accounting.patch
+++ b/patches/0007-softirq-Add-RT-specific-softirq-accounting.patch
@@ -1,8 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: softirq: Add RT specific softirq accounting
-Date: Fri, 13 Nov 2020 15:02:19 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:52 +0100
+Subject: [PATCH 07/19] softirq: Add RT specific softirq accounting
RT requires the softirq processing and local bottomhalf disabled regions to
be preemptible. Using the normal preempt count based serialization is
@@ -18,8 +16,8 @@ Add a RT only counter to task struct and adjust the relevant macros in
preempt.h.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-V2: Rewrote changelog.
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
---
include/linux/hardirq.h | 1 +
include/linux/preempt.h | 6 +++++-
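The net effect on the preempt.h macros is roughly the following sketch
(reconstructed from the series, not part of this delta; SOFTIRQ_MASK is the
existing mask from <linux/preempt.h>):

#ifdef CONFIG_PREEMPT_RT
/* RT: softirq-disable depth is tracked per task, not in preempt_count(). */
# define softirq_count()	(current->softirq_disable_cnt & SOFTIRQ_MASK)
#else
# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#endif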
diff --git a/patches/0008-irqtime-Make-accounting-correct-on-RT.patch b/patches/0008-irqtime-Make-accounting-correct-on-RT.patch
new file mode 100644
index 000000000000..509d10421a6b
--- /dev/null
+++ b/patches/0008-irqtime-Make-accounting-correct-on-RT.patch
@@ -0,0 +1,46 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:53 +0100
+Subject: [PATCH 08/19] irqtime: Make accounting correct on RT
+
+vtime_account_irq() and irqtime_account_irq() base their checks on
+preempt_count(), which fails on RT because preempt_count() does not contain
+the softirq accounting, which is separate on RT.
+
+These checks do not need the full preempt count as they only operate on the
+hard and softirq sections.
+
+Use irq_count() instead which provides the correct value on both RT and non
+RT kernels. The compiler is clever enough to fold the masking for !RT:
+
+ 99b: 65 8b 05 00 00 00 00 mov %gs:0x0(%rip),%eax
+ - 9a2: 25 ff ff ff 7f and $0x7fffffff,%eax
+ + 9a2: 25 00 ff ff 00 and $0xffff00,%eax
+
+Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
+---
+ kernel/sched/cputime.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -60,7 +60,7 @@ void irqtime_account_irq(struct task_str
+ cpu = smp_processor_id();
+ delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
+ irqtime->irq_start_time += delta;
+- pc = preempt_count() - offset;
++ pc = irq_count() - offset;
+
+ /*
+ * We do not account for softirq time from ksoftirqd here.
+@@ -421,7 +421,7 @@ void vtime_task_switch(struct task_struc
+
+ void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
+ {
+- unsigned int pc = preempt_count() - offset;
++ unsigned int pc = irq_count() - offset;
+
+ if (pc & HARDIRQ_OFFSET) {
+ vtime_account_hardirq(tsk);
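To illustrate the masking the changelog's disassembly refers to: irq_count()
keeps only the NMI/hardirq/softirq sections of the counter, which is where
the 0xffff00 constant comes from. The mask values below are quoted from
<linux/preempt.h> for illustration only:

#define PREEMPT_MASK	0x000000ffUL	/* preemption disable depth     */
#define SOFTIRQ_MASK	0x0000ff00UL	/* softirq nesting / BH disable */
#define HARDIRQ_MASK	0x000f0000UL	/* hardirq nesting              */
#define NMI_MASK	0x00f00000UL	/* NMI nesting                  */

/*
 * On !RT: irq_count() == preempt_count() & (NMI_MASK | HARDIRQ_MASK |
 * SOFTIRQ_MASK) == preempt_count() & 0x00ffff00, which is exactly the
 * "and" constant in the second disassembly line above.
 */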
diff --git a/patches/0008-printk-invoke-dumper-using-a-copy-of-the-iterator.patch b/patches/0008-printk-invoke-dumper-using-a-copy-of-the-iterator.patch
index d8144d54e6ee..f4358da33a6d 100644
--- a/patches/0008-printk-invoke-dumper-using-a-copy-of-the-iterator.patch
+++ b/patches/0008-printk-invoke-dumper-using-a-copy-of-the-iterator.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3389,6 +3389,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3407,6 +3407,7 @@ void kmsg_dump(enum kmsg_dump_reason rea
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
enum kmsg_dump_reason max_reason = dumper->max_reason;
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If client has not provided a specific max_reason, default
-@@ -3401,19 +3402,20 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3419,19 +3420,20 @@ void kmsg_dump(enum kmsg_dump_reason rea
if (reason > max_reason)
continue;
diff --git a/patches/0009-printk-remove-logbuf_lock.patch b/patches/0009-printk-remove-logbuf_lock.patch
index 8db85030bcab..484a1569b4c3 100644
--- a/patches/0009-printk-remove-logbuf_lock.patch
+++ b/patches/0009-printk-remove-logbuf_lock.patch
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
file->private_data = user;
return 0;
-@@ -1465,11 +1419,9 @@ static int syslog_print(char __user *buf
+@@ -1483,11 +1437,9 @@ static int syslog_print(char __user *buf
size_t n;
size_t skip;
@@ -183,7 +183,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
}
if (r.info->seq != syslog_seq) {
-@@ -1498,8 +1450,7 @@ static int syslog_print(char __user *buf
+@@ -1516,8 +1468,7 @@ static int syslog_print(char __user *buf
syslog_partial += n;
} else
n = 0;
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!n)
break;
-@@ -1524,6 +1475,7 @@ static int syslog_print_all(char __user
+@@ -1542,6 +1493,7 @@ static int syslog_print_all(char __user
struct printk_info info;
unsigned int line_count;
struct printk_record r;
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u64 clr_seq;
char *text;
int len = 0;
-@@ -1535,19 +1487,25 @@ static int syslog_print_all(char __user
+@@ -1553,19 +1505,25 @@ static int syslog_print_all(char __user
return -ENOMEM;
time = printk_time;
@@ -230,7 +230,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
len -= get_record_print_text_size(&info, line_count, true, time);
}
-@@ -1565,23 +1523,20 @@ static int syslog_print_all(char __user
+@@ -1583,23 +1541,20 @@ static int syslog_print_all(char __user
break;
}
@@ -256,7 +256,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kfree(text);
return len;
-@@ -1589,11 +1544,9 @@ static int syslog_print_all(char __user
+@@ -1607,11 +1562,9 @@ static int syslog_print_all(char __user
static void syslog_clear(void)
{
@@ -270,7 +270,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int do_syslog(int type, char __user *buf, int len, int source)
-@@ -1674,8 +1627,7 @@ int do_syslog(int type, char __user *buf
+@@ -1692,8 +1645,7 @@ int do_syslog(int type, char __user *buf
break;
/* Number of chars in the log buffer */
case SYSLOG_ACTION_SIZE_UNREAD:
@@ -280,7 +280,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (syslog_seq < prb_first_valid_seq(prb)) {
/* messages are gone, move to first one */
syslog_seq = prb_first_valid_seq(prb);
-@@ -1702,8 +1654,7 @@ int do_syslog(int type, char __user *buf
+@@ -1720,8 +1672,7 @@ int do_syslog(int type, char __user *buf
}
error -= syslog_partial;
}
@@ -290,7 +290,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
/* Size of the log buffer */
case SYSLOG_ACTION_SIZE_BUFFER:
-@@ -2171,9 +2122,9 @@ asmlinkage int vprintk_emit(int facility
+@@ -2189,9 +2140,9 @@ asmlinkage int vprintk_emit(int facility
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
@@ -301,7 +301,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Suppress unimportant messages after panic happens */
if (unlikely(suppress_printk))
-@@ -2619,7 +2570,6 @@ void console_unlock(void)
+@@ -2637,7 +2588,6 @@ void console_unlock(void)
size_t len;
printk_safe_enter_irqsave(flags);
@@ -309,7 +309,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skip:
if (!prb_read_valid(prb, console_seq, &r))
break;
-@@ -2663,7 +2613,6 @@ void console_unlock(void)
+@@ -2681,7 +2631,6 @@ void console_unlock(void)
console_msg_format & MSG_FORMAT_SYSLOG,
printk_time);
console_seq++;
@@ -317,7 +317,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* While actively printing out messages, if another printk()
-@@ -2690,8 +2639,6 @@ void console_unlock(void)
+@@ -2708,8 +2657,6 @@ void console_unlock(void)
console_locked = 0;
@@ -326,7 +326,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
up_console_sem();
/*
-@@ -2700,9 +2647,7 @@ void console_unlock(void)
+@@ -2718,9 +2665,7 @@ void console_unlock(void)
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
@@ -336,7 +336,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_safe_exit_irqrestore(flags);
if (retry && console_trylock())
-@@ -2766,13 +2711,8 @@ void console_flush_on_panic(enum con_flu
+@@ -2784,13 +2729,8 @@ void console_flush_on_panic(enum con_flu
console_trylock();
console_may_schedule = 0;
@@ -351,7 +351,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
}
-@@ -2999,9 +2939,7 @@ void register_console(struct console *ne
+@@ -3017,9 +2957,7 @@ void register_console(struct console *ne
/*
* console_unlock(); will print out the buffered messages
* for us.
@@ -362,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We're about to replay the log buffer. Only do this to the
* just-registered console to avoid excessive message spam to
* the already-registered consoles.
-@@ -3014,11 +2952,9 @@ void register_console(struct console *ne
+@@ -3032,11 +2970,9 @@ void register_console(struct console *ne
exclusive_console_stop_seq = console_seq;
/* Get a consistent copy of @syslog_seq. */
@@ -376,7 +376,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
console_unlock();
console_sysfs_notify();
-@@ -3384,7 +3320,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
+@@ -3402,7 +3338,6 @@ EXPORT_SYMBOL_GPL(kmsg_dump_reason_str);
void kmsg_dump(enum kmsg_dump_reason reason)
{
struct kmsg_dumper *dumper;
@@ -384,7 +384,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_lock();
list_for_each_entry_rcu(dumper, &dump_list, list) {
-@@ -3412,10 +3347,8 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3430,10 +3365,8 @@ void kmsg_dump(enum kmsg_dump_reason rea
INIT_LIST_HEAD(&dumper_copy.list);
dumper_copy.active = true;
@@ -395,7 +395,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_unlock();
}
-@@ -3496,14 +3429,7 @@ bool kmsg_dump_get_line_nolock(struct km
+@@ -3514,14 +3447,7 @@ bool kmsg_dump_get_line_nolock(struct km
bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
char *line, size_t size, size_t *len)
{
@@ -411,7 +411,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
-@@ -3532,7 +3458,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+@@ -3550,7 +3476,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
struct printk_info info;
unsigned int line_count;
struct printk_record r;
@@ -419,7 +419,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u64 seq;
u64 next_seq;
size_t l = 0;
-@@ -3544,17 +3469,14 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+@@ -3562,17 +3487,14 @@ bool kmsg_dump_get_buffer(struct kmsg_du
if (!dumper->active || !buf || !size)
goto out;
@@ -438,7 +438,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* calculate length of entire buffer */
seq = dumper->cur_seq;
-@@ -3594,7 +3516,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+@@ -3612,7 +3534,6 @@ bool kmsg_dump_get_buffer(struct kmsg_du
dumper->next_seq = next_seq;
ret = true;
@@ -446,7 +446,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out:
if (len)
*len = l;
-@@ -3628,11 +3549,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
+@@ -3646,11 +3567,7 @@ void kmsg_dump_rewind_nolock(struct kmsg
*/
void kmsg_dump_rewind(struct kmsg_dumper *dumper)
{
diff --git a/patches/softirq_Move_various_protections_into_inline_helpers.patch b/patches/0009-softirq-Move-various-protections-into-inline-helpers.patch
index 959a06de1166..b592f8385ccf 100644
--- a/patches/softirq_Move_various_protections_into_inline_helpers.patch
+++ b/patches/0009-softirq-Move-various-protections-into-inline-helpers.patch
@@ -1,21 +1,21 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: softirq: Move various protections into inline helpers
-Date: Fri, 13 Nov 2020 15:02:20 +0100
+Date: Fri, 4 Dec 2020 18:01:54 +0100
+Subject: [PATCH 09/19] softirq: Move various protections into inline helpers
To allow reuse of the bulk of softirq processing code for RT and to avoid
#ifdeffery all over the place, split protections for various code sections
out into inline helpers so the RT variant can just replace them in one go.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-V2: Adapt to Frederics rework
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
---
kernel/softirq.c | 39 ++++++++++++++++++++++++++++++++-------
1 file changed, 32 insertions(+), 7 deletions(-)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -204,6 +204,32 @@ void __local_bh_enable_ip(unsigned long
+@@ -205,6 +205,32 @@ void __local_bh_enable_ip(unsigned long
}
EXPORT_SYMBOL(__local_bh_enable_ip);
@@ -48,7 +48,7 @@ V2: Adapt to Frederics rework
static inline void invoke_softirq(void)
{
if (ksoftirqd_running(local_softirq_pending()))
-@@ -316,7 +342,7 @@ asmlinkage __visible void __softirq_entr
+@@ -317,7 +343,7 @@ asmlinkage __visible void __softirq_entr
pending = local_softirq_pending();
@@ -57,7 +57,7 @@ V2: Adapt to Frederics rework
in_hardirq = lockdep_softirq_start();
account_softirq_enter(current);
-@@ -367,8 +393,7 @@ asmlinkage __visible void __softirq_entr
+@@ -368,8 +394,7 @@ asmlinkage __visible void __softirq_entr
account_softirq_exit(current);
lockdep_softirq_end(in_hardirq);
@@ -67,7 +67,7 @@ V2: Adapt to Frederics rework
current_restore_flags(old_flags, PF_MEMALLOC);
}
-@@ -463,7 +488,7 @@ inline void raise_softirq_irqoff(unsigne
+@@ -464,7 +489,7 @@ inline void raise_softirq_irqoff(unsigne
* Otherwise we wake up ksoftirqd to make sure we
* schedule the softirq soon.
*/
@@ -76,7 +76,7 @@ V2: Adapt to Frederics rework
wakeup_softirqd();
}
-@@ -641,18 +666,18 @@ static int ksoftirqd_should_run(unsigned
+@@ -692,18 +717,18 @@ static int ksoftirqd_should_run(unsigned
static void run_ksoftirqd(unsigned int cpu)
{
diff --git a/patches/softirq_Make_softirq_control_and_processing_RT_aware.patch b/patches/0010-softirq-Make-softirq-control-and-processing-RT-aware.patch
index a4c56cdf50d8..9fc91fb9dfd6 100644
--- a/patches/softirq_Make_softirq_control_and_processing_RT_aware.patch
+++ b/patches/0010-softirq-Make-softirq-control-and-processing-RT-aware.patch
@@ -1,8 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: softirq: Make softirq control and processing RT aware
-Date: Fri, 13 Nov 2020 15:02:21 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:55 +0100
+Subject: [PATCH 10/19] softirq: Make softirq control and processing RT aware
Provide a local lock based serialization for soft interrupts on RT which
allows the local_bh_disabled() sections and servicing soft interrupts to be
@@ -12,8 +10,8 @@ Provide the necessary inline helpers which allow to reuse the bulk of the
softirq processing code.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
-V2: Adjusted to Frederic's changes
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
---
include/linux/bottom_half.h | 2
kernel/softirq.c | 188 ++++++++++++++++++++++++++++++++++++++++++--
@@ -40,7 +38,7 @@ V2: Adjusted to Frederic's changes
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
-@@ -100,20 +101,189 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_contex
+@@ -101,20 +102,189 @@ EXPORT_PER_CPU_SYMBOL_GPL(hardirq_contex
#endif
/*
@@ -156,7 +154,7 @@ V2: Adjusted to Frederic's changes
+ lockdep_assert_irqs_enabled();
+
+ local_irq_save(flags);
-+ curcnt = this_cpu_read(softirq_ctrl.cnt);
++ curcnt = __this_cpu_read(softirq_ctrl.cnt);
+
+ /*
+ * If this is not reenabling soft interrupts, no point in trying to
@@ -236,7 +234,7 @@ V2: Adjusted to Frederic's changes
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
{
unsigned long flags;
-@@ -274,6 +444,8 @@ asmlinkage __visible void do_softirq(voi
+@@ -275,6 +445,8 @@ asmlinkage __visible void do_softirq(voi
local_irq_restore(flags);
}
@@ -245,7 +243,7 @@ V2: Adjusted to Frederic's changes
/*
* We restart softirq processing for at most MAX_SOFTIRQ_RESTART times,
* but break the loop if need_resched() is set or after 2 ms.
-@@ -378,8 +550,10 @@ asmlinkage __visible void __softirq_entr
+@@ -379,8 +551,10 @@ asmlinkage __visible void __softirq_entr
pending >>= softirq_bit;
}
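The per-CPU serialization this patch builds on is the generic local-lock API.
A minimal, self-contained sketch of that pattern (illustrative names, not the
actual softirq code):

#include <linux/local_lock.h>
#include <linux/percpu.h>

struct bh_ctrl {
	local_lock_t	lock;
	int		cnt;
};

static DEFINE_PER_CPU(struct bh_ctrl, bh_ctrl) = {
	.lock	= INIT_LOCAL_LOCK(bh_ctrl.lock),
};

/*
 * Sketch only: on PREEMPT_RT the local_lock is a per-CPU sleeping lock,
 * so the section stays preemptible while still being serialized against
 * softirq processing on the same CPU.
 */
static void bh_section_enter(void)
{
	local_lock(&bh_ctrl.lock);
	this_cpu_inc(bh_ctrl.cnt);
}

static void bh_section_exit(void)
{
	this_cpu_dec(bh_ctrl.cnt);
	local_unlock(&bh_ctrl.lock);
}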
diff --git a/patches/0011-printk-refactor-kmsg_dump_get_buffer.patch b/patches/0011-printk-refactor-kmsg_dump_get_buffer.patch
index 26ac0194afad..8731faabf423 100644
--- a/patches/0011-printk-refactor-kmsg_dump_get_buffer.patch
+++ b/patches/0011-printk-refactor-kmsg_dump_get_buffer.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3450,19 +3450,17 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
+@@ -3468,19 +3468,17 @@ EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
* read.
*/
bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!dumper->active || !buf || !size)
goto out;
-@@ -3475,47 +3473,50 @@ bool kmsg_dump_get_buffer(struct kmsg_du
+@@ -3493,47 +3491,50 @@ bool kmsg_dump_get_buffer(struct kmsg_du
if (dumper->cur_seq >= dumper->next_seq)
goto out;
diff --git a/patches/tick_sched_Prevent_false_positive_softirq_pending_warnings_on_RT.patch b/patches/0011-tick-sched-Prevent-false-positive-softirq-pending-wa.patch
index 5f25f1289c64..32053d8c7fdf 100644
--- a/patches/tick_sched_Prevent_false_positive_softirq_pending_warnings_on_RT.patch
+++ b/patches/0011-tick-sched-Prevent-false-positive-softirq-pending-wa.patch
@@ -1,8 +1,7 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: tick/sched: Prevent false positive softirq pending warnings on RT
-Date: Fri, 13 Nov 2020 15:02:22 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:56 +0100
+Subject: [PATCH 11/19] tick/sched: Prevent false positive softirq pending
+ warnings on RT
On RT a task which has soft interrupts disabled can block on a lock and
schedule out to idle while soft interrupts are pending. This triggers the
@@ -15,6 +14,8 @@ To prevent that check the per CPU state which indicates that a scheduled
out task has soft interrupts disabled.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
---
include/linux/bottom_half.h | 6 ++++++
kernel/softirq.c | 15 +++++++++++++++
@@ -36,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -138,6 +138,21 @@ static DEFINE_PER_CPU(struct softirq_ctr
+@@ -139,6 +139,21 @@ static DEFINE_PER_CPU(struct softirq_ctr
.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
};
@@ -52,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ */
+bool local_bh_blocked(void)
+{
-+ return this_cpu_read(softirq_ctrl.cnt) != 0;
++ return __this_cpu_read(softirq_ctrl.cnt) != 0;
+}
+
void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
diff --git a/patches/0012-console-add-write_atomic-interface.patch b/patches/0012-console-add-write_atomic-interface.patch
index ae9c84d0cd8c..e180b81db014 100644
--- a/patches/0012-console-add-write_atomic-interface.patch
+++ b/patches/0012-console-add-write_atomic-interface.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* _LINUX_CONSOLE_H */
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3552,3 +3552,103 @@ void kmsg_dump_rewind(struct kmsg_dumper
+@@ -3570,3 +3570,103 @@ void kmsg_dump_rewind(struct kmsg_dumper
EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
#endif
diff --git a/patches/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch b/patches/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
index 682db58002dc..e333c4ef5fbf 100644
--- a/patches/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ b/patches/0012-locking-rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This is the control structure for tasks blocked on a rt_mutex,
-@@ -154,6 +155,12 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -153,6 +154,12 @@ extern bool __rt_mutex_futex_unlock(stru
struct wake_q_head *wqh);
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
diff --git a/patches/rcu_Prevent_false_positive_softirq_warning_on_RT.patch b/patches/0012-rcu-Prevent-false-positive-softirq-warning-on-RT.patch
index 1ac6defa709e..979c0336d0a2 100644
--- a/patches/rcu_Prevent_false_positive_softirq_warning_on_RT.patch
+++ b/patches/0012-rcu-Prevent-false-positive-softirq-warning-on-RT.patch
@@ -1,21 +1,21 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Subject: rcu: Prevent false positive softirq warning on RT
-Date: Fri, 13 Nov 2020 15:02:23 +0100
-
-From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 4 Dec 2020 18:01:57 +0100
+Subject: [PATCH 12/19] rcu: Prevent false positive softirq warning on RT
Soft interrupt disabled sections can legitimately be preempted or schedule
out when blocking on a lock on RT enabled kernels so the RCU preempt check
warning has to be disabled for RT kernels.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
---
include/linux/rcupdate.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -319,7 +319,8 @@ static inline void rcu_preempt_sleep_che
+@@ -325,7 +325,8 @@ static inline void rcu_preempt_sleep_che
#define rcu_sleep_check() \
do { \
rcu_preempt_sleep_check(); \
diff --git a/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch b/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
index 11d1fdbedf82..75ba6a559f93 100644
--- a/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
+++ b/patches/0013-locking-rtmutex-export-lockdep-less-version-of-rt_mu.patch
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -157,6 +157,9 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -156,6 +156,9 @@ extern bool __rt_mutex_futex_unlock(stru
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
/* RW semaphore special interface */
diff --git a/patches/0013-net-jme-Replace-link-change-tasklet-with-a-worker.patch b/patches/0013-net-jme-Replace-link-change-tasklet-with-a-worker.patch
new file mode 100644
index 000000000000..33a5d5a9bc6b
--- /dev/null
+++ b/patches/0013-net-jme-Replace-link-change-tasklet-with-a-worker.patch
@@ -0,0 +1,78 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Jan 2021 16:12:02 +0100
+Subject: [PATCH 13/19] net: jme: Replace link-change tasklet with a worker
+
+The link change tasklet disables the tasklets for tx/rx processing while
+updating hw parameters and then enables the tasklets again.
+
+This update can also be pushed into a workqueue where it can be
+performed in preemptible context. This allows tasklet_disable() to
+become sleeping.
+
+Replace the linkch_task tasklet with a workqueue.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/jme.c | 10 +++++-----
+ drivers/net/ethernet/jme.h | 2 +-
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/ethernet/jme.c
++++ b/drivers/net/ethernet/jme.c
+@@ -1265,9 +1265,9 @@ jme_stop_shutdown_timer(struct jme_adapt
+ jwrite32f(jme, JME_APMC, apmc);
+ }
+
+-static void jme_link_change_tasklet(struct tasklet_struct *t)
++static void jme_link_change_work(struct work_struct *work)
+ {
+- struct jme_adapter *jme = from_tasklet(jme, t, linkch_task);
++ struct jme_adapter *jme = container_of(work, struct jme_adapter, linkch_task);
+ struct net_device *netdev = jme->dev;
+ int rc;
+
+@@ -1510,7 +1510,7 @@ jme_intr_msi(struct jme_adapter *jme, u3
+ * all other events are ignored
+ */
+ jwrite32(jme, JME_IEVE, intrstat);
+- tasklet_schedule(&jme->linkch_task);
++ schedule_work(&jme->linkch_task);
+ goto out_reenable;
+ }
+
+@@ -1832,7 +1832,6 @@ jme_open(struct net_device *netdev)
+ jme_clear_pm_disable_wol(jme);
+ JME_NAPI_ENABLE(jme);
+
+- tasklet_setup(&jme->linkch_task, jme_link_change_tasklet);
+ tasklet_setup(&jme->txclean_task, jme_tx_clean_tasklet);
+ tasklet_setup(&jme->rxclean_task, jme_rx_clean_tasklet);
+ tasklet_setup(&jme->rxempty_task, jme_rx_empty_tasklet);
+@@ -1920,7 +1919,7 @@ jme_close(struct net_device *netdev)
+
+ JME_NAPI_DISABLE(jme);
+
+- tasklet_kill(&jme->linkch_task);
++ cancel_work_sync(&jme->linkch_task);
+ tasklet_kill(&jme->txclean_task);
+ tasklet_kill(&jme->rxclean_task);
+ tasklet_kill(&jme->rxempty_task);
+@@ -3035,6 +3034,7 @@ jme_init_one(struct pci_dev *pdev,
+ atomic_set(&jme->rx_empty, 1);
+
+ tasklet_setup(&jme->pcc_task, jme_pcc_tasklet);
++ INIT_WORK(&jme->linkch_task, jme_link_change_work);
+ jme->dpi.cur = PCC_P1;
+
+ jme->reg_ghc = 0;
+--- a/drivers/net/ethernet/jme.h
++++ b/drivers/net/ethernet/jme.h
+@@ -411,7 +411,7 @@ struct jme_adapter {
+ struct tasklet_struct rxempty_task;
+ struct tasklet_struct rxclean_task;
+ struct tasklet_struct txclean_task;
+- struct tasklet_struct linkch_task;
++ struct work_struct linkch_task;
+ struct tasklet_struct pcc_task;
+ unsigned long flags;
+ u32 reg_txcs;
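The conversion above follows the usual tasklet-to-workqueue recipe. A generic
sketch of that recipe (illustrative driver names, not jme code):

#include <linux/workqueue.h>

struct my_adapter {
	struct work_struct	linkch_work;
	/* ... */
};

static void my_link_change_work(struct work_struct *work)
{
	struct my_adapter *ap = container_of(work, struct my_adapter, linkch_work);

	/* Runs in preemptible process context and may sleep. */
}

static void my_init(struct my_adapter *ap)
{
	INIT_WORK(&ap->linkch_work, my_link_change_work);	/* was tasklet_setup() */
}

static void my_irq_handler(struct my_adapter *ap)
{
	schedule_work(&ap->linkch_work);	/* was tasklet_schedule() */
}

static void my_close(struct my_adapter *ap)
{
	cancel_work_sync(&ap->linkch_work);	/* was tasklet_kill() */
}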
diff --git a/patches/0014-net-arcnet-Fix-RESET-flag-handling.patch b/patches/0014-net-arcnet-Fix-RESET-flag-handling.patch
new file mode 100644
index 000000000000..30319da1bc83
--- /dev/null
+++ b/patches/0014-net-arcnet-Fix-RESET-flag-handling.patch
@@ -0,0 +1,287 @@
+From: "Ahmed S. Darwish" <a.darwish@linutronix.de>
+Date: Thu, 28 Jan 2021 20:48:02 +0100
+Subject: [PATCH 14/19] net: arcnet: Fix RESET flag handling
+
+The main arcnet interrupt handler calls arcnet_close() then
+arcnet_open(), if the RESET status flag is encountered.
+
+This is invalid:
+
+ 1) In general, interrupt handlers should never call ->ndo_stop() and
+ ->ndo_open() functions. They are usually full of blocking calls and
+ other methods that are expected to be called only from drivers
+ init and exit code paths.
+
+ 2) arcnet_close() contains a del_timer_sync(). If the irq handler
+ interrupts the to-be-deleted timer, del_timer_sync() will just loop
+ forever.
+
+ 3) arcnet_close() also calls tasklet_kill(), which has a warning if
+ called from irq context.
+
+ 4) For device reset, the sequence "arcnet_close(); arcnet_open();" is
+ not complete. Some children arcnet drivers have special init/exit
+ code sequences, which then embed a call to arcnet_open() and
+ arcnet_close() accordingly. Check drivers/net/arcnet/com20020.c.
+
+Run the device RESET sequence from a scheduled workqueue instead.
+
+Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/arcnet/arc-rimi.c | 4 +-
+ drivers/net/arcnet/arcdevice.h | 6 +++
+ drivers/net/arcnet/arcnet.c | 66 +++++++++++++++++++++++++++++++++++---
+ drivers/net/arcnet/com20020-isa.c | 4 +-
+ drivers/net/arcnet/com20020-pci.c | 2 -
+ drivers/net/arcnet/com20020_cs.c | 2 -
+ drivers/net/arcnet/com90io.c | 4 +-
+ drivers/net/arcnet/com90xx.c | 4 +-
+ 8 files changed, 78 insertions(+), 14 deletions(-)
+
+--- a/drivers/net/arcnet/arc-rimi.c
++++ b/drivers/net/arcnet/arc-rimi.c
+@@ -332,7 +332,7 @@ static int __init arc_rimi_init(void)
+ dev->irq = 9;
+
+ if (arcrimi_probe(dev)) {
+- free_netdev(dev);
++ free_arcdev(dev);
+ return -EIO;
+ }
+
+@@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void)
+ iounmap(lp->mem_start);
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ free_irq(dev->irq, dev);
+- free_netdev(dev);
++ free_arcdev(dev);
+ }
+
+ #ifndef MODULE
+--- a/drivers/net/arcnet/arcdevice.h
++++ b/drivers/net/arcnet/arcdevice.h
+@@ -298,6 +298,10 @@ struct arcnet_local {
+
+ int excnak_pending; /* We just got an excesive nak interrupt */
+
++ /* RESET flag handling */
++ int reset_in_progress;
++ struct work_struct reset_work;
++
+ struct {
+ uint16_t sequence; /* sequence number (incs with each packet) */
+ __be16 aborted_seq;
+@@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *
+
+ void arcnet_unregister_proto(struct ArcProto *proto);
+ irqreturn_t arcnet_interrupt(int irq, void *dev_id);
++
+ struct net_device *alloc_arcdev(const char *name);
++void free_arcdev(struct net_device *dev);
+
+ int arcnet_open(struct net_device *dev);
+ int arcnet_close(struct net_device *dev);
+--- a/drivers/net/arcnet/arcnet.c
++++ b/drivers/net/arcnet/arcnet.c
+@@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_li
+ struct arcnet_local *lp = from_timer(lp, t, timer);
+ struct net_device *dev = lp->dev;
+
+- if (!netif_carrier_ok(dev)) {
++ spin_lock_irq(&lp->lock);
++
++ if (!lp->reset_in_progress && !netif_carrier_ok(dev)) {
+ netif_carrier_on(dev);
+ netdev_info(dev, "link up\n");
+ }
++
++ spin_unlock_irq(&lp->lock);
++}
++
++static void reset_device_work(struct work_struct *work)
++{
++ struct arcnet_local *lp;
++ struct net_device *dev;
++
++ lp = container_of(work, struct arcnet_local, reset_work);
++ dev = lp->dev;
++
++ /* Do not bring the network interface back up if an ifdown
++ * was already done.
++ */
++ if (!netif_running(dev) || !lp->reset_in_progress)
++ return;
++
++ rtnl_lock();
++
++ /* Do another check, in case of an ifdown that was triggered in
++ * the small race window between the exit condition above and
++ * acquiring RTNL.
++ */
++ if (!netif_running(dev) || !lp->reset_in_progress)
++ goto out;
++
++ dev_close(dev);
++ dev_open(dev, NULL);
++
++out:
++ rtnl_unlock();
+ }
+
+ static void arcnet_reply_tasklet(unsigned long data)
+@@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const ch
+ lp->dev = dev;
+ spin_lock_init(&lp->lock);
+ timer_setup(&lp->timer, arcnet_timer, 0);
++ INIT_WORK(&lp->reset_work, reset_device_work);
+ }
+
+ return dev;
+ }
+ EXPORT_SYMBOL(alloc_arcdev);
+
++void free_arcdev(struct net_device *dev)
++{
++ struct arcnet_local *lp = netdev_priv(dev);
++
++ /* Do not cancel this at ->ndo_close(), as the workqueue itself
++ * indirectly calls the ifdown path through dev_close().
++ */
++ cancel_work_sync(&lp->reset_work);
++ free_netdev(dev);
++}
++EXPORT_SYMBOL(free_arcdev);
++
+ /* Open/initialize the board. This is called sometime after booting when
+ * the 'ifconfig' program is run.
+ *
+@@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev)
+
+ /* shut down the card */
+ lp->hw.close(dev);
++
++ /* reset counters */
++ lp->reset_in_progress = 0;
++
+ module_put(lp->hw.owner);
+ return 0;
+ }
+@@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, vo
+
+ spin_lock_irqsave(&lp->lock, flags);
+
++ if (lp->reset_in_progress)
++ goto out;
++
+ /* RESET flag was enabled - if device is not running, we must
+ * clear it right away (but nothing else).
+ */
+@@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, vo
+ if (status & RESETflag) {
+ arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n",
+ status);
+- arcnet_close(dev);
+- arcnet_open(dev);
++
++ lp->reset_in_progress = 1;
++ netif_stop_queue(dev);
++ netif_carrier_off(dev);
++ schedule_work(&lp->reset_work);
+
+ /* get out of the interrupt handler! */
+- break;
++ goto out;
+ }
+ /* RX is inhibited - we must have received something.
+ * Prepare to receive into the next buffer.
+@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, vo
+ udelay(1);
+ lp->hw.intmask(dev, lp->intmask);
+
++out:
+ spin_unlock_irqrestore(&lp->lock, flags);
+ return retval;
+ }
+--- a/drivers/net/arcnet/com20020-isa.c
++++ b/drivers/net/arcnet/com20020-isa.c
+@@ -169,7 +169,7 @@ static int __init com20020_init(void)
+ dev->irq = 9;
+
+ if (com20020isa_probe(dev)) {
+- free_netdev(dev);
++ free_arcdev(dev);
+ return -EIO;
+ }
+
+@@ -182,7 +182,7 @@ static void __exit com20020_exit(void)
+ unregister_netdev(my_dev);
+ free_irq(my_dev->irq, my_dev);
+ release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE);
+- free_netdev(my_dev);
++ free_arcdev(my_dev);
+ }
+
+ #ifndef MODULE
+--- a/drivers/net/arcnet/com20020-pci.c
++++ b/drivers/net/arcnet/com20020-pci.c
+@@ -291,7 +291,7 @@ static void com20020pci_remove(struct pc
+
+ unregister_netdev(dev);
+ free_irq(dev->irq, dev);
+- free_netdev(dev);
++ free_arcdev(dev);
+ }
+ }
+
+--- a/drivers/net/arcnet/com20020_cs.c
++++ b/drivers/net/arcnet/com20020_cs.c
+@@ -177,7 +177,7 @@ static void com20020_detach(struct pcmci
+ dev = info->dev;
+ if (dev) {
+ dev_dbg(&link->dev, "kfree...\n");
+- free_netdev(dev);
++ free_arcdev(dev);
+ }
+ dev_dbg(&link->dev, "kfree2...\n");
+ kfree(info);
+--- a/drivers/net/arcnet/com90io.c
++++ b/drivers/net/arcnet/com90io.c
+@@ -396,7 +396,7 @@ static int __init com90io_init(void)
+ err = com90io_probe(dev);
+
+ if (err) {
+- free_netdev(dev);
++ free_arcdev(dev);
+ return err;
+ }
+
+@@ -419,7 +419,7 @@ static void __exit com90io_exit(void)
+
+ free_irq(dev->irq, dev);
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+- free_netdev(dev);
++ free_arcdev(dev);
+ }
+
+ module_init(com90io_init)
+--- a/drivers/net/arcnet/com90xx.c
++++ b/drivers/net/arcnet/com90xx.c
+@@ -554,7 +554,7 @@ static int __init com90xx_found(int ioad
+ err_release_mem:
+ release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1);
+ err_free_dev:
+- free_netdev(dev);
++ free_arcdev(dev);
+ return -EIO;
+ }
+
+@@ -672,7 +672,7 @@ static void __exit com90xx_exit(void)
+ release_region(dev->base_addr, ARCNET_TOTAL_SIZE);
+ release_mem_region(dev->mem_start,
+ dev->mem_end - dev->mem_start + 1);
+- free_netdev(dev);
++ free_arcdev(dev);
+ }
+ }
+
diff --git a/patches/0014-printk-relocate-printk_delay-and-vprintk_default.patch b/patches/0014-printk-relocate-printk_delay-and-vprintk_default.patch
index 8025fab17ca5..c50430ac8539 100644
--- a/patches/0014-printk-relocate-printk_delay-and-vprintk_default.patch
+++ b/patches/0014-printk-relocate-printk_delay-and-vprintk_default.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1670,6 +1670,20 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1688,6 +1688,20 @@ SYSCALL_DEFINE3(syslog, int, type, char
return do_syslog(type, buf, len, SYSLOG_FROM_READER);
}
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Special console_lock variants that help to reduce the risk of soft-lockups.
* They allow to pass console_lock to another printk() call using a busy wait.
-@@ -1913,20 +1927,6 @@ static void printk_exit_irqrestore(unsig
+@@ -1931,20 +1945,6 @@ static void printk_exit_irqrestore(unsig
local_irq_restore(flags);
}
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline u32 printk_caller_id(void)
{
return in_task() ? task_pid_nr(current) :
-@@ -2162,18 +2162,18 @@ asmlinkage int vprintk_emit(int facility
+@@ -2180,18 +2180,18 @@ asmlinkage int vprintk_emit(int facility
}
EXPORT_SYMBOL(vprintk_emit);
diff --git a/patches/0015-net-sundance-Use-tasklet_disable_in_atomic.patch b/patches/0015-net-sundance-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 000000000000..73a621c8caf1
--- /dev/null
+++ b/patches/0015-net-sundance-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,25 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Jan 2021 15:44:01 +0100
+Subject: [PATCH 15/19] net: sundance: Use tasklet_disable_in_atomic().
+
+tasklet_disable() is used in the timer callback.
+
+Replace it with tasklet_disable_in_atomic() so it can be used in atomic
+context while tasklet_disable() may sleep.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/dlink/sundance.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/dlink/sundance.c
++++ b/drivers/net/ethernet/dlink/sundance.c
+@@ -963,7 +963,7 @@ static void tx_timeout(struct net_device
+ unsigned long flag;
+
+ netif_stop_queue(dev);
+- tasklet_disable(&np->tx_tasklet);
++ tasklet_disable_in_atomic(&np->tx_tasklet);
+ iowrite16(0, ioaddr + IntrEnable);
+ printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x "
+ "TxFrameId %2.2x,"
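Usage-wise, the split this series introduces looks as follows. tasklet_disable()
may now sleep until the tasklet has finished, so atomic callers such as the
timer callback above must use the spinning variant (illustrative callers, not
sundance code):

#include <linux/interrupt.h>

/* Process context: may block until the tasklet has finished running. */
static void reconfigure(struct tasklet_struct *t)
{
	tasklet_disable(t);
	/* ... update hardware state ... */
	tasklet_enable(t);
}

/* Atomic context (timer callback, spinlock held, ...): keep spinning. */
static void timeout(struct tasklet_struct *t)
{
	tasklet_disable_in_atomic(t);
	/* ... reset the tx queue ... */
	tasklet_enable(t);
}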
diff --git a/patches/0015-printk-combine-boot_delay_msec-into-printk_delay.patch b/patches/0015-printk-combine-boot_delay_msec-into-printk_delay.patch
index 51081a23944b..d0db0d1ca2f8 100644
--- a/patches/0015-printk-combine-boot_delay_msec-into-printk_delay.patch
+++ b/patches/0015-printk-combine-boot_delay_msec-into-printk_delay.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1672,8 +1672,10 @@ SYSCALL_DEFINE3(syslog, int, type, char
+@@ -1690,8 +1690,10 @@ SYSCALL_DEFINE3(syslog, int, type, char
int printk_delay_msec __read_mostly;
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(printk_delay_msec)) {
int m = printk_delay_msec;
-@@ -2132,8 +2134,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -2150,8 +2152,7 @@ asmlinkage int vprintk_emit(int facility
in_sched = true;
}
diff --git a/patches/0016-ath9k-Use-tasklet_disable_in_atomic.patch b/patches/0016-ath9k-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 000000000000..262f9bba2551
--- /dev/null
+++ b/patches/0016-ath9k-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,32 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Jan 2021 16:33:45 +0100
+Subject: [PATCH 16/19] ath9k: Use tasklet_disable_in_atomic().
+
+All callers of ath9k_beacon_ensure_primary_slot() are preemptible /
+acquire a mutex except for the callchain:
+
+ spin_lock_bh(&sc->sc_pcu_lock);
+ ath_complete_reset()
+ -> ath9k_calculate_summary_state()
+ -> ath9k_beacon_ensure_primary_slot()
+
+which is always invoked in atomic context due to the spin lock.
+I have no idea how to get around it so convert it to
+tasklet_disable_in_atomic().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/wireless/ath/ath9k/beacon.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/wireless/ath/ath9k/beacon.c
++++ b/drivers/net/wireless/ath/ath9k/beacon.c
+@@ -251,7 +251,7 @@ void ath9k_beacon_ensure_primary_slot(st
+ int first_slot = ATH_BCBUF;
+ int slot;
+
+- tasklet_disable(&sc->bcon_tasklet);
++ tasklet_disable_in_atomic(&sc->bcon_tasklet);
+
+ /* Find first taken slot. */
+ for (slot = 0; slot < ATH_BCBUF; slot++) {
diff --git a/patches/0016-printk-change-console_seq-to-atomic64_t.patch b/patches/0016-printk-change-console_seq-to-atomic64_t.patch
index 463f58cbb701..ecf3ad96682b 100644
--- a/patches/0016-printk-change-console_seq-to-atomic64_t.patch
+++ b/patches/0016-printk-change-console_seq-to-atomic64_t.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct latched_seq {
seqcount_latch_t latch;
u64 val[2];
-@@ -2217,7 +2218,7 @@ EXPORT_SYMBOL(printk);
+@@ -2235,7 +2236,7 @@ EXPORT_SYMBOL(printk);
#define prb_first_valid_seq(rb) 0
static u64 syslog_seq;
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 exclusive_console_stop_seq;
static unsigned long console_dropped;
-@@ -2526,6 +2527,7 @@ void console_unlock(void)
+@@ -2544,6 +2545,7 @@ void console_unlock(void)
bool do_cond_resched, retry;
struct printk_info info;
struct printk_record r;
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console_suspended) {
up_console_sem();
-@@ -2569,12 +2571,14 @@ void console_unlock(void)
+@@ -2587,12 +2589,14 @@ void console_unlock(void)
printk_safe_enter_irqsave(flags);
skip:
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (suppress_message_printing(r.info->level)) {
-@@ -2583,13 +2587,13 @@ void console_unlock(void)
+@@ -2601,13 +2605,13 @@ void console_unlock(void)
* directly to the console when we received it, and
* record that has level above the console loglevel.
*/
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
exclusive_console = NULL;
}
-@@ -2610,7 +2614,7 @@ void console_unlock(void)
+@@ -2628,7 +2632,7 @@ void console_unlock(void)
len = record_print_text(&r,
console_msg_format & MSG_FORMAT_SYSLOG,
printk_time);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* While actively printing out messages, if another printk()
-@@ -2645,7 +2649,7 @@ void console_unlock(void)
+@@ -2663,7 +2667,7 @@ void console_unlock(void)
* there's a new owner and the console_unlock() from them will do the
* flush, no worries.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_safe_exit_irqrestore(flags);
if (retry && console_trylock())
-@@ -2710,7 +2714,7 @@ void console_flush_on_panic(enum con_flu
+@@ -2728,7 +2732,7 @@ void console_flush_on_panic(enum con_flu
console_may_schedule = 0;
if (mode == CONSOLE_REPLAY_ALL)
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
}
-@@ -2947,11 +2951,11 @@ void register_console(struct console *ne
+@@ -2965,11 +2969,11 @@ void register_console(struct console *ne
* ignores console_lock.
*/
exclusive_console = newcon;
diff --git a/patches/0017-PCI-hv-Use-tasklet_disable_in_atomic.patch b/patches/0017-PCI-hv-Use-tasklet_disable_in_atomic.patch
new file mode 100644
index 000000000000..40258d146369
--- /dev/null
+++ b/patches/0017-PCI-hv-Use-tasklet_disable_in_atomic.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Jan 2021 16:59:34 +0100
+Subject: [PATCH 17/19] PCI: hv: Use tasklet_disable_in_atomic()
+
+The hv_compose_msi_msg() callback in irq_chip::irq_compose_msi_msg is
+invoked via irq_chip_compose_msi_msg().
+
+Possible call paths:
+- mp_irqdomain_activate()
+ raw_spin_lock_irqsave(&ioapic_lock,);
+ ioapic_configure_entry()
+ -> ioapic_setup_msg_from_msi()
+ -> irq_chip_compose_msi_msg()
+
+- tick_resume_broadcast()
+ raw_spin_lock_irqsave(&tick_broadcast_lock,);
+ clockevents_tick_resume()
+ -> dev->tick_resume()
+ -> hpet_clkevt_msi_resume()
+ -> irq_chip_compose_msi_msg()
+
+Use tasklet_disable_in_atomic() in atomic context.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/pci/controller/pci-hyperv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/pci/controller/pci-hyperv.c
++++ b/drivers/pci/controller/pci-hyperv.c
+@@ -1458,7 +1458,7 @@ static void hv_compose_msi_msg(struct ir
+ * Prevents hv_pci_onchannelcallback() from running concurrently
+ * in the tasklet.
+ */
+- tasklet_disable(&channel->callback_event);
++ tasklet_disable_in_atomic(&channel->callback_event);
+
+ /*
+ * Since this function is called with IRQ locks held, can't
diff --git a/patches/0017-printk-introduce-kernel-sync-mode.patch b/patches/0017-printk-introduce-kernel-sync-mode.patch
index b2a3ef5b78e6..1aae4b89ab21 100644
--- a/patches/0017-printk-introduce-kernel-sync-mode.patch
+++ b/patches/0017-printk-introduce-kernel-sync-mode.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* All 3 protected by @syslog_lock. */
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1687,6 +1691,110 @@ static inline void printk_delay(int leve
+@@ -1705,6 +1709,110 @@ static inline void printk_delay(int leve
}
}
@@ -165,7 +165,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Special console_lock variants that help to reduce the risk of soft-lockups.
* They allow to pass console_lock to another printk() call using a busy wait.
-@@ -1861,6 +1969,8 @@ static void call_console_drivers(const c
+@@ -1879,6 +1987,8 @@ static void call_console_drivers(const c
if (!cpu_online(smp_processor_id()) &&
!(con->flags & CON_ANYTIME))
continue;
@@ -174,7 +174,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (con->flags & CON_EXTENDED)
con->write(con, ext_text, ext_len);
else {
-@@ -2015,6 +2125,7 @@ int vprintk_store(int facility, int leve
+@@ -2033,6 +2143,7 @@ int vprintk_store(int facility, int leve
const u32 caller_id = printk_caller_id();
struct prb_reserved_entry e;
enum log_flags lflags = 0;
@@ -182,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct printk_record r;
unsigned long irqflags;
u16 trunc_msg_len = 0;
-@@ -2024,6 +2135,7 @@ int vprintk_store(int facility, int leve
+@@ -2042,6 +2153,7 @@ int vprintk_store(int facility, int leve
u16 text_len;
int ret = 0;
u64 ts_nsec;
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Since the duration of printk() can vary depending on the message
-@@ -2062,6 +2174,7 @@ int vprintk_store(int facility, int leve
+@@ -2080,6 +2192,7 @@ int vprintk_store(int facility, int leve
if (lflags & LOG_CONT) {
prb_rec_init_wr(&r, reserve_size);
if (prb_reserve_in_last(&e, prb, &r, caller_id, LOG_LINE_MAX)) {
@@ -198,7 +198,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text_len = printk_sprint(&r.text_buf[r.info->text_len], reserve_size,
facility, &lflags, fmt, args);
r.info->text_len += text_len;
-@@ -2069,6 +2182,7 @@ int vprintk_store(int facility, int leve
+@@ -2087,6 +2200,7 @@ int vprintk_store(int facility, int leve
if (lflags & LOG_NEWLINE) {
r.info->flags |= LOG_NEWLINE;
prb_final_commit(&e);
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else {
prb_commit(&e);
}
-@@ -2093,6 +2207,8 @@ int vprintk_store(int facility, int leve
+@@ -2111,6 +2225,8 @@ int vprintk_store(int facility, int leve
goto out;
}
@@ -215,7 +215,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* fill message */
text_len = printk_sprint(&r.text_buf[0], reserve_size, facility, &lflags, fmt, args);
if (trunc_msg_len)
-@@ -2107,13 +2223,19 @@ int vprintk_store(int facility, int leve
+@@ -2125,13 +2241,19 @@ int vprintk_store(int facility, int leve
memcpy(&r.info->dev_info, dev_info, sizeof(r.info->dev_info));
/* A message without a trailing newline can be continued. */
@@ -237,7 +237,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
printk_exit_irqrestore(irqflags);
return ret;
}
-@@ -2217,6 +2339,8 @@ EXPORT_SYMBOL(printk);
+@@ -2235,6 +2357,8 @@ EXPORT_SYMBOL(printk);
#define prb_read_valid(rb, seq, r) false
#define prb_first_valid_seq(rb) 0
@@ -246,7 +246,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static u64 syslog_seq;
static atomic64_t console_seq = ATOMIC64_INIT(0);
static u64 exclusive_console_stop_seq;
-@@ -2502,6 +2626,8 @@ static int have_callable_console(void)
+@@ -2520,6 +2644,8 @@ static int have_callable_console(void)
*/
static inline int can_use_console(void)
{
@@ -255,7 +255,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cpu_online(raw_smp_processor_id()) || have_callable_console();
}
-@@ -3323,6 +3449,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3341,6 +3467,18 @@ void kmsg_dump(enum kmsg_dump_reason rea
{
struct kmsg_dumper *dumper;
diff --git a/patches/0018-atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch b/patches/0018-atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
new file mode 100644
index 000000000000..c48ab3dedc91
--- /dev/null
+++ b/patches/0018-atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 28 Jan 2021 18:13:28 +0100
+Subject: [PATCH 18/19] atm: eni: Use tasklet_disable_in_atomic() in the
+ ->send() callback
+
+The atmdev_ops::send callback may be invoked with disabled BH from
+net_device_ops::ndo_start_xmit().
+
+Use tasklet_disable_in_atomic() here so it can continue to spin in atomic
+context. The other users of tasklet_disable() are preemptible so they can
+remain.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/atm/eni.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/atm/eni.c
++++ b/drivers/atm/eni.c
+@@ -2054,7 +2054,7 @@ static int eni_send(struct atm_vcc *vcc,
+ }
+ submitted++;
+ ATM_SKB(skb)->vcc = vcc;
+- tasklet_disable(&ENI_DEV(vcc->dev)->task);
++ tasklet_disable_in_atomic(&ENI_DEV(vcc->dev)->task);
+ res = do_tx(skb);
+ tasklet_enable(&ENI_DEV(vcc->dev)->task);
+ if (res == enq_ok) return 0;
diff --git a/patches/0018-printk-move-console-printing-to-kthreads.patch b/patches/0018-printk-move-console-printing-to-kthreads.patch
index cfb185b32b53..cb42512ebd7b 100644
--- a/patches/0018-printk-move-console-printing-to-kthreads.patch
+++ b/patches/0018-printk-move-console-printing-to-kthreads.patch
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct latched_seq {
seqcount_latch_t latch;
u64 val[2];
-@@ -1702,6 +1691,8 @@ static bool console_can_sync(struct cons
+@@ -1720,6 +1709,8 @@ static bool console_can_sync(struct cons
return false;
if (con->write_atomic && kernel_sync_mode())
return true;
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
}
-@@ -1711,6 +1702,8 @@ static bool call_sync_console_driver(str
+@@ -1729,6 +1720,8 @@ static bool call_sync_console_driver(str
return false;
if (con->write_atomic && kernel_sync_mode())
con->write_atomic(con, text, text_len);
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
else
return false;
-@@ -1785,202 +1778,16 @@ static void print_sync_until(struct cons
+@@ -1803,202 +1796,16 @@ static void print_sync_until(struct cons
console_atomic_lock(&flags);
for (;;) {
@@ -299,7 +299,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PRINTK_NMI
#define NUM_RECURSION_CTX 2
#else
-@@ -2246,41 +2053,18 @@ asmlinkage int vprintk_emit(int facility
+@@ -2264,41 +2071,18 @@ asmlinkage int vprintk_emit(int facility
{
unsigned long flags;
int printed_len;
@@ -342,7 +342,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
wake_up_klogd();
return printed_len;
}
-@@ -2332,38 +2116,164 @@ asmlinkage __visible int printk(const ch
+@@ -2350,38 +2134,164 @@ asmlinkage __visible int printk(const ch
}
EXPORT_SYMBOL(printk);
@@ -530,7 +530,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_PRINTK */
-@@ -2601,36 +2511,6 @@ int is_console_locked(void)
+@@ -2619,36 +2529,6 @@ int is_console_locked(void)
}
EXPORT_SYMBOL(is_console_locked);
@@ -567,7 +567,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* console_unlock - unlock the console system
*
-@@ -2647,139 +2527,14 @@ static inline int can_use_console(void)
+@@ -2665,139 +2545,14 @@ static inline int can_use_console(void)
*/
void console_unlock(void)
{
@@ -707,7 +707,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL(console_unlock);
-@@ -2829,18 +2584,20 @@ void console_unblank(void)
+@@ -2847,18 +2602,20 @@ void console_unblank(void)
*/
void console_flush_on_panic(enum con_flush_mode mode)
{
@@ -738,7 +738,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
}
-@@ -2975,7 +2732,6 @@ static int try_enable_new_console(struct
+@@ -2993,7 +2750,6 @@ static int try_enable_new_console(struct
*/
void register_console(struct console *newcon)
{
@@ -746,7 +746,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct console *bcon = NULL;
int err;
-@@ -2999,6 +2755,8 @@ void register_console(struct console *ne
+@@ -3017,6 +2773,8 @@ void register_console(struct console *ne
}
}
@@ -755,7 +755,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console_drivers && console_drivers->flags & CON_BOOT)
bcon = console_drivers;
-@@ -3063,27 +2821,12 @@ void register_console(struct console *ne
+@@ -3081,27 +2839,12 @@ void register_console(struct console *ne
if (newcon->flags & CON_EXTENDED)
nr_ext_console_drivers++;
@@ -788,7 +788,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
console_unlock();
console_sysfs_notify();
-@@ -3157,6 +2900,9 @@ int unregister_console(struct console *c
+@@ -3175,6 +2918,9 @@ int unregister_console(struct console *c
console_unlock();
console_sysfs_notify();
@@ -798,7 +798,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (console->exit)
res = console->exit(console);
-@@ -3239,6 +2985,15 @@ static int __init printk_late_init(void)
+@@ -3257,6 +3003,15 @@ static int __init printk_late_init(void)
unregister_console(con);
}
}
@@ -814,7 +814,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = cpuhp_setup_state_nocalls(CPUHP_PRINTK_DEAD, "printk:dead", NULL,
console_cpu_notify);
WARN_ON(ret < 0);
-@@ -3254,7 +3009,6 @@ late_initcall(printk_late_init);
+@@ -3272,7 +3027,6 @@ late_initcall(printk_late_init);
* Delayed printk version, for scheduler-internal messages:
*/
#define PRINTK_PENDING_WAKEUP 0x01
@@ -822,7 +822,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_PER_CPU(int, printk_pending);
-@@ -3262,14 +3016,8 @@ static void wake_up_klogd_work_func(stru
+@@ -3280,14 +3034,8 @@ static void wake_up_klogd_work_func(stru
{
int pending = __this_cpu_xchg(printk_pending, 0);
@@ -838,7 +838,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
-@@ -3292,13 +3040,6 @@ void wake_up_klogd(void)
+@@ -3310,13 +3058,6 @@ void wake_up_klogd(void)
void defer_console_output(void)
{
diff --git a/patches/0019-firewire-ohci-Use-tasklet_disable_in_atomic-in-the-t.patch b/patches/0019-firewire-ohci-Use-tasklet_disable_in_atomic-in-the-t.patch
new file mode 100644
index 000000000000..f4aba05e96ef
--- /dev/null
+++ b/patches/0019-firewire-ohci-Use-tasklet_disable_in_atomic-in-the-t.patch
@@ -0,0 +1,45 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 29 Jan 2021 13:09:59 +0100
+Subject: [PATCH 19/19] firewire: ohci: Use tasklet_disable_in_atomic() in the
+ two atomic users
+
+ohci_cancel_packet() can be invoked from ar_context_tasklet(). It
+disables a tasklet other than the one it is currently running in.
+
+ohci_flush_iso_completions() can be invoked by other drivers, either
+from a tasklet or from a workqueue with preemption disabled.
+The function irq_target_callback() makes it appear to be invoked from
+IRQ context. The `sc' callback it assigns can be traced back to the
+flush_iso_completions() function, which is called from preemptible
+context (ohci_allocate_iso_context()) and ohci_flush_iso_completions().
+
+The invocation from at_context_flush() is always preemptible.
+
+Use tasklet_disable_in_atomic() for the two invocations from atomic
+context.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firewire/ohci.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/firewire/ohci.c
++++ b/drivers/firewire/ohci.c
+@@ -2545,7 +2545,7 @@ static int ohci_cancel_packet(struct fw_
+ struct driver_data *driver_data = packet->driver_data;
+ int ret = -ENOENT;
+
+- tasklet_disable(&ctx->tasklet);
++ tasklet_disable_in_atomic(&ctx->tasklet);
+
+ if (packet->ack != 0)
+ goto out;
+@@ -3465,7 +3465,7 @@ static int ohci_flush_iso_completions(st
+ struct iso_context *ctx = container_of(base, struct iso_context, base);
+ int ret = 0;
+
+- tasklet_disable(&ctx->context.tasklet);
++ tasklet_disable_in_atomic(&ctx->context.tasklet);
+
+ if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
+ context_tasklet((unsigned long)&ctx->context);
diff --git a/patches/0019-printk-remove-deferred-printing.patch b/patches/0019-printk-remove-deferred-printing.patch
index ed11afe427c8..ea32ed504c67 100644
--- a/patches/0019-printk-remove-deferred-printing.patch
+++ b/patches/0019-printk-remove-deferred-printing.patch
@@ -254,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (seq != prb_next_seq(&printk_rb_static)) {
pr_err("dropped %llu messages\n",
prb_next_seq(&printk_rb_static) - seq);
-@@ -1925,9 +1904,9 @@ static u16 printk_sprint(char *text, u16
+@@ -1943,9 +1922,9 @@ static u16 printk_sprint(char *text, u16
}
__printf(4, 0)
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
const u32 caller_id = printk_caller_id();
struct prb_reserved_entry e;
-@@ -2051,7 +2030,6 @@ asmlinkage int vprintk_emit(int facility
+@@ -2069,7 +2048,6 @@ asmlinkage int vprintk_emit(int facility
const struct dev_printk_info *dev_info,
const char *fmt, va_list args)
{
@@ -275,7 +275,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int printed_len;
/* Suppress unimportant messages after panic happens */
-@@ -2061,20 +2039,29 @@ asmlinkage int vprintk_emit(int facility
+@@ -2079,20 +2057,29 @@ asmlinkage int vprintk_emit(int facility
if (level == LOGLEVEL_SCHED)
level = LOGLEVEL_DEFAULT;
@@ -309,7 +309,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
asmlinkage int vprintk(const char *fmt, va_list args)
{
-@@ -3038,18 +3025,10 @@ void wake_up_klogd(void)
+@@ -3056,18 +3043,10 @@ void wake_up_klogd(void)
preempt_enable();
}
@@ -422,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -9329,7 +9329,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9295,7 +9295,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -430,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -9409,7 +9408,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9375,7 +9374,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/0020-printk-add-console-handover.patch b/patches/0020-printk-add-console-handover.patch
index fe1f58eadd27..aad255f44476 100644
--- a/patches/0020-printk-add-console-handover.patch
+++ b/patches/0020-printk-add-console-handover.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
char name[16];
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1670,6 +1670,8 @@ static bool console_can_sync(struct cons
+@@ -1688,6 +1688,8 @@ static bool console_can_sync(struct cons
return false;
if (con->write_atomic && kernel_sync_mode())
return true;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (con->write && (con->flags & CON_BOOT) && !con->thread)
return true;
return false;
-@@ -1681,6 +1683,8 @@ static bool call_sync_console_driver(str
+@@ -1699,6 +1701,8 @@ static bool call_sync_console_driver(str
return false;
if (con->write_atomic && kernel_sync_mode())
con->write_atomic(con, text, text_len);
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
else if (con->write && (con->flags & CON_BOOT) && !con->thread)
con->write(con, text, text_len);
else
-@@ -2785,8 +2789,10 @@ void register_console(struct console *ne
+@@ -2803,8 +2807,10 @@ void register_console(struct console *ne
* the real console are the same physical device, it's annoying to
* see the beginning boot messages twice
*/
diff --git a/patches/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/patches/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index 05720343f8e9..08c9e448c1f4 100644
--- a/patches/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/patches/0021-locking-rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -2246,7 +2392,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2245,7 +2391,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -312,7 +312,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2316,3 +2462,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2315,3 +2461,97 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -412,7 +412,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -160,6 +160,7 @@ extern void rt_mutex_postunlock(struct w
+@@ -159,6 +159,7 @@ extern void rt_mutex_postunlock(struct w
struct wake_q_head *wake_sleeper_q);
/* RW semaphore special interface */
@@ -420,7 +420,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
extern int __rt_mutex_trylock(struct rt_mutex *lock);
-@@ -167,6 +168,7 @@ extern void __rt_mutex_unlock(struct rt_
+@@ -166,6 +167,7 @@ extern void __rt_mutex_unlock(struct rt_
int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
diff --git a/patches/0021-printk-add-pr_flush.patch b/patches/0021-printk-add-pr_flush.patch
index aff6c8baa018..66895773e2f6 100644
--- a/patches/0021-printk-add-pr_flush.patch
+++ b/patches/0021-printk-add-pr_flush.patch
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -3185,6 +3185,12 @@ void kmsg_dump(enum kmsg_dump_reason rea
+@@ -3203,6 +3203,12 @@ void kmsg_dump(enum kmsg_dump_reason rea
sync_mode = true;
pr_info("enabled sync mode\n");
}
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -3521,3 +3527,76 @@ void console_atomic_unlock(unsigned int
+@@ -3539,3 +3545,76 @@ void console_atomic_unlock(unsigned int
prb_unlock(&printk_cpulock, flags);
}
EXPORT_SYMBOL(console_atomic_unlock);
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 003f348fbbf7..6a469a3a9035 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -138,7 +138,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
mov x0, sp
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
-@@ -925,7 +925,7 @@ asmlinkage void do_notify_resume(struct
+@@ -918,7 +918,7 @@ asmlinkage void do_notify_resume(struct
/* Check valid user FS if needed */
addr_limit_user_check();
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index f4ecd3073c96..232d4ee30bae 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -43,10 +43,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+}
+
- unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
+ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status)
{
- unsigned int trace_flags = 0;
-@@ -2603,7 +2612,8 @@ unsigned int _tracing_gen_ctx_flags(unsi
+ unsigned int trace_flags = irqs_status;
+@@ -2596,7 +2605,8 @@ unsigned int tracing_gen_ctx_irq_test(un
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -55,8 +55,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ (migration_disable_value() & 0xff) << 8;
}
- unsigned int tracing_gen_ctx_flags(void)
-@@ -3836,9 +3846,10 @@ static void print_lat_help_header(struct
+ struct ring_buffer_event *
+@@ -3802,9 +3812,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3876,9 +3887,10 @@ static void print_func_help_header_irq(s
+@@ -3842,9 +3853,10 @@ static void print_func_help_header_irq(s
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
diff --git a/patches/irqtime-Use-irq_count-instead-of-preempt_count.patch b/patches/irqtime-Use-irq_count-instead-of-preempt_count.patch
deleted file mode 100644
index 2097a520c75e..000000000000
--- a/patches/irqtime-Use-irq_count-instead-of-preempt_count.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 4 Dec 2020 18:00:31 +0100
-Subject: [PATCH] irqtime: Use irq_count() instead of preempt_count()
-
-preempt_count() does not contain the softirq bits on a PREEMPT_RT
-kernel. irq_count() does.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/cputime.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/sched/cputime.c
-+++ b/kernel/sched/cputime.c
-@@ -60,7 +60,7 @@ void irqtime_account_irq(struct task_str
- cpu = smp_processor_id();
- delta = sched_clock_cpu(cpu) - irqtime->irq_start_time;
- irqtime->irq_start_time += delta;
-- pc = preempt_count() - offset;
-+ pc = irq_count() - offset;
-
- /*
- * We do not account for softirq time from ksoftirqd here.
-@@ -421,7 +421,7 @@ void vtime_task_switch(struct task_struc
-
- void vtime_account_irq(struct task_struct *tsk, unsigned int offset)
- {
-- unsigned int pc = preempt_count() - offset;
-+ unsigned int pc = irq_count() - offset;
-
- if (pc & HARDIRQ_OFFSET) {
- vtime_account_hardirq(tsk);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index ad0785c83de1..15367194da7c 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt25
++-rt26
diff --git a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
index 9d8b0652510a..cbfeb98ffba3 100644
--- a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+++ b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3145,7 +3146,7 @@ static bool consume_obj_stock(struct obj
+@@ -3143,7 +3144,7 @@ static bool consume_obj_stock(struct obj
unsigned long flags;
bool ret = false;
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
-@@ -3153,7 +3154,7 @@ static bool consume_obj_stock(struct obj
+@@ -3151,7 +3152,7 @@ static bool consume_obj_stock(struct obj
ret = true;
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3212,7 +3213,7 @@ static void refill_obj_stock(struct obj_
+@@ -3210,7 +3211,7 @@ static void refill_obj_stock(struct obj_
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
-@@ -3226,7 +3227,7 @@ static void refill_obj_stock(struct obj_
+@@ -3224,7 +3225,7 @@ static void refill_obj_stock(struct obj_
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7138,9 +7139,13 @@ static int __init mem_cgroup_init(void)
+@@ -7136,9 +7137,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 021a3db680d8..7449cabc91d4 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -5735,12 +5743,12 @@ static int mem_cgroup_move_account(struc
+@@ -5733,12 +5741,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6811,10 +6819,10 @@ int mem_cgroup_charge(struct page *page,
+@@ -6809,10 +6817,10 @@ int mem_cgroup_charge(struct page *page,
css_get(&memcg->css);
commit_charge(page, memcg);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6858,11 +6866,11 @@ static void uncharge_batch(const struct
+@@ -6856,11 +6864,11 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
-@@ -7016,10 +7024,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -7014,10 +7022,10 @@ void mem_cgroup_migrate(struct page *old
css_get(&memcg->css);
commit_charge(newpage, memcg);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -7194,6 +7202,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7192,6 +7200,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -7239,9 +7248,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7237,9 +7246,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
index 3e33bb83bf44..bdf1dfccbf7c 100644
--- a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
+++ b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -3047,9 +3047,9 @@ static void drain_local_pages_wq(struct
+@@ -3048,9 +3048,9 @@ static void drain_local_pages_wq(struct
* cpu which is allright but we also have to make sure to not move to
* a different one.
*/
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index c55d5664d1b1..406cab8adb4b 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __free_pages_core(struct page *page, unsigned int order)
-@@ -2960,13 +2968,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2961,13 +2969,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2988,7 +2996,7 @@ static void drain_pages_zone(unsigned in
+@@ -2989,7 +2997,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2996,7 +3004,7 @@ static void drain_pages_zone(unsigned in
+@@ -2997,7 +3005,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -3247,9 +3255,9 @@ void free_unref_page(struct page *page)
+@@ -3248,9 +3256,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -3276,7 +3284,7 @@ void free_unref_page_list(struct list_he
+@@ -3277,7 +3285,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -3291,12 +3299,12 @@ void free_unref_page_list(struct list_he
+@@ -3292,12 +3300,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3466,7 +3474,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3467,7 +3475,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3474,7 +3482,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3475,7 +3483,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3508,7 +3516,8 @@ struct page *rmqueue(struct zone *prefer
+@@ -3509,7 +3517,8 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3534,7 +3543,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3535,7 +3544,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3547,7 +3556,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3548,7 +3557,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8800,7 +8809,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8801,7 +8810,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8809,7 +8818,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8810,7 +8819,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch b/patches/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
index 73c93506d883..c13ad75615fd 100644
--- a/patches/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
+++ b/patches/net-Dequeue-in-dev_cpu_dead-without-the-lock.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -10860,7 +10860,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10865,7 +10865,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index 22d40bd4343a..a0880f678ba7 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -179,7 +179,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct nlattr *opt)
{
struct gnet_estimator *parm = nla_data(opt);
-@@ -223,7 +223,7 @@ int gen_replace_estimator(struct gnet_st
+@@ -226,7 +226,7 @@ int gen_replace_estimator(struct gnet_st
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *lock,
@@ -238,7 +238,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *b)
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
-@@ -1257,7 +1257,7 @@ static struct Qdisc *qdisc_create(struct
+@@ -1258,7 +1258,7 @@ static struct Qdisc *qdisc_create(struct
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index cc77c74e3593..cb3876af6895 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -55,17 +55,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/preempt.h | 54 ++++++++++++++++++++++++++--
include/linux/sched.h | 38 +++++++++++++++++++
include/linux/thread_info.h | 12 +++++-
- include/linux/trace_events.h | 4 +-
+ include/linux/trace_events.h | 5 ++
kernel/Kconfig.preempt | 6 +++
kernel/sched/core.c | 82 +++++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 16 ++++----
kernel/sched/features.h | 3 +
kernel/sched/sched.h | 9 ++++
kernel/trace/trace.c | 45 +++++++++++++----------
- kernel/trace/trace.h | 2 +
kernel/trace/trace_events.c | 1
kernel/trace/trace_output.c | 14 ++++++-
- 13 files changed, 250 insertions(+), 36 deletions(-)
+ 12 files changed, 249 insertions(+), 36 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -261,7 +260,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ entry->flags = trace_ctx >> 24;
}
- unsigned int _tracing_gen_ctx_flags(unsigned long irqflags);
+ unsigned int tracing_gen_ctx_irq_test(unsigned int irqs_status);
+@@ -170,6 +172,7 @@ enum trace_flag_type {
+ TRACE_FLAG_SOFTIRQ = 0x10,
+ TRACE_FLAG_PREEMPT_RESCHED = 0x20,
+ TRACE_FLAG_NMI = 0x40,
++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
+ };
+
+ #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
--- a/kernel/Kconfig.preempt
+++ b/kernel/Kconfig.preempt
@@ -1,5 +1,11 @@
@@ -529,7 +536,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2612,8 +2612,11 @@ unsigned int _tracing_gen_ctx_flags(unsi
+@@ -2605,8 +2605,11 @@ unsigned int tracing_gen_ctx_irq_test(un
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -542,8 +549,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ (trace_flags << 24);
}
- unsigned int tracing_gen_ctx_flags(void)
-@@ -3841,15 +3844,17 @@ unsigned long trace_total_entries(struct
+ struct ring_buffer_event *
+@@ -3807,15 +3810,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -570,7 +577,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3883,14 +3888,16 @@ static void print_func_help_header_irq(s
+@@ -3849,14 +3854,16 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
@@ -595,24 +602,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void
---- a/kernel/trace/trace.h
-+++ b/kernel/trace/trace.h
-@@ -144,6 +144,7 @@ struct kretprobe_trace_entry_head {
- * NEED_RESCHED - reschedule is requested
- * HARDIRQ - inside an interrupt handler
- * SOFTIRQ - inside a softirq handler
-+ * NEED_RESCHED_LAZY - lazy reschedule is requested
- */
- enum trace_flag_type {
- TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -153,6 +154,7 @@ enum trace_flag_type {
- TRACE_FLAG_SOFTIRQ = 0x10,
- TRACE_FLAG_PREEMPT_RESCHED = 0x20,
- TRACE_FLAG_NMI = 0x40,
-+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x80,
- };
-
- #define TRACE_BUF_SIZE 1024
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -184,6 +184,7 @@ static int trace_define_common_fields(vo
diff --git a/patches/series b/patches/series
index bcaaf08eac88..e9d6c46a7df0 100644
--- a/patches/series
+++ b/patches/series
@@ -117,10 +117,11 @@ notifier-Make-atomic_notifiers-use-raw_spinlock.patch
0004-doc-Update-RCU-s-requirements-page-about-the-PREEMPT.patch
0005-doc-Use-CONFIG_PREEMPTION.patch
-# 20210112230057.2374308-1-bigeasy@linutronix.de
+# 20210203160549.480454190@goodmis.org
0001-tracing-Merge-irqflags-preempt-counter.patch
-0002-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch
-0003-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch
+0002-tracing-Inline-tracing_gen_ctx_flags.patch
+0003-tracing-Use-in_serving_softirq-to-deduct-softirq-sta.patch
+0004-tracing-Remove-NULL-check-from-current-in-tracing_ge.patch
############################################################
# Ready for posting
@@ -199,16 +200,25 @@ tcp-Remove-superfluous-BH-disable-around-listening_h.patch
0005-irq-Call-tick_irq_enter-inside-HARDIRQ_OFFSET.patch
# WIP
-softirq_Add_RT_specific_softirq_accounting.patch
-softirq_Move_various_protections_into_inline_helpers.patch
-softirq_Make_softirq_control_and_processing_RT_aware.patch
-tick_sched_Prevent_false_positive_softirq_pending_warnings_on_RT.patch
-rcu_Prevent_false_positive_softirq_warning_on_RT.patch
-softirq_Replace_barrier_with_cpu_relax_in_tasklet_unlock_wait_.patch
-tasklets_Use_static_inlines_for_stub_implementations.patch
-tasklets_Prevent_kill_unlock_wait_deadlock_on_RT.patch
-#
-irqtime-Use-irq_count-instead-of-preempt_count.patch
+0001-softirq-Replace-barrier-with-cpu_relax-in-tasklet_un.patch
+0002-tasklets-Use-static-inlines-for-stub-implementations.patch
+0003-tasklets-Provide-tasklet_disable_in_atomic.patch
+0004-tasklets-Replace-spin-wait-in-tasklet_unlock_wait.patch
+0005-tasklets-Replace-spin-wait-in-tasklet_kill.patch
+0006-tasklets-Prevent-tasklet_unlock_spin_wait-deadlock-o.patch
+0007-softirq-Add-RT-specific-softirq-accounting.patch
+0008-irqtime-Make-accounting-correct-on-RT.patch
+0009-softirq-Move-various-protections-into-inline-helpers.patch
+0010-softirq-Make-softirq-control-and-processing-RT-aware.patch
+0011-tick-sched-Prevent-false-positive-softirq-pending-wa.patch
+0012-rcu-Prevent-false-positive-softirq-warning-on-RT.patch
+0013-net-jme-Replace-link-change-tasklet-with-a-worker.patch
+0014-net-arcnet-Fix-RESET-flag-handling.patch
+0015-net-sundance-Use-tasklet_disable_in_atomic.patch
+0016-ath9k-Use-tasklet_disable_in_atomic.patch
+0017-PCI-hv-Use-tasklet_disable_in_atomic.patch
+0018-atm-eni-Use-tasklet_disable_in_atomic-in-the-send-ca.patch
+0019-firewire-ohci-Use-tasklet_disable_in_atomic-in-the-t.patch
# TIP 5f0c71278d6848b4809f83af90f28196e1505ab1
x86-fpu-Simplify-fpregs_-un-lock.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 055e67907281..b866201f9deb 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int valid_signal(unsigned long sig)
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -151,7 +151,7 @@ static void __exit_signal(struct task_st
+@@ -152,7 +152,7 @@ static void __exit_signal(struct task_st
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index ec05ae7f7c90..b08502de25a4 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -11176,7 +11176,7 @@ static int __init net_dev_init(void)
+@@ -11181,7 +11181,7 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 82db20520374..7543db264a57 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -131,7 +131,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -10842,6 +10848,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10847,6 +10853,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/x86-fpu-Make-kernel-FPU-protection-RT-friendly.patch b/patches/x86-fpu-Make-kernel-FPU-protection-RT-friendly.patch
index e94080e22f43..7dc2ab3790cd 100644
--- a/patches/x86-fpu-Make-kernel-FPU-protection-RT-friendly.patch
+++ b/patches/x86-fpu-Make-kernel-FPU-protection-RT-friendly.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
-@@ -32,15 +32,29 @@ extern void fpregs_mark_activate(void);
+@@ -43,15 +43,29 @@ static inline void kernel_fpu_begin(void
*
* local_bh_disable() protects against both preemption and soft interrupts
* on !RT kernels.
diff --git a/patches/x86-fpu-Simplify-fpregs_-un-lock.patch b/patches/x86-fpu-Simplify-fpregs_-un-lock.patch
index 2c53a5242c62..95f61d1d3f01 100644
--- a/patches/x86-fpu-Simplify-fpregs_-un-lock.patch
+++ b/patches/x86-fpu-Simplify-fpregs_-un-lock.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/include/asm/fpu/api.h
+++ b/arch/x86/include/asm/fpu/api.h
-@@ -29,17 +29,18 @@ extern void fpregs_mark_activate(void);
+@@ -40,17 +40,18 @@ static inline void kernel_fpu_begin(void
* A context switch will (and softirq might) save CPU's FPU registers to
* fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
* a random state.