author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-09-01 14:06:27 +0200
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-09-01 14:06:27 +0200
commit     da9e10bf0b05a02d1ef4447e677eba697af32ec9 (patch)
tree       48a360bd030a2171f5df7c0d439521d148870186
parent     626fc679894e1386b5951d957d62114338d69372 (diff)
download   linux-rt-5.14-rt16-patches.tar.gz
[ANNOUNCE] v5.14-rt16 (v5.14-rt16-patches)
Dear RT folks!

I'm pleased to announce the v5.14-rt16 patch set.

Changes since v5.14-rt15:

  - The scheduler related "balance_push()" fix in the previous release
    had a logic error which has now been corrected. Patch by Thomas
    Gleixner.

  - The kcov related warning, reported by Clark Williams, has been
    addressed.

Known issues

  - netconsole triggers WARN.

  - The "Memory controller" (CONFIG_MEMCG) has been disabled.

  - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
    still not clear if the RCU related change is correct.

  - Clark Williams reported issues in i915 (execlists_dequeue_irq()).

  - Valentin Schneider reported a few splats on ARM64, see
    https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/

The delta patch against v5.14-rt15 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rt15-rt16.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rt16

The RT patch against v5.14 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rt16.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rt16.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
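As background for the kcov change in the queue below: the series replaces a plain
local_irq_save() section around per-CPU data with a local_lock_t, which gives the
protection an explicit, named scope and keeps the code working on PREEMPT_RT. The
following is a minimal illustrative sketch of that pattern only, not the kcov code
itself; the names demo_pcpu and demo_update are made up for the example.

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Hypothetical per-CPU data, guarded by a local_lock_t instead of local_irq_save(). */
    struct demo_pcpu {
            local_lock_t lock;
            unsigned long counter;
    };

    static DEFINE_PER_CPU(struct demo_pcpu, demo_pcpu) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void demo_update(void)
    {
            unsigned long flags;

            /*
             * On !PREEMPT_RT this maps to local_irq_save(); on PREEMPT_RT it
             * becomes a per-CPU sleeping lock, so spinlock_t locks may still
             * be acquired inside the protected section.
             */
            local_lock_irqsave(&demo_pcpu.lock, flags);
            this_cpu_inc(demo_pcpu.counter);
            local_unlock_irqrestore(&demo_pcpu.lock, flags);
    }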
-rw-r--r--patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch2
-rw-r--r--patches/0001_documentation_kcov_include_types_h_in_the_example.patch37
-rw-r--r--patches/0002_documentation_kcov_define_ip_in_the_example.patch26
-rw-r--r--patches/0003-sched-wakeup-Split-out-the-wakeup-__state-check.patch6
-rw-r--r--patches/0003_kcov_allocate_per_cpu_memory_on_the_relevant_node.patch29
-rw-r--r--patches/0004_kcov_avoid_enable_disable_interrupts_if_in_task.patch44
-rw-r--r--patches/0005_kcov_replace_local_irq_save_with_a_local_lock_t.patch158
-rw-r--r--patches/0006-sched-wakeup-Prepare-for-RT-sleeping-spin-rwlocks.patch2
-rw-r--r--patches/0007-sched-core-Rework-the-__schedule-preempt-argument.patch22
-rw-r--r--patches/0008-sched-core-Provide-a-scheduling-point-for-RT-locks.patch4
-rw-r--r--patches/ARM64__Allow_to_enable_RT.patch2
-rw-r--r--patches/Add_localversion_for_-RT_release.patch2
-rw-r--r--patches/arch_arm64__Add_lazy_preempt_support.patch2
-rw-r--r--patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch2
-rw-r--r--patches/printk__remove_deferred_printing.patch2
-rw-r--r--patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch4
-rw-r--r--patches/sched-Prevent-balance_push-on-remote-runqueues.patch27
-rw-r--r--patches/sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch2
-rw-r--r--patches/sched__Add_support_for_lazy_preemption.patch18
-rw-r--r--patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch2
-rw-r--r--patches/sched__Move_mmdrop_to_RCU_on_RT.patch4
-rw-r--r--patches/series7
22 files changed, 358 insertions, 46 deletions
diff --git a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
index b595324950fd..f671d9e19b67 100644
--- a/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
+++ b/patches/0001-sched-Trigger-warning-if-migration_disabled-counter-.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2119,6 +2119,8 @@ void migrate_enable(void)
+@@ -2133,6 +2133,8 @@ void migrate_enable(void)
if (p->migration_disabled > 1) {
p->migration_disabled--;
return;
diff --git a/patches/0001_documentation_kcov_include_types_h_in_the_example.patch b/patches/0001_documentation_kcov_include_types_h_in_the_example.patch
new file mode 100644
index 000000000000..5d9376358965
--- /dev/null
+++ b/patches/0001_documentation_kcov_include_types_h_in_the_example.patch
@@ -0,0 +1,37 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: Documentation/kcov: Include types.h in the example.
+Date: Mon, 30 Aug 2021 19:26:23 +0200
+
+The first example code has the includes at the top; the following two
+examples share that part. The last example (remote coverage collection)
+requires the linux/types.h header file due to its __aligned_u64 usage.
+
+Add the linux/types.h include to the topmost example, and add a comment
+that the header files from above are required, as is already done in the
+second example.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210830172627.267989-2-bigeasy@linutronix.de
+---
+ Documentation/dev-tools/kcov.rst | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/Documentation/dev-tools/kcov.rst
++++ b/Documentation/dev-tools/kcov.rst
+@@ -50,6 +50,7 @@ The following program demonstrates cover
+ #include <sys/mman.h>
+ #include <unistd.h>
+ #include <fcntl.h>
++ #include <linux/types.h>
+
+ #define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
+ #define KCOV_ENABLE _IO('c', 100)
+@@ -251,6 +252,8 @@ selectively from different subsystems.
+
+ .. code-block:: c
+
++ /* Same includes and defines as above. */
++
+ struct kcov_remote_arg {
+ __u32 trace_mode;
+ __u32 area_size;
diff --git a/patches/0002_documentation_kcov_define_ip_in_the_example.patch b/patches/0002_documentation_kcov_define_ip_in_the_example.patch
new file mode 100644
index 000000000000..891cac812efd
--- /dev/null
+++ b/patches/0002_documentation_kcov_define_ip_in_the_example.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: Documentation/kcov: Define `ip' in the example.
+Date: Mon, 30 Aug 2021 19:26:24 +0200
+
+The example code uses the variable `ip' but never declares it.
+
+Declare `ip' as a 64-bit variable, matching the type of the array from
+which it loads its value.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210830172627.267989-3-bigeasy@linutronix.de
+---
+ Documentation/dev-tools/kcov.rst | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/Documentation/dev-tools/kcov.rst
++++ b/Documentation/dev-tools/kcov.rst
+@@ -178,6 +178,8 @@ Comparison operands collection
+ /* Read number of comparisons collected. */
+ n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
+ for (i = 0; i < n; i++) {
++ uint64_t ip;
++
+ type = cover[i * KCOV_WORDS_PER_CMP + 1];
+ /* arg1 and arg2 - operands of the comparison. */
+ arg1 = cover[i * KCOV_WORDS_PER_CMP + 2];
diff --git a/patches/0003-sched-wakeup-Split-out-the-wakeup-__state-check.patch b/patches/0003-sched-wakeup-Split-out-the-wakeup-__state-check.patch
index f4035110014e..de3fd3da89cf 100644
--- a/patches/0003-sched-wakeup-Split-out-the-wakeup-__state-check.patch
+++ b/patches/0003-sched-wakeup-Split-out-the-wakeup-__state-check.patch
@@ -26,7 +26,7 @@ Link: https://lore.kernel.org/r/20210815211302.088945085@linutronix.de
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3562,6 +3562,22 @@ static void ttwu_queue(struct task_struc
+@@ -3576,6 +3576,22 @@ static void ttwu_queue(struct task_struc
}
/*
@@ -49,7 +49,7 @@ Link: https://lore.kernel.org/r/20210815211302.088945085@linutronix.de
* Notes on Program-Order guarantees on SMP systems.
*
* MIGRATION
-@@ -3700,10 +3716,9 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3714,10 +3730,9 @@ try_to_wake_up(struct task_struct *p, un
* - we're serialized against set_special_state() by virtue of
* it disabling IRQs (this allows not taking ->pi_lock).
*/
@@ -61,7 +61,7 @@ Link: https://lore.kernel.org/r/20210815211302.088945085@linutronix.de
trace_sched_waking(p);
WRITE_ONCE(p->__state, TASK_RUNNING);
trace_sched_wakeup(p);
-@@ -3718,14 +3733,11 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3732,14 +3747,11 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
diff --git a/patches/0003_kcov_allocate_per_cpu_memory_on_the_relevant_node.patch b/patches/0003_kcov_allocate_per_cpu_memory_on_the_relevant_node.patch
new file mode 100644
index 000000000000..78d116df4360
--- /dev/null
+++ b/patches/0003_kcov_allocate_per_cpu_memory_on_the_relevant_node.patch
@@ -0,0 +1,29 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kcov: Allocate per-CPU memory on the relevant node.
+Date: Mon, 30 Aug 2021 19:26:25 +0200
+
+During boot kcov allocates per-CPU memory which is used later if remote/
+softirq processing is enabled.
+
+Allocate the per-CPU memory on the CPU's local node to avoid cross-node
+memory accesses.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210830172627.267989-4-bigeasy@linutronix.de
+---
+ kernel/kcov.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -1034,8 +1034,8 @@ static int __init kcov_init(void)
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+- void *area = vmalloc(CONFIG_KCOV_IRQ_AREA_SIZE *
+- sizeof(unsigned long));
++ void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
++ sizeof(unsigned long), cpu_to_node(cpu));
+ if (!area)
+ return -ENOMEM;
+ per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
diff --git a/patches/0004_kcov_avoid_enable_disable_interrupts_if_in_task.patch b/patches/0004_kcov_avoid_enable_disable_interrupts_if_in_task.patch
new file mode 100644
index 000000000000..7fca93c3a44d
--- /dev/null
+++ b/patches/0004_kcov_avoid_enable_disable_interrupts_if_in_task.patch
@@ -0,0 +1,44 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kcov: Avoid enable+disable interrupts if !in_task().
+Date: Mon, 30 Aug 2021 19:26:26 +0200
+
+kcov_remote_start() may need to allocate memory in the in_task() case
+(otherwise the per-CPU memory has been pre-allocated) and therefore
+requires interrupts to be enabled.
+The interrupts are enabled before checking whether the allocation is
+required, so if no allocation is needed the interrupts are needlessly
+enabled and disabled again.
+
+Enable interrupts only if memory allocation is performed.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210830172627.267989-5-bigeasy@linutronix.de
+---
+ kernel/kcov.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -869,19 +869,19 @@ void kcov_remote_start(u64 handle)
+ size = CONFIG_KCOV_IRQ_AREA_SIZE;
+ area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
+ }
+- spin_unlock_irqrestore(&kcov_remote_lock, flags);
++ spin_unlock(&kcov_remote_lock);
+
+ /* Can only happen when in_task(). */
+ if (!area) {
++ local_irqrestore(flags);
+ area = vmalloc(size * sizeof(unsigned long));
+ if (!area) {
+ kcov_put(kcov);
+ return;
+ }
++ local_irq_save(flags);
+ }
+
+- local_irq_save(flags);
+-
+ /* Reset coverage size. */
+ *(u64 *)area = 0;
+
diff --git a/patches/0005_kcov_replace_local_irq_save_with_a_local_lock_t.patch b/patches/0005_kcov_replace_local_irq_save_with_a_local_lock_t.patch
new file mode 100644
index 000000000000..8a4aff07c5e9
--- /dev/null
+++ b/patches/0005_kcov_replace_local_irq_save_with_a_local_lock_t.patch
@@ -0,0 +1,158 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Subject: kcov: Replace local_irq_save() with a local_lock_t.
+Date: Mon, 30 Aug 2021 19:26:27 +0200
+
+The kcov code mixes local_irq_save() and spin_lock() in
+kcov_remote_{start|end}(). This creates a warning on PREEMPT_RT because
+local_irq_save() disables interrupts and spinlock_t is turned into a
+sleeping lock which cannot be acquired in a section with disabled
+interrupts.
+
+The kcov_remote_lock is used to synchronize the access to the hash-list
+kcov_remote_map. The local_irq_save() block protects access to the
+per-CPU data kcov_percpu_data.
+
+There is no compelling reason to change the lock type to raw_spinlock_t
+to make it work with local_irq_save(). Changing it would require moving
+the memory allocation (in kcov_remote_add()) and deallocation outside of
+the locked section.
+Adding an unlimited number of entries to the hashlist would increase the
+IRQ-off time during lookup. It could be argued that this is debug code
+and the latency does not matter. There is however no need for that, and
+avoiding it allows this facility to be used in an RT enabled build.
+
+Using a local_lock_t instead of local_irq_save() has the benefit of
+adding a protection scope within the source which makes it obvious what
+is protected. On a !PREEMPT_RT && !LOCKDEP build the local_lock_irqsave()
+maps directly to local_irq_save() so there is no overhead at runtime.
+
+Replace the local_irq_save() section with a local_lock_t.
+
+Reported-by: Clark Williams <williams@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lore.kernel.org/r/20210830172627.267989-6-bigeasy@linutronix.de
+---
+ kernel/kcov.c | 30 +++++++++++++++++-------------
+ 1 file changed, 17 insertions(+), 13 deletions(-)
+
+--- a/kernel/kcov.c
++++ b/kernel/kcov.c
+@@ -88,6 +88,7 @@ static struct list_head kcov_remote_area
+
+ struct kcov_percpu_data {
+ void *irq_area;
++ local_lock_t lock;
+
+ unsigned int saved_mode;
+ unsigned int saved_size;
+@@ -96,7 +97,9 @@ struct kcov_percpu_data {
+ int saved_sequence;
+ };
+
+-static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data);
++static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
+
+ /* Must be called with kcov_remote_lock locked. */
+ static struct kcov_remote *kcov_remote_find(u64 handle)
+@@ -824,7 +827,7 @@ void kcov_remote_start(u64 handle)
+ if (!in_task() && !in_serving_softirq())
+ return;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+
+ /*
+ * Check that kcov_remote_start() is not called twice in background
+@@ -832,7 +835,7 @@ void kcov_remote_start(u64 handle)
+ */
+ mode = READ_ONCE(t->kcov_mode);
+ if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+ /*
+@@ -841,14 +844,15 @@ void kcov_remote_start(u64 handle)
+ * happened while collecting coverage from a background thread.
+ */
+ if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+
+ spin_lock(&kcov_remote_lock);
+ remote = kcov_remote_find(handle);
+ if (!remote) {
+- spin_unlock_irqrestore(&kcov_remote_lock, flags);
++ spin_unlock(&kcov_remote_lock);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+ kcov_debug("handle = %llx, context: %s\n", handle,
+@@ -873,13 +877,13 @@ void kcov_remote_start(u64 handle)
+
+ /* Can only happen when in_task(). */
+ if (!area) {
+- local_irqrestore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ area = vmalloc(size * sizeof(unsigned long));
+ if (!area) {
+ kcov_put(kcov);
+ return;
+ }
+- local_irq_save(flags);
++ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+ }
+
+ /* Reset coverage size. */
+@@ -891,7 +895,7 @@ void kcov_remote_start(u64 handle)
+ }
+ kcov_start(t, kcov, size, area, mode, sequence);
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+
+ }
+ EXPORT_SYMBOL(kcov_remote_start);
+@@ -965,12 +969,12 @@ void kcov_remote_stop(void)
+ if (!in_task() && !in_serving_softirq())
+ return;
+
+- local_irq_save(flags);
++ local_lock_irqsave(&kcov_percpu_data.lock, flags);
+
+ mode = READ_ONCE(t->kcov_mode);
+ barrier();
+ if (!kcov_mode_enabled(mode)) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+ /*
+@@ -978,12 +982,12 @@ void kcov_remote_stop(void)
+ * actually found the remote handle and started collecting coverage.
+ */
+ if (in_serving_softirq() && !t->kcov_softirq) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+ /* Make sure that kcov_softirq is only set when in softirq. */
+ if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+ return;
+ }
+
+@@ -1013,7 +1017,7 @@ void kcov_remote_stop(void)
+ spin_unlock(&kcov_remote_lock);
+ }
+
+- local_irq_restore(flags);
++ local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
+
+ /* Get in kcov_remote_start(). */
+ kcov_put(kcov);
diff --git a/patches/0006-sched-wakeup-Prepare-for-RT-sleeping-spin-rwlocks.patch b/patches/0006-sched-wakeup-Prepare-for-RT-sleeping-spin-rwlocks.patch
index 9877dc34acb5..e6c1e049b88e 100644
--- a/patches/0006-sched-wakeup-Prepare-for-RT-sleeping-spin-rwlocks.patch
+++ b/patches/0006-sched-wakeup-Prepare-for-RT-sleeping-spin-rwlocks.patch
@@ -146,7 +146,7 @@ Link: https://lore.kernel.org/r/20210815211302.258751046@linutronix.de
#ifdef CONFIG_MMU
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3566,14 +3566,47 @@ static void ttwu_queue(struct task_struc
+@@ -3580,14 +3580,47 @@ static void ttwu_queue(struct task_struc
*
* The caller holds p::pi_lock if p != current or has preemption
* disabled when p == current.
diff --git a/patches/0007-sched-core-Rework-the-__schedule-preempt-argument.patch b/patches/0007-sched-core-Rework-the-__schedule-preempt-argument.patch
index 5f16bf2473ca..a356228f63c0 100644
--- a/patches/0007-sched-core-Rework-the-__schedule-preempt-argument.patch
+++ b/patches/0007-sched-core-Rework-the-__schedule-preempt-argument.patch
@@ -40,7 +40,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5820,6 +5820,18 @@ pick_next_task(struct rq *rq, struct tas
+@@ -5908,6 +5908,18 @@ pick_next_task(struct rq *rq, struct tas
#endif /* CONFIG_SCHED_CORE */
/*
@@ -59,7 +59,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
* __schedule() is the main scheduler function.
*
* The main means of driving the scheduler and thus entering this function are:
-@@ -5858,7 +5870,7 @@ pick_next_task(struct rq *rq, struct tas
+@@ -5946,7 +5958,7 @@ pick_next_task(struct rq *rq, struct tas
*
* WARNING: must be called with preemption disabled!
*/
@@ -68,7 +68,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
{
struct task_struct *prev, *next;
unsigned long *switch_count;
-@@ -5871,13 +5883,13 @@ static void __sched notrace __schedule(b
+@@ -5959,13 +5971,13 @@ static void __sched notrace __schedule(b
rq = cpu_rq(cpu);
prev = rq->curr;
@@ -84,7 +84,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
/*
* Make sure that signal_pending_state()->signal_pending() below
-@@ -5911,7 +5923,7 @@ static void __sched notrace __schedule(b
+@@ -5999,7 +6011,7 @@ static void __sched notrace __schedule(b
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
prev_state = READ_ONCE(prev->__state);
@@ -93,7 +93,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
if (signal_pending_state(prev_state, prev)) {
WRITE_ONCE(prev->__state, TASK_RUNNING);
} else {
-@@ -5977,7 +5989,7 @@ static void __sched notrace __schedule(b
+@@ -6065,7 +6077,7 @@ static void __sched notrace __schedule(b
migrate_disable_switch(rq, prev);
psi_sched_switch(prev, next, !task_on_rq_queued(prev));
@@ -102,7 +102,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
/* Also unlocks the rq: */
rq = context_switch(rq, prev, next, &rf);
-@@ -5998,7 +6010,7 @@ void __noreturn do_task_dead(void)
+@@ -6086,7 +6098,7 @@ void __noreturn do_task_dead(void)
/* Tell freezer to ignore us: */
current->flags |= PF_NOFREEZE;
@@ -111,7 +111,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
BUG();
/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-@@ -6059,7 +6071,7 @@ asmlinkage __visible void __sched schedu
+@@ -6147,7 +6159,7 @@ asmlinkage __visible void __sched schedu
sched_submit_work(tsk);
do {
preempt_disable();
@@ -120,7 +120,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
sched_preempt_enable_no_resched();
} while (need_resched());
sched_update_worker(tsk);
-@@ -6087,7 +6099,7 @@ void __sched schedule_idle(void)
+@@ -6175,7 +6187,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->__state);
do {
@@ -129,7 +129,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
} while (need_resched());
}
-@@ -6140,7 +6152,7 @@ static void __sched notrace preempt_sche
+@@ -6228,7 +6240,7 @@ static void __sched notrace preempt_sche
*/
preempt_disable_notrace();
preempt_latency_start(1);
@@ -138,7 +138,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
-@@ -6219,7 +6231,7 @@ asmlinkage __visible void __sched notrac
+@@ -6307,7 +6319,7 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -147,7 +147,7 @@ Link: https://lore.kernel.org/r/20210815211302.315473019@linutronix.de
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -6368,7 +6380,7 @@ asmlinkage __visible void __sched preemp
+@@ -6456,7 +6468,7 @@ asmlinkage __visible void __sched preemp
do {
preempt_disable();
local_irq_enable();
diff --git a/patches/0008-sched-core-Provide-a-scheduling-point-for-RT-locks.patch b/patches/0008-sched-core-Provide-a-scheduling-point-for-RT-locks.patch
index 61f58a20f59b..2068925bf48e 100644
--- a/patches/0008-sched-core-Provide-a-scheduling-point-for-RT-locks.patch
+++ b/patches/0008-sched-core-Provide-a-scheduling-point-for-RT-locks.patch
@@ -40,7 +40,7 @@ Link: https://lore.kernel.org/r/20210815211302.372319055@linutronix.de
extern void io_schedule_finish(int token);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5829,7 +5829,13 @@ pick_next_task(struct rq *rq, struct tas
+@@ -5917,7 +5917,13 @@ pick_next_task(struct rq *rq, struct tas
*/
#define SM_NONE 0x0
#define SM_PREEMPT 0x1
@@ -55,7 +55,7 @@ Link: https://lore.kernel.org/r/20210815211302.372319055@linutronix.de
/*
* __schedule() is the main scheduler function.
-@@ -6134,6 +6140,18 @@ void __sched schedule_preempt_disabled(v
+@@ -6222,6 +6228,18 @@ void __sched schedule_preempt_disabled(v
preempt_disable();
}
diff --git a/patches/ARM64__Allow_to_enable_RT.patch b/patches/ARM64__Allow_to_enable_RT.patch
index f6517b8aed2c..4f2462cde89b 100644
--- a/patches/ARM64__Allow_to_enable_RT.patch
+++ b/patches/ARM64__Allow_to_enable_RT.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select ARCH_WANT_COMPAT_IPC_PARSE_VERSION if COMPAT
select ARCH_WANT_DEFAULT_BPF_JIT
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
-@@ -215,6 +216,7 @@ config ARM64
+@@ -216,6 +217,7 @@ config ARM64
select PCI_DOMAINS_GENERIC if PCI
select PCI_ECAM if (ACPI && PCI)
select PCI_SYSCALL if PCI
diff --git a/patches/Add_localversion_for_-RT_release.patch b/patches/Add_localversion_for_-RT_release.patch
index 2361800c1e6b..22146ab020cb 100644
--- a/patches/Add_localversion_for_-RT_release.patch
+++ b/patches/Add_localversion_for_-RT_release.patch
@@ -15,4 +15,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt15
++-rt16
diff --git a/patches/arch_arm64__Add_lazy_preempt_support.patch b/patches/arch_arm64__Add_lazy_preempt_support.patch
index cf81faffe11f..864a2e592e8b 100644
--- a/patches/arch_arm64__Add_lazy_preempt_support.patch
+++ b/patches/arch_arm64__Add_lazy_preempt_support.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -193,6 +193,7 @@ config ARM64
+@@ -194,6 +194,7 @@ config ARM64
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
diff --git a/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch b/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
index a05af414166a..0020876629c4 100644
--- a/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
+++ b/patches/kernel_sched__move_stack__kprobe_clean_up_to___put_task_struct.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
task_numa_free(tsk, true);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4638,15 +4638,6 @@ static struct rq *finish_task_switch(str
+@@ -4652,15 +4652,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/printk__remove_deferred_printing.patch b/patches/printk__remove_deferred_printing.patch
index fd867298ef48..adecc0b5bdc0 100644
--- a/patches/printk__remove_deferred_printing.patch
+++ b/patches/printk__remove_deferred_printing.patch
@@ -542,7 +542,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-EXPORT_SYMBOL(vprintk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3162,8 +3162,8 @@ static int select_fallback_rq(int cpu, s
+@@ -3176,8 +3176,8 @@ static int select_fallback_rq(int cpu, s
* leave kernel.
*/
if (p->mm && printk_ratelimit()) {
diff --git a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
index 5fb0bd61c5b5..48c97c3d0732 100644
--- a/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
+++ b/patches/ptrace__fix_ptrace_vs_tasklist_lock_race.patch
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2990,7 +2990,7 @@ unsigned long wait_task_inactive(struct
+@@ -3004,7 +3004,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
cpu_relax();
}
-@@ -3005,7 +3005,7 @@ unsigned long wait_task_inactive(struct
+@@ -3019,7 +3019,7 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/sched-Prevent-balance_push-on-remote-runqueues.patch b/patches/sched-Prevent-balance_push-on-remote-runqueues.patch
index b576aa4eb67d..28b0c9ee5143 100644
--- a/patches/sched-Prevent-balance_push-on-remote-runqueues.patch
+++ b/patches/sched-Prevent-balance_push-on-remote-runqueues.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 27 Aug 2021 16:07:30 +0200
-Subject: [PATCH] sched: Prevent balance_push() on remote runqueues
+Subject: sched: Prevent balance_push() on remote runqueues
+Date: Sat, 28 Aug 2021 15:55:52 +0200
sched_setscheduler() and rt_mutex_setprio() invoke the run-queue balance
callback after changing priorities or the scheduling class of a task. The
@@ -12,22 +12,33 @@ is only valid to be invoked on the outgoing CPU that's wrong. It not only
triggers the debug warning, but also leaves the per CPU variable push_work
unprotected, which can result in double enqueues on the stop machine list.
-Remove the warning and check that the function is invoked on the
-outgoing CPU. If not, just return and do nothing.
+Remove the warning and validate that the function is invoked on the
+outgoing CPU.
Fixes: ae7927023243 ("sched: Optimize finish_lock_switch()")
Reported-by: Sebastian Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Ben Segall <bsegall@google.com>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Link: https://lore.kernel.org/r/87tujb0yn1.ffs@tglx
+Link: https://lore.kernel.org/r/87zgt1hdw7.ffs@tglx
+---
+V2: Use the correct check for the outgoing CPU
---
kernel/sched/core.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -8435,7 +8435,6 @@ static void balance_push(struct rq *rq)
+@@ -8523,7 +8523,6 @@ static void balance_push(struct rq *rq)
struct task_struct *push_task = rq->curr;
lockdep_assert_rq_held(rq);
@@ -35,7 +46,7 @@ Link: https://lore.kernel.org/r/87tujb0yn1.ffs@tglx
/*
* Ensure the thing is persistent until balance_push_set(.on = false);
-@@ -8443,9 +8442,10 @@ static void balance_push(struct rq *rq)
+@@ -8531,9 +8530,10 @@ static void balance_push(struct rq *rq)
rq->balance_callback = &balance_push_callback;
/*
@@ -44,7 +55,7 @@ Link: https://lore.kernel.org/r/87tujb0yn1.ffs@tglx
+ * CPU.
*/
- if (!cpu_dying(rq->cpu))
-+ if (!cpu_dying(rq->cpu) && rq == this_rq())
++ if (!cpu_dying(rq->cpu) || rq != this_rq())
return;
/*
diff --git a/patches/sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch b/patches/sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch
index cf894fb444c9..1ef0c8f9f8d3 100644
--- a/patches/sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch
+++ b/patches/sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3033,7 +3033,7 @@ unsigned long wait_task_inactive(struct
+@@ -3047,7 +3047,7 @@ unsigned long wait_task_inactive(struct
ktime_t to = NSEC_PER_SEC / HZ;
set_current_state(TASK_UNINTERRUPTIBLE);
diff --git a/patches/sched__Add_support_for_lazy_preemption.patch b/patches/sched__Add_support_for_lazy_preemption.patch
index 8caa49943970..a2539b30be3b 100644
--- a/patches/sched__Add_support_for_lazy_preemption.patch
+++ b/patches/sched__Add_support_for_lazy_preemption.patch
@@ -288,7 +288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -972,6 +972,46 @@ void resched_curr(struct rq *rq)
+@@ -986,6 +986,46 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2112,6 +2152,7 @@ void migrate_disable(void)
+@@ -2126,6 +2166,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
@@ -343,7 +343,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -2142,6 +2183,7 @@ void migrate_enable(void)
+@@ -2156,6 +2197,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_enable);
-@@ -4193,6 +4235,9 @@ int sched_fork(unsigned long clone_flags
+@@ -4207,6 +4249,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -5965,6 +6010,7 @@ static void __sched notrace __schedule(u
+@@ -6053,6 +6098,7 @@ static void __sched notrace __schedule(u
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -369,7 +369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
#ifdef CONFIG_SCHED_DEBUG
rq->last_seen_need_resched_ns = 0;
-@@ -6182,6 +6228,30 @@ static void __sched notrace preempt_sche
+@@ -6270,6 +6316,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -400,7 +400,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -6195,7 +6265,8 @@ asmlinkage __visible void __sched notrac
+@@ -6283,7 +6353,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -410,7 +410,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -6228,6 +6299,9 @@ asmlinkage __visible void __sched notrac
+@@ -6316,6 +6387,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -420,7 +420,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -8349,7 +8423,9 @@ void __init init_idle(struct task_struct
+@@ -8437,7 +8511,9 @@ void __init init_idle(struct task_struct
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch b/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
index b073ddf37795..353c701fd0dd 100644
--- a/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
+++ b/patches/sched__Do_not_account_rcu_preempt_depth_on_RT_in_might_sleep.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -9143,7 +9143,7 @@ void __init sched_init(void)
+@@ -9234,7 +9234,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched__Move_mmdrop_to_RCU_on_RT.patch b/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
index ba8951a631c1..f26b1002caf0 100644
--- a/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
+++ b/patches/sched__Move_mmdrop_to_RCU_on_RT.patch
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4626,9 +4626,13 @@ static struct rq *finish_task_switch(str
+@@ -4640,9 +4640,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -8459,6 +8463,7 @@ void sched_setnuma(struct task_struct *p
+@@ -8547,6 +8551,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index a0e6f2d3edfa..2ee8ffaf3a4e 100644
--- a/patches/series
+++ b/patches/series
@@ -82,6 +82,13 @@ highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
sched-Switch-wait_task_inactive-to-HRTIMER_MODE_REL_.patch
sched-Prevent-balance_push-on-remote-runqueues.patch
+#KCOV
+0001_documentation_kcov_include_types_h_in_the_example.patch
+0002_documentation_kcov_define_ip_in_the_example.patch
+0003_kcov_allocate_per_cpu_memory_on_the_relevant_node.patch
+0004_kcov_avoid_enable_disable_interrupts_if_in_task.patch
+0005_kcov_replace_local_irq_save_with_a_local_lock_t.patch
+
###########################################################################
# Post
###########################################################################