author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-01-08 20:06:30 +0100
committer  Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2021-01-08 20:06:30 +0100
commit     65b3a72f7a3eea25032094032b71273514e900d8
tree       acec95d53b9c72e61addaed1f6a1fd17ef711f2a /patches
parent     c92b84a95543ed10de290ae699f7f0a778332e65
download   linux-rt-65b3a72f7a3eea25032094032b71273514e900d8.tar.gz
[ANNOUNCE] v5.10.4-rt22 (tag: v5.10.4-rt22-patches)
Dear RT folks!

I'm pleased to announce the v5.10.4-rt22 patch set.

Changes since v5.10.4-rt21:

  - Avoid "set but not used" warnings in the irq-off tracer.

  - The local-lock patch for powerpc64/pseries/iommu didn't get updated
    and failed to compile.

  - powerpc64/pseries didn't compile due to a recursive header include.

Known issues
  - None.

The delta patch against v5.10.4-rt21 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/incr/patch-5.10.4-rt21-rt22.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.10.4-rt22

The RT patch against v5.10.4 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patch-5.10.4-rt22.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.4-rt22.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
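For context, the local-lock fix in this release bundles the per-CPU page
pointer with a local_lock_t, which is the standard local_lock conversion
pattern (see the updated powerpc-pseries-iommu patch in the delta below).
A minimal sketch of that pattern; the names pcpu_buf and pcpu_buf_use are
illustrative, not from the tree:

    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    /* Per-CPU data plus the lock protecting it, replacing a bare
     * per-CPU pointer guarded by local_irq_save(). */
    struct pcpu_buf {
            void *page;
            local_lock_t lock;
    };
    static DEFINE_PER_CPU(struct pcpu_buf, pcpu_buf) = {
            .lock = INIT_LOCAL_LOCK(lock),
    };

    static void pcpu_buf_use(void)
    {
            unsigned long flags;
            void *p;

            /* On !PREEMPT_RT this disables interrupts, matching the
             * old local_irq_save(); on PREEMPT_RT it takes a per-CPU
             * spinlock so the section stays preemptible. */
            local_lock_irqsave(&pcpu_buf.lock, flags);
            p = __this_cpu_read(pcpu_buf.page);
            /* ... use the per-CPU page ... */
            local_unlock_irqrestore(&pcpu_buf.lock, flags);
    }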
Diffstat (limited to 'patches')
-rw-r--r--  patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 6
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch | 16
-rw-r--r--  patches/0002-s390-vtime-Use-the-generic-IRQ-entry-accounting.patch | 4
-rw-r--r--  patches/0002-sched-Fix-balance_callback.patch | 2
-rw-r--r--  patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch | 14
-rw-r--r--  patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch | 12
-rw-r--r--  patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch | 8
-rw-r--r--  patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch | 6
-rw-r--r--  patches/0008-sched-Massage-set_cpus_allowed.patch | 6
-rw-r--r--  patches/0009-sched-Add-migrate_disable.patch | 2
-rw-r--r--  patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch | 8
-rw-r--r--  patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch | 2
-rw-r--r--  patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch | 8
-rw-r--r--  patches/0019-printk-remove-deferred-printing.patch | 4
-rw-r--r--  patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch | 6
-rw-r--r--  patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch | 44
-rw-r--r--  patches/efi-Allow-efi-runtime.patch | 2
-rw-r--r--  patches/efi-Disable-runtime-services-on-RT.patch | 2
-rw-r--r--  patches/ftrace-migrate-disable-tracing.patch | 8
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch | 10
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 12
-rw-r--r--  patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch | 2
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 26
-rw-r--r--  patches/powerpc-Avoid-recursive-header-includes.patch | 39
-rw-r--r--  patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch | 59
-rw-r--r--  patches/preempt-lazy-support.patch | 10
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/series | 1
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 2
-rw-r--r--  patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch | 168
31 files changed, 286 insertions, 207 deletions
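The other powerpc fix, the new powerpc-Avoid-recursive-header-includes.patch
(added further below), repairs the pseries build by swapping linux/bug.h for
linux/bits.h in cmpxchg.h and by relaxing the direct-include guard in
simple_spinlock_types.h so the RT rtmutex header may pull it in. The guard
pattern it uses, sketched here with an illustrative outer guard name
(_ASM_EXAMPLE_SPINLOCK_TYPES_H), not the real file:

    #ifndef _ASM_EXAMPLE_SPINLOCK_TYPES_H
    #define _ASM_EXAMPLE_SPINLOCK_TYPES_H

    /* Accept inclusion via linux/spinlock_types.h or, on RT, via the
     * rtmutex header; error out on any other direct include. */
    #if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
    # error "please don't include this file directly"
    #endif

    /* ... spinlock type definitions ... */

    #endif /* _ASM_EXAMPLE_SPINLOCK_TYPES_H */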
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index c55ecc272d89..b8336c56d4a4 100644
--- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2934,13 +2940,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2935,13 +2941,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2956,14 +2967,21 @@ static void drain_pages_zone(unsigned in
+@@ -2957,14 +2968,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3192,7 +3210,10 @@ static void free_unref_page_commit(struc
+@@ -3193,7 +3211,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 60feaaaebcf6..5de868a58f28 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2951,7 +2966,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2952,7 +2967,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2981,7 +2996,7 @@ static void drain_pages_zone(unsigned in
+@@ -2982,7 +2997,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3180,7 +3195,8 @@ static bool free_unref_page_prepare(stru
+@@ -3181,7 +3196,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -3210,10 +3226,8 @@ static void free_unref_page_commit(struc
+@@ -3211,10 +3227,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3224,13 +3238,17 @@ void free_unref_page(struct page *page)
+@@ -3225,13 +3239,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3241,6 +3259,11 @@ void free_unref_page_list(struct list_he
+@@ -3242,6 +3260,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -3253,10 +3276,12 @@ void free_unref_page_list(struct list_he
+@@ -3254,10 +3277,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -3269,6 +3294,21 @@ void free_unref_page_list(struct list_he
+@@ -3270,6 +3295,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/patches/0002-s390-vtime-Use-the-generic-IRQ-entry-accounting.patch b/patches/0002-s390-vtime-Use-the-generic-IRQ-entry-accounting.patch
index c76ff7547ae6..9b257bacde5d 100644
--- a/patches/0002-s390-vtime-Use-the-generic-IRQ-entry-accounting.patch
+++ b/patches/0002-s390-vtime-Use-the-generic-IRQ-entry-accounting.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -627,6 +627,12 @@ config HAVE_TIF_NOHZ
+@@ -643,6 +643,12 @@ config HAVE_TIF_NOHZ
config HAVE_VIRT_CPU_ACCOUNTING
bool
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
config ARCH_HAS_SCALED_CPUTIME
bool
-@@ -641,7 +647,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN
+@@ -657,7 +663,6 @@ config HAVE_VIRT_CPU_ACCOUNTING_GEN
some 32-bit arches may require multiple accesses, so proper
locking is needed to protect against concurrent accesses.
diff --git a/patches/0002-sched-Fix-balance_callback.patch b/patches/0002-sched-Fix-balance_callback.patch
index efdaf21db268..f044809a3664 100644
--- a/patches/0002-sched-Fix-balance_callback.patch
+++ b/patches/0002-sched-Fix-balance_callback.patch
@@ -214,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1221,6 +1221,9 @@ static inline void rq_pin_lock(struct rq
+@@ -1215,6 +1215,9 @@ static inline void rq_pin_lock(struct rq
rq->clock_update_flags &= (RQCF_REQ_SKIP|RQCF_ACT_SKIP);
rf->clock_update_flags = 0;
#endif
diff --git a/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch b/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
index 8dfee813f4f0..7e3aa6b1daff 100644
--- a/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
+++ b/patches/0003-sched-hotplug-Ensure-only-per-cpu-kthreads-run-durin.patch
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&rq->lock);
}
-@@ -6838,6 +6859,90 @@ static void migrate_tasks(struct rq *dea
+@@ -6834,6 +6855,90 @@ static void migrate_tasks(struct rq *dea
rq->stop = stop;
}
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -6923,6 +7028,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6919,6 +7024,8 @@ int sched_cpu_activate(unsigned int cpu)
struct rq *rq = cpu_rq(cpu);
struct rq_flags rf;
@@ -181,7 +181,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going up, increment the number of cores with SMT present.
-@@ -6970,6 +7077,8 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6966,6 +7073,8 @@ int sched_cpu_deactivate(unsigned int cp
*/
synchronize_rcu();
@@ -190,7 +190,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -6983,6 +7092,7 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6979,6 +7088,7 @@ int sched_cpu_deactivate(unsigned int cp
ret = cpuset_cpu_inactive(cpu);
if (ret) {
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -973,6 +973,7 @@ struct rq {
+@@ -967,6 +967,7 @@ struct rq {
unsigned long cpu_capacity_orig;
struct callback_head *balance_callback;
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned char nohz_idle_balance;
unsigned char idle_balance;
-@@ -1385,6 +1386,9 @@ init_numa_balancing(unsigned long clone_
+@@ -1379,6 +1380,9 @@ init_numa_balancing(unsigned long clone_
#ifdef CONFIG_SMP
@@ -218,7 +218,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
queue_balance_callback(struct rq *rq,
struct callback_head *head,
-@@ -1392,12 +1396,13 @@ queue_balance_callback(struct rq *rq,
+@@ -1386,12 +1390,13 @@ queue_balance_callback(struct rq *rq,
{
lockdep_assert_held(&rq->lock);
diff --git a/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch b/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
index 5c1928da8242..98d0ab5626ce 100644
--- a/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
+++ b/patches/0004-sched-core-Wait-for-tasks-being-pushed-away-on-hotpl.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6901,8 +6901,21 @@ static void balance_push(struct rq *rq)
+@@ -6897,8 +6897,21 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
get_task_struct(push_task);
/*
-@@ -6933,6 +6946,20 @@ static void balance_push_set(int cpu, bo
+@@ -6929,6 +6942,20 @@ static void balance_push_set(int cpu, bo
rq_unlock_irqrestore(rq, &rf);
}
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else
static inline void balance_push(struct rq *rq)
-@@ -6943,6 +6970,10 @@ static inline void balance_push_set(int
+@@ -6939,6 +6966,10 @@ static inline void balance_push_set(int
{
}
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_HOTPLUG_CPU */
void set_rq_online(struct rq *rq)
-@@ -7097,6 +7128,10 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7093,6 +7124,10 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7337,6 +7372,9 @@ void __init sched_init(void)
+@@ -7333,6 +7368,9 @@ void __init sched_init(void)
rq_csd_init(rq, &rq->nohz_csd, nohz_csd_func);
#endif
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_set(&rq->nr_iowait, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1004,6 +1004,10 @@ struct rq {
+@@ -998,6 +998,10 @@ struct rq {
/* This is used to determine avg_idle's max value */
u64 max_idle_balance_cost;
diff --git a/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch b/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
index b6dba4191a3c..79300cbaa7c0 100644
--- a/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
+++ b/patches/0006-sched-hotplug-Consolidate-task-migration-on-CPU-unpl.patch
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.name = "smpboot/threads:online",
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6746,120 +6746,6 @@ void idle_task_exit(void)
+@@ -6742,120 +6742,6 @@ void idle_task_exit(void)
/* finish_cpu(), as ran on the BP, will clean up the active_mm state */
}
@@ -206,7 +206,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int __balance_push_cpu_stop(void *arg)
{
struct task_struct *p = arg;
-@@ -7128,10 +7014,6 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -7124,10 +7010,6 @@ int sched_cpu_deactivate(unsigned int cp
return ret;
}
sched_domains_numa_masks_clear(cpu);
@@ -217,7 +217,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -7151,6 +7033,41 @@ int sched_cpu_starting(unsigned int cpu)
+@@ -7147,6 +7029,41 @@ int sched_cpu_starting(unsigned int cpu)
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -259,7 +259,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int sched_cpu_dying(unsigned int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -7164,7 +7081,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7160,7 +7077,6 @@ int sched_cpu_dying(unsigned int cpu)
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
diff --git a/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch b/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
index ea4419d2d12c..acf73080c49a 100644
--- a/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
+++ b/patches/0007-sched-Fix-hotplug-vs-CPU-bandwidth-control.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6982,6 +6982,8 @@ int sched_cpu_activate(unsigned int cpu)
+@@ -6978,6 +6978,8 @@ int sched_cpu_activate(unsigned int cpu)
int sched_cpu_deactivate(unsigned int cpu)
{
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
set_cpu_active(cpu, false);
-@@ -6996,6 +6998,14 @@ int sched_cpu_deactivate(unsigned int cp
+@@ -6992,6 +6994,14 @@ int sched_cpu_deactivate(unsigned int cp
balance_push_set(cpu, true);
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
/*
* When going down, decrement the number of cores with SMT present.
-@@ -7077,10 +7087,6 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7073,10 +7083,6 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
diff --git a/patches/0008-sched-Massage-set_cpus_allowed.patch b/patches/0008-sched-Massage-set_cpus_allowed.patch
index 9d33beb7f341..b2a974876cfe 100644
--- a/patches/0008-sched-Massage-set_cpus_allowed.patch
+++ b/patches/0008-sched-Massage-set_cpus_allowed.patch
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!retval) {
cpuset_cpus_allowed(p, cpus_allowed);
-@@ -6595,7 +6603,7 @@ void init_idle(struct task_struct *idle,
+@@ -6591,7 +6599,7 @@ void init_idle(struct task_struct *idle,
*
* And since this is boot we can forgo the serialization.
*/
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Assumes rq->lock is held */
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1814,7 +1814,8 @@ struct sched_class {
+@@ -1808,7 +1808,8 @@ struct sched_class {
void (*task_woken)(struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
-@@ -1907,7 +1908,9 @@ extern void update_group_capacity(struct
+@@ -1901,7 +1902,9 @@ extern void update_group_capacity(struct
extern void trigger_load_balance(struct rq *rq);
diff --git a/patches/0009-sched-Add-migrate_disable.patch b/patches/0009-sched-Add-migrate_disable.patch
index 98318d489643..9ec19dd76365 100644
--- a/patches/0009-sched-Add-migrate_disable.patch
+++ b/patches/0009-sched-Add-migrate_disable.patch
@@ -306,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_sched_switch(preempt, prev, next);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1902,14 +1902,16 @@ static inline bool sched_fair_runnable(s
+@@ -1896,14 +1896,16 @@ static inline bool sched_fair_runnable(s
extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
extern struct task_struct *pick_next_task_idle(struct rq *rq);
diff --git a/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch b/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
index 62cdb031e3b2..e839a368f07e 100644
--- a/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
+++ b/patches/0011-sched-core-Make-migrate-disable-and-CPU-hotplug-coop.patch
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
static void
-@@ -7069,15 +7087,20 @@ static void balance_push(struct rq *rq)
+@@ -7065,15 +7083,20 @@ static void balance_push(struct rq *rq)
* Both the cpu-hotplug and stop task are in this case and are
* required to complete the hotplug process.
*/
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&rq->lock);
rcuwait_wake_up(&rq->hotplug_wait);
raw_spin_lock(&rq->lock);
-@@ -7124,7 +7147,8 @@ static void balance_hotplug_wait(void)
+@@ -7120,7 +7143,8 @@ static void balance_hotplug_wait(void)
{
struct rq *rq = this_rq();
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
TASK_UNINTERRUPTIBLE);
}
-@@ -7369,7 +7393,7 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -7365,7 +7389,7 @@ int sched_cpu_dying(unsigned int cpu)
sched_tick_stop(cpu);
rq_lock_irqsave(rq, &rf);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
calc_load_migrate(rq);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1053,6 +1053,10 @@ struct rq {
+@@ -1047,6 +1047,10 @@ struct rq {
/* Must be inspected within a rcu lock section */
struct cpuidle_state *idle_state;
#endif
diff --git a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
index fa294e0f5b19..6690dc422436 100644
--- a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+++ b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1749,6 +1749,7 @@ static inline int task_on_rq_migrating(s
+@@ -1743,6 +1743,7 @@ static inline int task_on_rq_migrating(s
#define WF_FORK 0x02 /* Child wakeup after fork */
#define WF_MIGRATED 0x04 /* Internal use, task got migrated */
#define WF_ON_CPU 0x08 /* Wakee is on_cpu */
diff --git a/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch b/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
index 46c5d606aa7c..e795349f0b44 100644
--- a/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
+++ b/patches/0015-sched-Fix-migrate_disable-vs-rt-dl-balancing.patch
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.task_tick = task_tick_rt,
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1057,6 +1057,8 @@ struct rq {
+@@ -1051,6 +1051,8 @@ struct rq {
#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
unsigned int nr_pinned;
#endif
@@ -424,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
#ifdef CONFIG_FAIR_GROUP_SCHED
-@@ -1084,6 +1086,16 @@ static inline int cpu_of(struct rq *rq)
+@@ -1078,6 +1080,16 @@ static inline int cpu_of(struct rq *rq)
#endif
}
@@ -441,7 +441,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_SCHED_SMT
extern void __update_idle_core(struct rq *rq);
-@@ -1823,6 +1835,8 @@ struct sched_class {
+@@ -1817,6 +1829,8 @@ struct sched_class {
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -450,7 +450,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
-@@ -1918,6 +1932,24 @@ extern void trigger_load_balance(struct
+@@ -1912,6 +1926,24 @@ extern void trigger_load_balance(struct
extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
diff --git a/patches/0019-printk-remove-deferred-printing.patch b/patches/0019-printk-remove-deferred-printing.patch
index 27f2cdb920ec..98462856621c 100644
--- a/patches/0019-printk-remove-deferred-printing.patch
+++ b/patches/0019-printk-remove-deferred-printing.patch
@@ -422,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -9336,7 +9336,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9343,7 +9343,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -430,7 +430,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -9416,7 +9415,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9423,7 +9422,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
index c08ea25b65f8..62180e54c028 100644
--- a/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
+++ b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
-@@ -7905,6 +7897,39 @@ void __cant_sleep(const char *file, int
+@@ -7901,6 +7893,39 @@ void __cant_sleep(const char *file, int
add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
}
EXPORT_SYMBOL_GPL(__cant_sleep);
@@ -244,7 +244,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_MAGIC_SYSRQ
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1054,7 +1054,7 @@ struct rq {
+@@ -1048,7 +1048,7 @@ struct rq {
struct cpuidle_state *idle_state;
#endif
@@ -253,7 +253,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int nr_pinned;
#endif
unsigned int push_busy;
-@@ -1090,7 +1090,7 @@ static inline int cpu_of(struct rq *rq)
+@@ -1084,7 +1084,7 @@ static inline int cpu_of(struct rq *rq)
static inline bool is_migration_disabled(struct task_struct *p)
{
diff --git a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 41cecc620322..a4a8c83b1958 100644
--- a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -1257,7 +1257,7 @@ static int update_parent_subparts_cpumas
+@@ -1280,7 +1280,7 @@ static int update_parent_subparts_cpumas
* Newly added CPUs will be removed from effective_cpus and
* newly deleted ones will be added back to effective_cpus.
*/
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (adding) {
cpumask_or(parent->subparts_cpus,
parent->subparts_cpus, tmp->addmask);
-@@ -1276,7 +1276,7 @@ static int update_parent_subparts_cpumas
+@@ -1299,7 +1299,7 @@ static int update_parent_subparts_cpumas
}
parent->nr_subparts_cpus = cpumask_weight(parent->subparts_cpus);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cmd == partcmd_update;
}
-@@ -1381,7 +1381,7 @@ static void update_cpumasks_hier(struct
+@@ -1404,7 +1404,7 @@ static void update_cpumasks_hier(struct
continue;
rcu_read_unlock();
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_copy(cp->effective_cpus, tmp->new_cpus);
if (cp->nr_subparts_cpus &&
-@@ -1412,7 +1412,7 @@ static void update_cpumasks_hier(struct
+@@ -1435,7 +1435,7 @@ static void update_cpumasks_hier(struct
= cpumask_weight(cp->subparts_cpus);
}
}
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -1530,7 +1530,7 @@ static int update_cpumask(struct cpuset
+@@ -1553,7 +1553,7 @@ static int update_cpumask(struct cpuset
return -EINVAL;
}
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
/*
-@@ -1541,7 +1541,7 @@ static int update_cpumask(struct cpuset
+@@ -1564,7 +1564,7 @@ static int update_cpumask(struct cpuset
cs->cpus_allowed);
cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus);
}
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_cpumasks_hier(cs, &tmp);
-@@ -1735,9 +1735,9 @@ static void update_nodemasks_hier(struct
+@@ -1758,9 +1758,9 @@ static void update_nodemasks_hier(struct
continue;
rcu_read_unlock();
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1805,9 +1805,9 @@ static int update_nodemask(struct cpuset
+@@ -1828,9 +1828,9 @@ static int update_nodemask(struct cpuset
if (retval < 0)
goto done;
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1898,9 +1898,9 @@ static int update_flag(cpuset_flagbits_t
+@@ -1921,9 +1921,9 @@ static int update_flag(cpuset_flagbits_t
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -2409,7 +2409,7 @@ static int cpuset_common_seq_show(struct
+@@ -2432,7 +2432,7 @@ static int cpuset_common_seq_show(struct
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
switch (type) {
case FILE_CPULIST:
-@@ -2431,7 +2431,7 @@ static int cpuset_common_seq_show(struct
+@@ -2454,7 +2454,7 @@ static int cpuset_common_seq_show(struct
ret = -EINVAL;
}
@@ -167,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2744,14 +2744,14 @@ static int cpuset_css_online(struct cgro
+@@ -2767,14 +2767,14 @@ static int cpuset_css_online(struct cgro
cpuset_inc();
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2778,12 +2778,12 @@ static int cpuset_css_online(struct cgro
+@@ -2801,12 +2801,12 @@ static int cpuset_css_online(struct cgro
}
rcu_read_unlock();
@@ -199,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
percpu_up_write(&cpuset_rwsem);
put_online_cpus();
-@@ -2839,7 +2839,7 @@ static void cpuset_css_free(struct cgrou
+@@ -2862,7 +2862,7 @@ static void cpuset_css_free(struct cgrou
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
percpu_down_write(&cpuset_rwsem);
@@ -208,7 +208,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2850,7 +2850,7 @@ static void cpuset_bind(struct cgroup_su
+@@ -2873,7 +2873,7 @@ static void cpuset_bind(struct cgroup_su
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@@ -217,7 +217,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
percpu_up_write(&cpuset_rwsem);
}
-@@ -2947,12 +2947,12 @@ hotplug_update_tasks_legacy(struct cpuse
+@@ -2970,12 +2970,12 @@ hotplug_update_tasks_legacy(struct cpuse
{
bool is_empty;
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2989,10 +2989,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -3012,10 +3012,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@@ -245,7 +245,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -3147,7 +3147,7 @@ static void cpuset_hotplug_workfn(struct
+@@ -3170,7 +3170,7 @@ static void cpuset_hotplug_workfn(struct
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@@ -254,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!on_dfl)
cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
/*
-@@ -3167,17 +3167,17 @@ static void cpuset_hotplug_workfn(struct
+@@ -3190,17 +3190,17 @@ static void cpuset_hotplug_workfn(struct
}
}
cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
@@ -275,7 +275,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_tasks_nodemask(&top_cpuset);
}
-@@ -3278,11 +3278,11 @@ void cpuset_cpus_allowed(struct task_str
+@@ -3301,11 +3301,11 @@ void cpuset_cpus_allowed(struct task_str
{
unsigned long flags;
@@ -289,7 +289,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -3343,11 +3343,11 @@ nodemask_t cpuset_mems_allowed(struct ta
+@@ -3366,11 +3366,11 @@ nodemask_t cpuset_mems_allowed(struct ta
nodemask_t mask;
unsigned long flags;
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return mask;
}
-@@ -3439,14 +3439,14 @@ bool __cpuset_node_allowed(int node, gfp
+@@ -3462,14 +3462,14 @@ bool __cpuset_node_allowed(int node, gfp
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
diff --git a/patches/efi-Allow-efi-runtime.patch b/patches/efi-Allow-efi-runtime.patch
index 72a0cc979099..97ac4fdccb26 100644
--- a/patches/efi-Allow-efi-runtime.patch
+++ b/patches/efi-Allow-efi-runtime.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -96,6 +96,9 @@ static int __init parse_efi_cmdline(char
+@@ -97,6 +97,9 @@ static int __init parse_efi_cmdline(char
if (parse_option_str(str, "noruntime"))
disable_runtime = true;
diff --git a/patches/efi-Disable-runtime-services-on-RT.patch b/patches/efi-Disable-runtime-services-on-RT.patch
index 60b38d8a071a..2eecd1a8d598 100644
--- a/patches/efi-Disable-runtime-services-on-RT.patch
+++ b/patches/efi-Disable-runtime-services-on-RT.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
-@@ -65,7 +65,7 @@ struct mm_struct efi_mm = {
+@@ -66,7 +66,7 @@ struct mm_struct efi_mm = {
struct workqueue_struct *efi_rts_wq;
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index e662a287f341..355a181805fb 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->flags = trace_ctx >> 16;
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2571,6 +2571,15 @@ enum print_line_t trace_handle_return(st
+@@ -2578,6 +2578,15 @@ enum print_line_t trace_handle_return(st
}
EXPORT_SYMBOL_GPL(trace_handle_return);
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static unsigned int __tracing_gen_ctx_flags(unsigned long irqflags)
{
unsigned int trace_flags = 0;
-@@ -2601,7 +2610,8 @@ static unsigned int __tracing_gen_ctx_fl
+@@ -2608,7 +2617,8 @@ static unsigned int __tracing_gen_ctx_fl
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -56,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
-@@ -3843,9 +3853,10 @@ static void print_lat_help_header(struct
+@@ -3850,9 +3860,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3883,9 +3894,10 @@ static void print_func_help_header_irq(s
+@@ -3890,9 +3901,10 @@ static void print_func_help_header_irq(s
seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space);
seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space);
seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index bba4391fd1bd..1b76c9e42e6f 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt21
++-rt22
diff --git a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
index 47a90c012d71..9d8b0652510a 100644
--- a/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
+++ b/patches/mm-memcontrol-Provide-a-local_lock-for-per-CPU-memcg.patch
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3144,7 +3145,7 @@ static bool consume_obj_stock(struct obj
+@@ -3145,7 +3146,7 @@ static bool consume_obj_stock(struct obj
unsigned long flags;
bool ret = false;
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) {
-@@ -3152,7 +3153,7 @@ static bool consume_obj_stock(struct obj
+@@ -3153,7 +3154,7 @@ static bool consume_obj_stock(struct obj
ret = true;
}
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -3211,7 +3212,7 @@ static void refill_obj_stock(struct obj_
+@@ -3212,7 +3213,7 @@ static void refill_obj_stock(struct obj_
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached_objcg != objcg) { /* reset if necessary */
-@@ -3225,7 +3226,7 @@ static void refill_obj_stock(struct obj_
+@@ -3226,7 +3227,7 @@ static void refill_obj_stock(struct obj_
if (stock->nr_bytes > PAGE_SIZE)
drain_obj_stock(stock);
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size)
-@@ -7135,9 +7136,13 @@ static int __init mem_cgroup_init(void)
+@@ -7138,9 +7139,13 @@ static int __init mem_cgroup_init(void)
cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
memcg_hotplug_cpu_dead);
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index bff38083e8cb..021a3db680d8 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -5732,12 +5740,12 @@ static int mem_cgroup_move_account(struc
+@@ -5735,12 +5743,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6808,10 +6816,10 @@ int mem_cgroup_charge(struct page *page,
+@@ -6811,10 +6819,10 @@ int mem_cgroup_charge(struct page *page,
css_get(&memcg->css);
commit_charge(page, memcg);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6855,11 +6863,11 @@ static void uncharge_batch(const struct
+@@ -6858,11 +6866,11 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -78,7 +78,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* drop reference from uncharge_page */
css_put(&ug->memcg->css);
-@@ -7013,10 +7021,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -7016,10 +7024,10 @@ void mem_cgroup_migrate(struct page *old
css_get(&memcg->css);
commit_charge(newpage, memcg);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -7191,6 +7199,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7194,6 +7202,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -7236,9 +7245,13 @@ void mem_cgroup_swapout(struct page *pag
+@@ -7239,9 +7248,13 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
index f8b65a378f5d..195012a9164c 100644
--- a/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
+++ b/patches/mm-page_alloc-Use-migrate_disable-in-drain_local_pag.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -3044,9 +3044,9 @@ static void drain_local_pages_wq(struct
+@@ -3045,9 +3045,9 @@ static void drain_local_pages_wq(struct
* cpu which is allright but we also have to make sure to not move to
* a different one.
*/
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 6b222ea636a0..f9933b7bbff2 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __free_pages_core(struct page *page, unsigned int order)
-@@ -2957,13 +2965,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2958,13 +2966,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2985,7 +2993,7 @@ static void drain_pages_zone(unsigned in
+@@ -2986,7 +2994,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2993,7 +3001,7 @@ static void drain_pages_zone(unsigned in
+@@ -2994,7 +3002,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -3244,9 +3252,9 @@ void free_unref_page(struct page *page)
+@@ -3245,9 +3253,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -3273,7 +3281,7 @@ void free_unref_page_list(struct list_he
+@@ -3274,7 +3282,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -3288,12 +3296,12 @@ void free_unref_page_list(struct list_he
+@@ -3289,12 +3297,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3463,7 +3471,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3464,7 +3472,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3471,7 +3479,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3472,7 +3480,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
@@ -142,7 +142,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3505,7 +3513,8 @@ struct page *rmqueue(struct zone *prefer
+@@ -3506,7 +3514,8 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -152,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3531,7 +3540,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3532,7 +3541,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3544,7 +3553,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3545,7 +3554,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8797,7 +8806,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8798,7 +8807,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8806,7 +8815,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8807,7 +8816,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/powerpc-Avoid-recursive-header-includes.patch b/patches/powerpc-Avoid-recursive-header-includes.patch
new file mode 100644
index 000000000000..6a048d598782
--- /dev/null
+++ b/patches/powerpc-Avoid-recursive-header-includes.patch
@@ -0,0 +1,39 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 8 Jan 2021 19:48:21 +0100
+Subject: [PATCH] powerpc: Avoid recursive header includes
+
+- The include of bug.h leads to an include of printk.h which gets back
+ to spinlock.h and complains then about missing xchg().
+ Remove bug.h and add bits.h which is needed for BITS_PER_BYTE.
+
+- Avoid the "please don't include this file directly" error from
+ rwlock-rt. Allow an include from/with rtmutex.h.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/powerpc/include/asm/cmpxchg.h | 2 +-
+ arch/powerpc/include/asm/simple_spinlock_types.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/include/asm/cmpxchg.h
++++ b/arch/powerpc/include/asm/cmpxchg.h
+@@ -5,7 +5,7 @@
+ #ifdef __KERNEL__
+ #include <linux/compiler.h>
+ #include <asm/synch.h>
+-#include <linux/bug.h>
++#include <linux/bits.h>
+
+ #ifdef __BIG_ENDIAN
+ #define BITOFF_CAL(size, off) ((sizeof(u32) - size - off) * BITS_PER_BYTE)
+--- a/arch/powerpc/include/asm/simple_spinlock_types.h
++++ b/arch/powerpc/include/asm/simple_spinlock_types.h
+@@ -2,7 +2,7 @@
+ #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+ #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
+
+-#ifndef __LINUX_SPINLOCK_TYPES_H
++#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__LINUX_RT_MUTEX_H)
+ # error "please don't include this file directly"
+ #endif
+
diff --git a/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch b/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
index 2c2d8b4b1874..470c64020422 100644
--- a/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
+++ b/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
@@ -12,8 +12,8 @@ Use local_irq_save() instead of local_irq_disable().
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
- 1 file changed, 10 insertions(+), 6 deletions(-)
+ arch/powerpc/platforms/pseries/iommu.c | 31 ++++++++++++++++++++-----------
+ 1 file changed, 20 insertions(+), 11 deletions(-)
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -25,65 +25,86 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
-@@ -191,6 +192,7 @@ static int tce_build_pSeriesLP(unsigned
+@@ -190,7 +191,13 @@ static int tce_build_pSeriesLP(unsigned
+ return ret;
}
- static DEFINE_PER_CPU(__be64 *, tce_page);
-+static DEFINE_LOCAL_IRQ_LOCK(tcp_page_lock);
+-static DEFINE_PER_CPU(__be64 *, tce_page);
++struct tce_page {
++ __be64 * page;
++ local_lock_t lock;
++};
++static DEFINE_PER_CPU(struct tce_page, tce_page) = {
++ .lock = INIT_LOCAL_LOCK(lock),
++};
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
-@@ -212,7 +214,8 @@ static int tce_buildmulti_pSeriesLP(stru
+@@ -212,9 +219,10 @@ static int tce_buildmulti_pSeriesLP(stru
direction, attrs);
}
- local_irq_save(flags); /* to protect tcep and the page behind it */
+ /* to protect tcep and the page behind it */
-+ local_lock_irqsave(tcp_page_lock, flags);
++ local_lock_irqsave(&tce_page.lock, flags);
- tcep = __this_cpu_read(tce_page);
+- tcep = __this_cpu_read(tce_page);
++ tcep = __this_cpu_read(tce_page.page);
-@@ -223,7 +226,7 @@ static int tce_buildmulti_pSeriesLP(stru
+ /* This is safe to do since interrupts are off when we're called
+ * from iommu_alloc{,_sg}()
+@@ -223,12 +231,12 @@ static int tce_buildmulti_pSeriesLP(stru
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
- local_irq_restore(flags);
-+ local_unlock_irqrestore(tcp_page_lock, flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
return tce_build_pSeriesLP(tbl->it_index, tcenum,
tbl->it_page_shift,
npages, uaddr, direction, attrs);
-@@ -258,7 +261,7 @@ static int tce_buildmulti_pSeriesLP(stru
+ }
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ rpn = __pa(uaddr) >> TCE_SHIFT;
+@@ -258,7 +266,7 @@ static int tce_buildmulti_pSeriesLP(stru
tcenum += limit;
} while (npages > 0 && !rc);
- local_irq_restore(flags);
-+ local_unlock_irqrestore(tcp_page_lock, flags);
++ local_unlock_irqrestore(&tce_page.lock, flags);
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
-@@ -429,13 +432,14 @@ static int tce_setrange_multi_pSeriesLP(
+@@ -429,16 +437,17 @@ static int tce_setrange_multi_pSeriesLP(
DMA_BIDIRECTIONAL, 0);
}
- local_irq_disable(); /* to protect tcep and the page behind it */
+- tcep = __this_cpu_read(tce_page);
+ /* to protect tcep and the page behind it */
-+ local_lock_irq(tcp_page_lock);
- tcep = __this_cpu_read(tce_page);
++ local_lock_irq(&tce_page.lock);
++ tcep = __this_cpu_read(tce_page.page);
if (!tcep) {
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
if (!tcep) {
- local_irq_enable();
-+ local_unlock_irq(tcp_page_lock);
++ local_unlock_irq(&tce_page.lock);
return -ENOMEM;
}
- __this_cpu_write(tce_page, tcep);
-@@ -481,7 +485,7 @@ static int tce_setrange_multi_pSeriesLP(
+- __this_cpu_write(tce_page, tcep);
++ __this_cpu_write(tce_page.page, tcep);
+ }
+
+ proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
+@@ -481,7 +490,7 @@ static int tce_setrange_multi_pSeriesLP(
/* error cleanup: caller will clear whole range */
- local_irq_enable();
-+ local_unlock_irq(tcp_page_lock);
++ local_unlock_irq(&tce_page.lock);
return rc;
}
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 1797f06a25b2..bf5eb8e61e48 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -412,7 +412,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -7172,7 +7248,9 @@ void init_idle(struct task_struct *idle,
+@@ -7168,7 +7244,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -511,7 +511,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1993,6 +1993,15 @@ extern void reweight_task(struct task_st
+@@ -1987,6 +1987,15 @@ extern void reweight_task(struct task_st
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -529,7 +529,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2610,8 +2610,11 @@ static unsigned int __tracing_gen_ctx_fl
+@@ -2617,8 +2617,11 @@ static unsigned int __tracing_gen_ctx_fl
trace_flags |= TRACE_FLAG_NEED_RESCHED;
if (test_preempt_need_resched())
trace_flags |= TRACE_FLAG_PREEMPT_RESCHED;
@@ -543,7 +543,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
unsigned int _tracing_gen_ctx_flags(unsigned long irqflags)
-@@ -3848,15 +3851,17 @@ unsigned long trace_total_entries(struct
+@@ -3855,15 +3858,17 @@ unsigned long trace_total_entries(struct
static void print_lat_help_header(struct seq_file *m)
{
@@ -570,7 +570,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct array_buffer *buf, struct seq_file *m)
-@@ -3890,14 +3895,16 @@ static void print_func_help_header_irq(s
+@@ -3897,14 +3902,16 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 69a8499b11f1..bd14ca488cf1 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7881,7 +7881,7 @@ void __init sched_init(void)
+@@ -7877,7 +7877,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/series b/patches/series
index 949197974061..05560731ffd9 100644
--- a/patches/series
+++ b/patches/series
@@ -478,6 +478,7 @@ powerpc-traps.patch
powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
powerpc-stackprotector-work-around-stack-guard-init-.patch
+powerpc-Avoid-recursive-header-includes.patch
POWERPC-Allow-to-enable-RT.patch
# DRIVERS
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 1cceda460d77..055e67907281 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -2016,6 +2016,7 @@ static __latent_entropy struct task_stru
+@@ -2017,6 +2017,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch b/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch
index 16684546326c..bbdac118f98f 100644
--- a/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch
+++ b/patches/tracing-Merge-irqflags-preemt-counter-add-RT-bits.patch
@@ -54,13 +54,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kernel/trace/trace_functions.c | 26 +---
kernel/trace/trace_functions_graph.c | 32 ++---
kernel/trace/trace_hwlat.c | 7 -
- kernel/trace/trace_irqsoff.c | 62 ++++------
+ kernel/trace/trace_irqsoff.c | 86 +++++--------
kernel/trace/trace_kprobe.c | 14 --
kernel/trace/trace_mmiotrace.c | 14 +-
kernel/trace/trace_sched_wakeup.c | 61 +++++----
kernel/trace/trace_syscalls.c | 20 +--
kernel/trace/trace_uprobe.c | 4
- 19 files changed, 297 insertions(+), 282 deletions(-)
+ 19 files changed, 303 insertions(+), 300 deletions(-)
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -196,7 +196,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -165,7 +165,7 @@ static union trace_eval_map_item *trace_
+@@ -176,7 +176,7 @@ static union trace_eval_map_item *trace_
int tracing_set_tracer(struct trace_array *tr, const char *buf);
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define MAX_TRACER_SIZE 100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
-@@ -894,23 +894,23 @@ static inline void trace_access_lock_ini
+@@ -905,23 +905,23 @@ static inline void trace_access_lock_ini
#ifdef CONFIG_STACKTRACE
static void __ftrace_trace_stack(struct trace_buffer *buffer,
@@ -237,7 +237,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
-@@ -918,24 +918,24 @@ static inline void ftrace_trace_stack(st
+@@ -929,24 +929,24 @@ static inline void ftrace_trace_stack(st
static __always_inline void
trace_event_setup(struct ring_buffer_event *event,
@@ -266,7 +266,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return event;
}
-@@ -996,25 +996,22 @@ int __trace_puts(unsigned long ip, const
+@@ -1007,25 +1007,22 @@ int __trace_puts(unsigned long ip, const
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct print_entry *entry;
@@ -296,7 +296,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event) {
size = 0;
goto out;
-@@ -1033,7 +1030,7 @@ int __trace_puts(unsigned long ip, const
+@@ -1044,7 +1041,7 @@ int __trace_puts(unsigned long ip, const
entry->buf[size] = '\0';
__buffer_unlock_commit(buffer, event);
@@ -305,7 +305,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out:
ring_buffer_nest_end(buffer);
return size;
-@@ -1050,25 +1047,22 @@ int __trace_bputs(unsigned long ip, cons
+@@ -1061,25 +1058,22 @@ int __trace_bputs(unsigned long ip, cons
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct bputs_entry *entry;
@@ -334,7 +334,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
goto out;
-@@ -1077,7 +1071,7 @@ int __trace_bputs(unsigned long ip, cons
+@@ -1088,7 +1082,7 @@ int __trace_bputs(unsigned long ip, cons
entry->str = str;
__buffer_unlock_commit(buffer, event);
@@ -343,7 +343,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = 1;
out:
-@@ -2577,36 +2571,78 @@ enum print_line_t trace_handle_return(st
+@@ -2584,36 +2578,78 @@ enum print_line_t trace_handle_return(st
}
EXPORT_SYMBOL_GPL(trace_handle_return);
@@ -440,7 +440,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
-@@ -2726,7 +2762,7 @@ struct ring_buffer_event *
+@@ -2733,7 +2769,7 @@ struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
struct trace_event_file *trace_file,
int type, unsigned long len,
@@ -449,7 +449,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct ring_buffer_event *entry;
int val;
-@@ -2739,15 +2775,15 @@ trace_event_buffer_lock_reserve(struct t
+@@ -2746,15 +2782,15 @@ trace_event_buffer_lock_reserve(struct t
/* Try to use the per cpu buffer first */
val = this_cpu_inc_return(trace_buffered_event_cnt);
if (val == 1) {
@@ -468,7 +468,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
-@@ -2756,8 +2792,8 @@ trace_event_buffer_lock_reserve(struct t
+@@ -2763,8 +2799,8 @@ trace_event_buffer_lock_reserve(struct t
*/
if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
@@ -479,7 +479,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return entry;
}
-@@ -2834,7 +2870,7 @@ int tracepoint_printk_sysctl(struct ctl_
+@@ -2841,7 +2877,7 @@ int tracepoint_printk_sysctl(struct ctl_
return ret;
}
@@ -488,7 +488,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
if (static_key_false(&tracepoint_printk_key.key))
output_printk(fbuffer);
-@@ -2843,9 +2879,9 @@ void trace_event_buffer_commit(struct tr
+@@ -2850,9 +2886,9 @@ void trace_event_buffer_commit(struct tr
ftrace_exports(fbuffer->event, TRACE_EXPORT_EVENT);
event_trigger_unlock_commit_regs(fbuffer->trace_file, fbuffer->buffer,
fbuffer->event, fbuffer->entry,
@@ -500,7 +500,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Skip 3:
-@@ -2859,7 +2895,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_com
+@@ -2866,7 +2902,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_com
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
@@ -509,7 +509,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct pt_regs *regs)
{
__buffer_unlock_commit(buffer, event);
-@@ -2870,8 +2906,8 @@ void trace_buffer_unlock_commit_regs(str
+@@ -2877,8 +2913,8 @@ void trace_buffer_unlock_commit_regs(str
* and mmiotrace, but that's ok if they lose a function or
* two. They are not that meaningful.
*/
@@ -520,7 +520,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2885,9 +2921,8 @@ trace_buffer_unlock_commit_nostack(struc
+@@ -2892,9 +2928,8 @@ trace_buffer_unlock_commit_nostack(struc
}
void
@@ -532,7 +532,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct trace_event_call *call = &event_function;
struct trace_buffer *buffer = tr->array_buffer.buffer;
-@@ -2895,7 +2930,7 @@ trace_function(struct trace_array *tr,
+@@ -2902,7 +2937,7 @@ trace_function(struct trace_array *tr,
struct ftrace_entry *entry;
event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
@@ -541,7 +541,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
return;
entry = ring_buffer_event_data(event);
-@@ -2929,8 +2964,8 @@ static DEFINE_PER_CPU(struct ftrace_stac
+@@ -2936,8 +2971,8 @@ static DEFINE_PER_CPU(struct ftrace_stac
static DEFINE_PER_CPU(int, ftrace_stack_reserve);
static void __ftrace_trace_stack(struct trace_buffer *buffer,
@@ -552,7 +552,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct trace_event_call *call = &event_kernel_stack;
struct ring_buffer_event *event;
-@@ -2977,7 +3012,7 @@ static void __ftrace_trace_stack(struct
+@@ -2984,7 +3019,7 @@ static void __ftrace_trace_stack(struct
size = nr_entries * sizeof(unsigned long);
event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
@@ -561,7 +561,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
goto out;
entry = ring_buffer_event_data(event);
-@@ -2998,22 +3033,22 @@ static void __ftrace_trace_stack(struct
+@@ -3005,22 +3040,22 @@ static void __ftrace_trace_stack(struct
static inline void ftrace_trace_stack(struct trace_array *tr,
struct trace_buffer *buffer,
@@ -590,7 +590,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return;
}
-@@ -3027,7 +3062,7 @@ void __trace_stack(struct trace_array *t
+@@ -3034,7 +3069,7 @@ void __trace_stack(struct trace_array *t
return;
rcu_irq_enter_irqson();
@@ -599,7 +599,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_irq_exit_irqson();
}
-@@ -3049,7 +3084,7 @@ void trace_dump_stack(int skip)
+@@ -3056,7 +3091,7 @@ void trace_dump_stack(int skip)
skip++;
#endif
__ftrace_trace_stack(global_trace.array_buffer.buffer,
@@ -608,7 +608,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(trace_dump_stack);
-@@ -3058,7 +3093,7 @@ static DEFINE_PER_CPU(int, user_stack_co
+@@ -3065,7 +3100,7 @@ static DEFINE_PER_CPU(int, user_stack_co
static void
ftrace_trace_userstack(struct trace_array *tr,
@@ -617,7 +617,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct trace_event_call *call = &event_user_stack;
struct ring_buffer_event *event;
-@@ -3085,7 +3120,7 @@ ftrace_trace_userstack(struct trace_arra
+@@ -3092,7 +3127,7 @@ ftrace_trace_userstack(struct trace_arra
__this_cpu_inc(user_stack_count);
event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
@@ -626,7 +626,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
goto out_drop_count;
entry = ring_buffer_event_data(event);
-@@ -3105,7 +3140,7 @@ ftrace_trace_userstack(struct trace_arra
+@@ -3112,7 +3147,7 @@ ftrace_trace_userstack(struct trace_arra
#else /* CONFIG_USER_STACKTRACE_SUPPORT */
static void ftrace_trace_userstack(struct trace_array *tr,
struct trace_buffer *buffer,
@@ -635,7 +635,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
#endif /* !CONFIG_USER_STACKTRACE_SUPPORT */
-@@ -3235,9 +3270,9 @@ int trace_vbprintk(unsigned long ip, con
+@@ -3242,9 +3277,9 @@ int trace_vbprintk(unsigned long ip, con
struct trace_buffer *buffer;
struct trace_array *tr = &global_trace;
struct bprint_entry *entry;
@@ -647,7 +647,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(tracing_selftest_running || tracing_disabled))
return 0;
-@@ -3245,7 +3280,7 @@ int trace_vbprintk(unsigned long ip, con
+@@ -3252,7 +3287,7 @@ int trace_vbprintk(unsigned long ip, con
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
@@ -656,7 +656,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_disable_notrace();
tbuffer = get_trace_buf();
-@@ -3259,12 +3294,11 @@ int trace_vbprintk(unsigned long ip, con
+@@ -3266,12 +3301,11 @@ int trace_vbprintk(unsigned long ip, con
if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
goto out_put;
@@ -670,7 +670,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
goto out;
entry = ring_buffer_event_data(event);
-@@ -3274,7 +3308,7 @@ int trace_vbprintk(unsigned long ip, con
+@@ -3281,7 +3315,7 @@ int trace_vbprintk(unsigned long ip, con
memcpy(entry->buf, tbuffer, sizeof(u32) * len);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
@@ -679,7 +679,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
out:
-@@ -3297,9 +3331,9 @@ static int
+@@ -3304,9 +3338,9 @@ static int
{
struct trace_event_call *call = &event_print;
struct ring_buffer_event *event;
@@ -691,7 +691,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
char *tbuffer;
if (tracing_disabled || tracing_selftest_running)
-@@ -3308,7 +3342,7 @@ static int
+@@ -3315,7 +3349,7 @@ static int
/* Don't pollute graph traces with trace_vprintk internals */
pause_graph_tracing();
@@ -700,7 +700,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_disable_notrace();
-@@ -3320,11 +3354,10 @@ static int
+@@ -3327,11 +3361,10 @@ static int
len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
@@ -713,7 +713,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!event)
goto out;
entry = ring_buffer_event_data(event);
-@@ -3333,7 +3366,7 @@ static int
+@@ -3340,7 +3373,7 @@ static int
memcpy(&entry->buf, tbuffer, len + 1);
if (!call_filter_check_discard(call, entry, buffer, event)) {
__buffer_unlock_commit(buffer, event);
@@ -722,7 +722,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
out:
-@@ -6646,7 +6679,6 @@ tracing_mark_write(struct file *filp, co
+@@ -6653,7 +6686,6 @@ tracing_mark_write(struct file *filp, co
enum event_trigger_type tt = ETT_NONE;
struct trace_buffer *buffer;
struct print_entry *entry;
@@ -730,7 +730,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ssize_t written;
int size;
int len;
-@@ -6666,7 +6698,6 @@ tracing_mark_write(struct file *filp, co
+@@ -6673,7 +6705,6 @@ tracing_mark_write(struct file *filp, co
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
@@ -738,7 +738,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
/* If less than "<faulted>", then make sure we can still add that */
-@@ -6675,7 +6706,7 @@ tracing_mark_write(struct file *filp, co
+@@ -6682,7 +6713,7 @@ tracing_mark_write(struct file *filp, co
buffer = tr->array_buffer.buffer;
event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
@@ -747,7 +747,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!event))
/* Ring buffer disabled, return as if not open for write */
return -EBADF;
-@@ -6727,7 +6758,6 @@ tracing_mark_raw_write(struct file *filp
+@@ -6734,7 +6765,6 @@ tracing_mark_raw_write(struct file *filp
struct ring_buffer_event *event;
struct trace_buffer *buffer;
struct raw_data_entry *entry;
@@ -755,7 +755,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ssize_t written;
int size;
int len;
-@@ -6749,14 +6779,13 @@ tracing_mark_raw_write(struct file *filp
+@@ -6756,14 +6786,13 @@ tracing_mark_raw_write(struct file *filp
BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
@@ -812,7 +812,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
}
#endif /* CONFIG_STACKTRACE */
-@@ -1003,10 +1001,10 @@ extern void graph_trace_open(struct trac
+@@ -1008,10 +1006,10 @@ extern void graph_trace_open(struct trac
extern void graph_trace_close(struct trace_iterator *iter);
extern int __trace_graph_entry(struct trace_array *tr,
struct ftrace_graph_ent *trace,
@@ -825,7 +825,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DYNAMIC_FTRACE
extern struct ftrace_hash __rcu *ftrace_graph_hash;
-@@ -1469,15 +1467,15 @@ extern int call_filter_check_discard(str
+@@ -1474,15 +1472,15 @@ extern int call_filter_check_discard(str
void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
@@ -844,7 +844,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DECLARE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
-@@ -1538,8 +1536,7 @@ static inline bool
+@@ -1543,8 +1541,7 @@ static inline bool
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
@@ -854,7 +854,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
-@@ -1549,12 +1546,12 @@ static inline void
+@@ -1554,12 +1551,12 @@ static inline void
event_trigger_unlock_commit(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
@@ -869,7 +869,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (tt)
event_triggers_post_call(file, tt);
-@@ -1566,8 +1563,7 @@ event_trigger_unlock_commit(struct trace
+@@ -1571,8 +1568,7 @@ event_trigger_unlock_commit(struct trace
* @buffer: The ring buffer that the event is being written to
* @event: The event meta data in the ring buffer
* @entry: The event itself
@@ -879,7 +879,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* This is a helper function to handle triggers that require data
* from the event itself. It also tests the event against filters and
-@@ -1580,14 +1576,14 @@ static inline void
+@@ -1585,14 +1581,14 @@ static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct trace_buffer *buffer,
struct ring_buffer_event *event,
@@ -1407,63 +1407,81 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
check_critical_timing(tr, data, parent_ip ? : ip, cpu);
data->critical_start = 0;
atomic_dec(&data->disabled);
-@@ -441,7 +439,7 @@ void start_critical_timings(void)
- int pc = preempt_count();
-
- if (preempt_trace(pc) || irq_trace())
+@@ -438,20 +436,16 @@ stop_critical_timing(unsigned long ip, u
+ /* start and stop critical timings used to for stoppage (in idle) */
+ void start_critical_timings(void)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) || irq_trace())
- start_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
++ if (preempt_trace(preempt_count()) || irq_trace())
+ start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(start_critical_timings);
NOKPROBE_SYMBOL(start_critical_timings);
-@@ -451,7 +449,7 @@ void stop_critical_timings(void)
- int pc = preempt_count();
- if (preempt_trace(pc) || irq_trace())
+ void stop_critical_timings(void)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) || irq_trace())
- stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1, pc);
++ if (preempt_trace(preempt_count()) || irq_trace())
+ stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
EXPORT_SYMBOL_GPL(stop_critical_timings);
NOKPROBE_SYMBOL(stop_critical_timings);
-@@ -612,7 +610,7 @@ void tracer_hardirqs_on(unsigned long a0
- unsigned int pc = preempt_count();
-
- if (!preempt_trace(pc) && irq_trace())
+@@ -609,19 +603,15 @@ static void irqsoff_tracer_stop(struct t
+ */
+ void tracer_hardirqs_on(unsigned long a0, unsigned long a1)
+ {
+- unsigned int pc = preempt_count();
+-
+- if (!preempt_trace(pc) && irq_trace())
- stop_critical_timing(a0, a1, pc);
++ if (!preempt_trace(preempt_count()) && irq_trace())
+ stop_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_on);
-@@ -621,7 +619,7 @@ void tracer_hardirqs_off(unsigned long a
- unsigned int pc = preempt_count();
-
- if (!preempt_trace(pc) && irq_trace())
+ void tracer_hardirqs_off(unsigned long a0, unsigned long a1)
+ {
+- unsigned int pc = preempt_count();
+-
+- if (!preempt_trace(pc) && irq_trace())
- start_critical_timing(a0, a1, pc);
++ if (!preempt_trace(preempt_count()) && irq_trace())
+ start_critical_timing(a0, a1);
}
NOKPROBE_SYMBOL(tracer_hardirqs_off);
-@@ -664,7 +662,7 @@ void tracer_preempt_on(unsigned long a0,
- int pc = preempt_count();
-
- if (preempt_trace(pc) && !irq_trace())
+@@ -661,18 +651,14 @@ static struct tracer irqsoff_tracer __re
+ #ifdef CONFIG_PREEMPT_TRACER
+ void tracer_preempt_on(unsigned long a0, unsigned long a1)
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) && !irq_trace())
- stop_critical_timing(a0, a1, pc);
++ if (preempt_trace(preempt_count()) && !irq_trace())
+ stop_critical_timing(a0, a1);
}
void tracer_preempt_off(unsigned long a0, unsigned long a1)
-@@ -672,7 +670,7 @@ void tracer_preempt_off(unsigned long a0
- int pc = preempt_count();
-
- if (preempt_trace(pc) && !irq_trace())
+ {
+- int pc = preempt_count();
+-
+- if (preempt_trace(pc) && !irq_trace())
- start_critical_timing(a0, a1, pc);
++ if (preempt_trace(preempt_count()) && !irq_trace())
+ start_critical_timing(a0, a1);
}
static int preemptoff_tracer_init(struct trace_array *tr)
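
The trace_irqsoff.c hunks above all make the same substitution: the per-function `int pc = preempt_count();` local goes away and preempt_count() is evaluated directly inside the condition. With the pc argument dropped from start_critical_timing() and stop_critical_timing(), the local's only remaining reader is preempt_trace(), and in configurations where that compiles down to a constant that discards its argument, pc ends up assigned but never read. A standalone sketch of that warning pattern (every name below is a stub invented for illustration, not the kernel's definition):

	/* Build with: gcc -Wall -c sketch.c */
	#define preempt_trace(pc)	(0)	/* stub: tracer configured out,
						   argument discarded unevaluated */
	static int irq_trace(void) { return 1; }		/* stub */
	static int fake_preempt_count(void) { return 0; }	/* stub */
	static void stop_critical_timing(unsigned long a0, unsigned long a1)
	{ (void)a0; (void)a1; }					/* stub */

	void old_form(unsigned long a0, unsigned long a1)
	{
		int pc = fake_preempt_count();	/* set here ... */

		/* ... but the macro drops it: gcc -Wall flags pc as
		   set but not used (-Wunused-but-set-variable) */
		if (!preempt_trace(pc) && irq_trace())
			stop_critical_timing(a0, a1);
	}

	void new_form(unsigned long a0, unsigned long a1)
	{
		/* no local: the count is read inside the condition,
		   so nothing is left over when the macro folds away */
		if (!preempt_trace(fake_preempt_count()) && irq_trace())
			stop_critical_timing(a0, a1);
	}

Compiled with gcc -Wall, old_form() trips the set-but-not-used warning while new_form() is clean, which is the shape of the change the refreshed hunks apply to start/stop_critical_timings(), tracer_hardirqs_on/off() and tracer_preempt_on/off().
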
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
-@@ -1383,8 +1383,7 @@ static nokprobe_inline void
+@@ -1384,8 +1384,7 @@ static nokprobe_inline void
if (trace_trigger_soft_disabled(trace_file))
return;
@@ -1473,7 +1491,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
-@@ -1393,7 +1392,7 @@ static nokprobe_inline void
+@@ -1394,7 +1393,7 @@ static nokprobe_inline void
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
@@ -1482,7 +1500,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!fbuffer.event)
return;
-@@ -1402,7 +1401,7 @@ static nokprobe_inline void
+@@ -1403,7 +1402,7 @@ static nokprobe_inline void
entry->ip = (unsigned long)tk->rp.kp.addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
@@ -1491,7 +1509,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void
-@@ -1431,8 +1430,7 @@ static nokprobe_inline void
+@@ -1432,8 +1431,7 @@ static nokprobe_inline void
if (trace_trigger_soft_disabled(trace_file))
return;
@@ -1501,7 +1519,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fbuffer.trace_file = trace_file;
dsize = __get_data_size(&tk->tp, regs);
-@@ -1440,7 +1438,7 @@ static nokprobe_inline void
+@@ -1441,7 +1439,7 @@ static nokprobe_inline void
trace_event_buffer_lock_reserve(&fbuffer.buffer, trace_file,
call->event.type,
sizeof(*entry) + tk->tp.size + dsize,
@@ -1510,7 +1528,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!fbuffer.event)
return;
-@@ -1450,7 +1448,7 @@ static nokprobe_inline void
+@@ -1451,7 +1449,7 @@ static nokprobe_inline void
entry->ret_ip = (unsigned long)ri->ret_addr;
store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);