-rw-r--r--  patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch  6
-rw-r--r--  patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch  2
-rw-r--r--  patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch  16
-rw-r--r--  patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch  2
-rw-r--r--  patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch  2
-rw-r--r--  patches/0011-printk_safe-remove-printk-safe-code.patch  4
-rw-r--r--  patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch  2
-rw-r--r--  patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch  2
-rw-r--r--  patches/localversion.patch  2
-rw-r--r--  patches/mm-convert-swap-to-percpu-locked.patch  2
-rw-r--r--  patches/mm-disable-sloub-rt.patch  4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch  14
-rw-r--r--  patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch  26
-rw-r--r--  patches/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch  117
-rw-r--r--  patches/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch  108
-rw-r--r--  patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch  2
-rw-r--r--  patches/printk-Force-a-line-break-on-pr_cont-n.patch  33
-rw-r--r--  patches/random-make-it-work-on-rt.patch  4
-rw-r--r--  patches/sched-disable-rt-group-sched-on-rt.patch  2
-rw-r--r--  patches/series  3
-rw-r--r--  patches/skbufhead-raw-lock.patch  4
-rw-r--r--  patches/slub-disable-SLUB_CPU_PARTIAL.patch  2
-rw-r--r--  patches/softirq-preempt-fix-3-re.patch  2
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch  2
-rw-r--r--  patches/x86-stackprot-no-random-on-rt.patch  4
25 files changed, 196 insertions, 171 deletions
diff --git a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index 36b556ccace6..e5eaa1ba2098 100644
--- a/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0001-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -2788,13 +2795,18 @@ void drain_zone_pages(struct zone *zone,
+@@ -2797,13 +2804,18 @@ void drain_zone_pages(struct zone *zone,
{
unsigned long flags;
int to_drain, batch;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2810,14 +2822,21 @@ static void drain_pages_zone(unsigned in
+@@ -2819,14 +2831,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3045,7 +3064,10 @@ static void free_unref_page_commit(struc
+@@ -3054,7 +3073,10 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch b/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
index 7c5135b5e85b..c43a095290a1 100644
--- a/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
+++ b/patches/0001-bpf-Tighten-the-requirements-for-preallocated-hash-m.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
-@@ -8189,26 +8189,43 @@ static bool is_tracing_prog_type(enum bp
+@@ -8205,26 +8205,43 @@ static bool is_tracing_prog_type(enum bp
}
}
diff --git a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
index cb3958983d96..a07ed7c8df38 100644
--- a/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
+++ b/patches/0002-Split-IRQ-off-and-zone-lock-while-freeing-pages-from.patch
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__free_one_page(page, page_to_pfn(page), zone, 0, mt);
trace_mm_page_pcpu_drain(page, 0, mt);
}
-@@ -2806,7 +2821,7 @@ void drain_zone_pages(struct zone *zone,
+@@ -2815,7 +2830,7 @@ void drain_zone_pages(struct zone *zone,
local_irq_restore(flags);
if (to_drain > 0)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#endif
-@@ -2836,7 +2851,7 @@ static void drain_pages_zone(unsigned in
+@@ -2845,7 +2860,7 @@ static void drain_pages_zone(unsigned in
local_irq_restore(flags);
if (count)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3035,7 +3050,8 @@ static bool free_unref_page_prepare(stru
+@@ -3044,7 +3059,8 @@ static bool free_unref_page_prepare(stru
return true;
}
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct zone *zone = page_zone(page);
struct per_cpu_pages *pcp;
-@@ -3064,10 +3080,8 @@ static void free_unref_page_commit(struc
+@@ -3073,10 +3089,8 @@ static void free_unref_page_commit(struc
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3078,13 +3092,17 @@ void free_unref_page(struct page *page)
+@@ -3087,13 +3101,17 @@ void free_unref_page(struct page *page)
{
unsigned long flags;
unsigned long pfn = page_to_pfn(page);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -3095,6 +3113,11 @@ void free_unref_page_list(struct list_he
+@@ -3104,6 +3122,11 @@ void free_unref_page_list(struct list_he
struct page *page, *next;
unsigned long flags, pfn;
int batch_count = 0;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Prepare pages for freeing */
list_for_each_entry_safe(page, next, list, lru) {
-@@ -3107,10 +3130,12 @@ void free_unref_page_list(struct list_he
+@@ -3116,10 +3139,12 @@ void free_unref_page_list(struct list_he
local_irq_save(flags);
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Guard against excessive IRQ disabled times when we get
-@@ -3123,6 +3148,21 @@ void free_unref_page_list(struct list_he
+@@ -3132,6 +3157,21 @@ void free_unref_page_list(struct list_he
}
}
local_irq_restore(flags);
diff --git a/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch b/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
index 867cca867861..d788cd92faae 100644
--- a/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
+++ b/patches/0002-bpf-Enforce-preallocation-for-instrumentation-progra.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
-@@ -8214,16 +8214,21 @@ static int check_map_prog_compatibility(
+@@ -8230,16 +8230,21 @@ static int check_map_prog_compatibility(
* of the memory allocator or at a place where a recursion into the
* memory allocator would see inconsistent state.
*
diff --git a/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch b/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
index 34f71f678fd0..8bd4f26cf5bb 100644
--- a/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
+++ b/patches/0004-bpf-tracing-Remove-redundant-preempt_disable-in-__bp.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
-@@ -1475,10 +1475,9 @@ void bpf_put_raw_tracepoint(struct bpf_r
+@@ -1505,10 +1505,9 @@ void bpf_put_raw_tracepoint(struct bpf_r
static __always_inline
void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
{
diff --git a/patches/0011-printk_safe-remove-printk-safe-code.patch b/patches/0011-printk_safe-remove-printk-safe-code.patch
index 4b010241b34d..c4da53ba5e39 100644
--- a/patches/0011-printk_safe-remove-printk-safe-code.patch
+++ b/patches/0011-printk_safe-remove-printk-safe-code.patch
@@ -713,7 +713,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -9142,7 +9142,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9155,7 +9155,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -721,7 +721,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -9219,7 +9218,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -9232,7 +9231,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.array_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch b/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
index 111db14fa7d9..31b7ca793901 100644
--- a/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
+++ b/patches/dma-buf-Use-seqlock_t-instread-disabling-preemption.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_read_unlock();
goto retry;
}
-@@ -1197,12 +1197,12 @@ static int dma_buf_debug_show(struct seq
+@@ -1198,12 +1198,12 @@ static int dma_buf_debug_show(struct seq
robj = buf_obj->resv;
while (true) {
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 5f876bf78db0..a6a49d9f2e82 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1611,7 +1611,7 @@ struct nfs_unlinkdata {
+@@ -1613,7 +1613,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 4c1841b6475d..bbb08330835d 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt6
++-rt7
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index c5e8a1741f66..5ad4b6a1ed95 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -2902,9 +2902,9 @@ static void drain_local_pages_wq(struct
+@@ -2911,9 +2911,9 @@ static void drain_local_pages_wq(struct
* cpu which is allright but we also have to make sure to not move to
* a different one.
*/
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index 7a2f04e7b617..a61fa70bfac7 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1799,6 +1799,7 @@ choice
+@@ -1781,6 +1781,7 @@ choice
config SLAB
bool "SLAB"
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1819,6 +1820,7 @@ config SLUB
+@@ -1801,6 +1802,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 6aa7f0ee0a26..7eba95946fc4 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -5425,12 +5428,12 @@ static int mem_cgroup_move_account(struc
+@@ -5428,12 +5431,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -6494,10 +6497,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -6497,10 +6500,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6566,7 +6569,7 @@ static void uncharge_batch(const struct
+@@ -6569,7 +6572,7 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6574,7 +6577,7 @@ static void uncharge_batch(const struct
+@@ -6577,7 +6580,7 @@ static void uncharge_batch(const struct
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -6735,11 +6738,11 @@ void mem_cgroup_migrate(struct page *old
+@@ -6738,11 +6741,11 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6921,6 +6924,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6924,6 +6927,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6966,10 +6970,14 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6969,10 +6973,14 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 26819c82dabd..3ddc717c3d7e 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __free_pages_core(struct page *page, unsigned int order)
-@@ -2812,13 +2815,13 @@ void drain_zone_pages(struct zone *zone,
+@@ -2821,13 +2824,13 @@ void drain_zone_pages(struct zone *zone,
int to_drain, batch;
LIST_HEAD(dst);
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (to_drain > 0)
free_pcppages_bulk(zone, &dst, false);
-@@ -2840,7 +2843,7 @@ static void drain_pages_zone(unsigned in
+@@ -2849,7 +2852,7 @@ static void drain_pages_zone(unsigned in
LIST_HEAD(dst);
int count;
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2848,7 +2851,7 @@ static void drain_pages_zone(unsigned in
+@@ -2857,7 +2860,7 @@ static void drain_pages_zone(unsigned in
if (count)
isolate_pcp_pages(count, pcp, &dst);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (count)
free_pcppages_bulk(zone, &dst, false);
-@@ -3098,9 +3101,9 @@ void free_unref_page(struct page *page)
+@@ -3107,9 +3110,9 @@ void free_unref_page(struct page *page)
if (!free_unref_page_prepare(page, pfn))
return;
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!list_empty(&dst))
free_pcppages_bulk(zone, &dst, false);
}
-@@ -3127,7 +3130,7 @@ void free_unref_page_list(struct list_he
+@@ -3136,7 +3139,7 @@ void free_unref_page_list(struct list_he
set_page_private(page, pfn);
}
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, next, list, lru) {
unsigned long pfn = page_private(page);
enum zone_type type;
-@@ -3142,12 +3145,12 @@ void free_unref_page_list(struct list_he
+@@ -3151,12 +3154,12 @@ void free_unref_page_list(struct list_he
* a large list of pages to free.
*/
if (++batch_count == SWAP_CLUSTER_MAX) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (i = 0; i < __MAX_NR_ZONES; ) {
struct page *page;
-@@ -3297,7 +3300,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3306,7 +3309,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, alloc_flags, pcp, list);
-@@ -3305,7 +3308,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -3314,7 +3317,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
zone_statistics(preferred_zone, zone);
}
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -3332,7 +3335,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3341,7 +3344,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -3352,7 +3355,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3361,7 +3364,7 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
out:
/* Separate test+clear to avoid unnecessary atomics */
-@@ -3365,7 +3368,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -3374,7 +3377,7 @@ struct page *rmqueue(struct zone *prefer
return page;
failed:
@@ -163,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -8713,7 +8716,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8722,7 +8725,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -8722,7 +8725,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -8731,7 +8734,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch b/patches/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
deleted file mode 100644
index de71a6386678..000000000000
--- a/patches/mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
+++ /dev/null
@@ -1,117 +0,0 @@
-From: "Luis Claudio R. Goncalves" <lclaudio@uudg.org>
-Date: Tue, 25 Jun 2019 11:28:04 -0300
-Subject: [PATCH] mm/zswap: Do not disable preemption in
- zswap_frontswap_store()
-
-Zswap causes "BUG: scheduling while atomic" by blocking on a rt_spin_lock() with
-preemption disabled. The preemption is disabled by get_cpu_var() in
-zswap_frontswap_store() to protect the access of the zswap_dstmem percpu variable.
-
-Use get_locked_var() to protect the percpu zswap_dstmem variable, making the
-code preemptive.
-
-As get_cpu_ptr() also disables preemption, replace it by this_cpu_ptr() and
-remove the counterpart put_cpu_ptr().
-
-Steps to Reproduce:
-
- 1. # grubby --args "zswap.enabled=1" --update-kernel DEFAULT
- 2. # reboot
- 3. Calculate the amount o memory to be used by the test:
- ---> grep MemAvailable /proc/meminfo
- ---> Add 25% ~ 50% to that value
- 4. # stress --vm 1 --vm-bytes ${MemAvailable+25%} --timeout 240s
-
-Usually, in less than 5 minutes the backtrace listed below appears, followed
-by a kernel panic:
-
-| BUG: scheduling while atomic: kswapd1/181/0x00000002
-|
-| Preemption disabled at:
-| [<ffffffff8b2a6cda>] zswap_frontswap_store+0x21a/0x6e1
-|
-| Kernel panic - not syncing: scheduling while atomic
-| CPU: 14 PID: 181 Comm: kswapd1 Kdump: loaded Not tainted 5.0.14-rt9 #1
-| Hardware name: AMD Pence/Pence, BIOS WPN2321X_Weekly_12_03_21 03/19/2012
-| Call Trace:
-| panic+0x106/0x2a7
-| __schedule_bug.cold+0x3f/0x51
-| __schedule+0x5cb/0x6f0
-| schedule+0x43/0xd0
-| rt_spin_lock_slowlock_locked+0x114/0x2b0
-| rt_spin_lock_slowlock+0x51/0x80
-| zbud_alloc+0x1da/0x2d0
-| zswap_frontswap_store+0x31a/0x6e1
-| __frontswap_store+0xab/0x130
-| swap_writepage+0x39/0x70
-| pageout.isra.0+0xe3/0x320
-| shrink_page_list+0xa8e/0xd10
-| shrink_inactive_list+0x251/0x840
-| shrink_node_memcg+0x213/0x770
-| shrink_node+0xd9/0x450
-| balance_pgdat+0x2d5/0x510
-| kswapd+0x218/0x470
-| kthread+0xfb/0x130
-| ret_from_fork+0x27/0x50
-
-Cc: stable-rt@vger.kernel.org
-Reported-by: Ping Fang <pifang@redhat.com>
-Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
-Reviewed-by: Daniel Bristot de Oliveira <bristot@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- mm/zswap.c | 12 +++++++-----
- 1 file changed, 7 insertions(+), 5 deletions(-)
-
---- a/mm/zswap.c
-+++ b/mm/zswap.c
-@@ -18,6 +18,7 @@
- #include <linux/highmem.h>
- #include <linux/slab.h>
- #include <linux/spinlock.h>
-+#include <linux/locallock.h>
- #include <linux/types.h>
- #include <linux/atomic.h>
- #include <linux/frontswap.h>
-@@ -995,6 +996,8 @@ static void zswap_fill_page(void *ptr, u
- memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
- }
-
-+/* protect zswap_dstmem from concurrency */
-+static DEFINE_LOCAL_IRQ_LOCK(zswap_dstmem_lock);
- /*********************************
- * frontswap hooks
- **********************************/
-@@ -1074,12 +1077,11 @@ static int zswap_frontswap_store(unsigne
- }
-
- /* compress */
-- dst = get_cpu_var(zswap_dstmem);
-- tfm = *get_cpu_ptr(entry->pool->tfm);
-+ dst = get_locked_var(zswap_dstmem_lock, zswap_dstmem);
-+ tfm = *this_cpu_ptr(entry->pool->tfm);
- src = kmap_atomic(page);
- ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
- kunmap_atomic(src);
-- put_cpu_ptr(entry->pool->tfm);
- if (ret) {
- ret = -EINVAL;
- goto put_dstmem;
-@@ -1103,7 +1105,7 @@ static int zswap_frontswap_store(unsigne
- memcpy(buf, &zhdr, hlen);
- memcpy(buf + hlen, dst, dlen);
- zpool_unmap_handle(entry->pool->zpool, handle);
-- put_cpu_var(zswap_dstmem);
-+ put_locked_var(zswap_dstmem_lock, zswap_dstmem);
-
- /* populate entry */
- entry->offset = offset;
-@@ -1131,7 +1133,7 @@ static int zswap_frontswap_store(unsigne
- return 0;
-
- put_dstmem:
-- put_cpu_var(zswap_dstmem);
-+ put_locked_var(zswap_dstmem_lock, zswap_dstmem);
- zswap_pool_put(entry->pool);
- freepage:
- zswap_entry_cache_free(entry);
diff --git a/patches/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch b/patches/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
new file mode 100644
index 000000000000..7274ad911b3a
--- /dev/null
+++ b/patches/mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
@@ -0,0 +1,108 @@
+From: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com>
+Date: Tue, 25 Jun 2019 11:28:04 -0300
+Subject: [PATCH] mm/zswap: Use local lock to protect per-CPU data
+
+zwap uses per-CPU compression. The per-CPU data pointer is acquired with
+get_cpu_ptr() which implicitly disables preemption. It allocates
+memory inside the preempt disabled region which conflicts with the
+PREEMPT_RT semantics.
+
+Replace the implicit preemption control with an explicit local lock.
+This allows RT kernels to substitute it with a real per CPU lock, which
+serializes the access but keeps the code section preemptible. On non RT
+kernels this maps to preempt_disable() as before, i.e. no functional
+change.
+
+[bigeasy: Use local_lock(), additional hunks, patch description]
+
+Cc: Seth Jennings <sjenning@redhat.com>
+Cc: Dan Streetman <ddstreet@ieee.org>
+Cc: Vitaly Wool <vitaly.wool@konsulko.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+Signed-off-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/zswap.c | 23 ++++++++++++++---------
+ 1 file changed, 14 insertions(+), 9 deletions(-)
+
+--- a/mm/zswap.c
++++ b/mm/zswap.c
+@@ -18,6 +18,7 @@
+ #include <linux/highmem.h>
+ #include <linux/slab.h>
+ #include <linux/spinlock.h>
++#include <linux/locallock.h>
+ #include <linux/types.h>
+ #include <linux/atomic.h>
+ #include <linux/frontswap.h>
+@@ -390,6 +391,8 @@ static struct zswap_entry *zswap_entry_f
+ * per-cpu code
+ **********************************/
+ static DEFINE_PER_CPU(u8 *, zswap_dstmem);
++/* Used for zswap_dstmem and tfm */
++static DEFINE_LOCAL_IRQ_LOCK(zswap_cpu_lock);
+
+ static int zswap_dstmem_prepare(unsigned int cpu)
+ {
+@@ -919,10 +922,11 @@ static int zswap_writeback_entry(struct
+ dlen = PAGE_SIZE;
+ src = (u8 *)zhdr + sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(zswap_cpu_lock);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ ret = crypto_comp_decompress(tfm, src, entry->length,
+ dst, &dlen);
+- put_cpu_ptr(entry->pool->tfm);
++ local_unlock(zswap_cpu_lock);
+ kunmap_atomic(dst);
+ BUG_ON(ret);
+ BUG_ON(dlen != PAGE_SIZE);
+@@ -1074,12 +1078,12 @@ static int zswap_frontswap_store(unsigne
+ }
+
+ /* compress */
+- dst = get_cpu_var(zswap_dstmem);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(zswap_cpu_lock);
++ dst = *this_cpu_ptr(&zswap_dstmem);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ src = kmap_atomic(page);
+ ret = crypto_comp_compress(tfm, src, PAGE_SIZE, dst, &dlen);
+ kunmap_atomic(src);
+- put_cpu_ptr(entry->pool->tfm);
+ if (ret) {
+ ret = -EINVAL;
+ goto put_dstmem;
+@@ -1103,7 +1107,7 @@ static int zswap_frontswap_store(unsigne
+ memcpy(buf, &zhdr, hlen);
+ memcpy(buf + hlen, dst, dlen);
+ zpool_unmap_handle(entry->pool->zpool, handle);
+- put_cpu_var(zswap_dstmem);
++ local_unlock(zswap_cpu_lock);
+
+ /* populate entry */
+ entry->offset = offset;
+@@ -1131,7 +1135,7 @@ static int zswap_frontswap_store(unsigne
+ return 0;
+
+ put_dstmem:
+- put_cpu_var(zswap_dstmem);
++ local_unlock(zswap_cpu_lock);
+ zswap_pool_put(entry->pool);
+ freepage:
+ zswap_entry_cache_free(entry);
+@@ -1176,9 +1180,10 @@ static int zswap_frontswap_load(unsigned
+ if (zpool_evictable(entry->pool->zpool))
+ src += sizeof(struct zswap_header);
+ dst = kmap_atomic(page);
+- tfm = *get_cpu_ptr(entry->pool->tfm);
++ local_lock(zswap_cpu_lock);
++ tfm = *this_cpu_ptr(entry->pool->tfm);
+ ret = crypto_comp_decompress(tfm, src, entry->length, dst, &dlen);
+- put_cpu_ptr(entry->pool->tfm);
++ local_unlock(zswap_cpu_lock);
+ kunmap_atomic(dst);
+ zpool_unmap_handle(entry->pool->zpool, entry->handle);
+ BUG_ON(ret);
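[Editor's note, not part of the series: a minimal standalone sketch of the conversion the commit message above describes -- replacing get_cpu_var()/get_cpu_ptr() with the RT locallock API that the patch itself includes via <linux/locallock.h>. The names example_buf, example_lock and example_compress() are placeholders for illustration only.]

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(u8 *, example_buf);
/* Serializes access to example_buf: a per-CPU lock (preemptible section) on
 * PREEMPT_RT, plain preempt_disable() on non-RT kernels. */
static DEFINE_LOCAL_IRQ_LOCK(example_lock);

static void example_compress(void)
{
	u8 *dst;

	/* Old pattern: dst = get_cpu_var(example_buf); ... put_cpu_var(example_buf);
	 * which keeps preemption disabled across code that may sleep on RT. */
	local_lock(example_lock);
	dst = *this_cpu_ptr(&example_buf);
	/* ... use dst; on PREEMPT_RT this section may now be preempted ... */
	local_unlock(example_lock);
}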
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index 3e2a60f09d74..18b6530300ca 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -160,7 +160,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (qdisc->flags & TCQ_F_NOLOCK)
spin_unlock(&qdisc->seqlock);
}
-@@ -541,7 +556,7 @@ static inline spinlock_t *qdisc_root_sle
+@@ -542,7 +557,7 @@ static inline spinlock_t *qdisc_root_sle
return qdisc_lock(root);
}
diff --git a/patches/printk-Force-a-line-break-on-pr_cont-n.patch b/patches/printk-Force-a-line-break-on-pr_cont-n.patch
new file mode 100644
index 000000000000..2283a8e76911
--- /dev/null
+++ b/patches/printk-Force-a-line-break-on-pr_cont-n.patch
@@ -0,0 +1,33 @@
+From: =?UTF-8?q?=E6=B1=AA=E5=8B=8710269566?= <wang.yong12@zte.com.cn>
+Date: Thu, 21 May 2020 09:37:44 +0800
+Subject: [PATCH] printk: Force a line break on pr_cont("\n")
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Since the printk rework, pr_cont("\n") will not lead to a line break.
+A new line will only be created if
+- cpu != c->cpu_owner || !(flags & LOG_CONT)
+- c->len + len > sizeof(c->buf)
+
+Flush the buffer to enforce a new line on pr_cont().
+
+[bigeasy: reword commit message ]
+
+Signed-off-by: 汪勇10269566 <wang.yong12@zte.com.cn>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Acked-by: John Ogness <john.ogness@linutronix.de>
+---
+ kernel/printk/printk.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1871,6 +1871,7 @@ static void cont_add(int ctx, int cpu, u
+ // but later continuations can add a newline.
+ if (flags & LOG_NEWLINE) {
+ c->flags |= LOG_NEWLINE;
++ cont_flush(ctx);
+ }
+ }
+
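[Editor's note, not part of the series: a minimal, hypothetical caller showing the behaviour the hunk above targets. With the added cont_flush(), a trailing pr_cont("\n") terminates the continuation line instead of leaving it buffered until the next mismatch or overflow. example_report() is an illustration, not code from the series.]

#include <linux/kernel.h>	/* pr_info(), pr_cont() */

static void example_report(int ok)
{
	pr_info("selftest:");			/* opens a continuation line */
	pr_cont(" step1=%s", ok ? "ok" : "fail");
	pr_cont("\n");				/* line break is enforced here */
	pr_info("next message");		/* starts on its own line */
}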
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 3f9f8c9e3808..32c3fdc8d958 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <asm/mshyperv.h>
#include <linux/delay.h>
-@@ -1233,6 +1234,8 @@ static void vmbus_isr(void)
+@@ -1247,6 +1248,8 @@ static void vmbus_isr(void)
void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bool handled = false;
if (unlikely(page_addr == NULL))
-@@ -1277,7 +1280,7 @@ static void vmbus_isr(void)
+@@ -1291,7 +1294,7 @@ static void vmbus_isr(void)
tasklet_schedule(&hv_cpu->msg_dpc);
}
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index e8d7a2aecfe0..e85c89af99f5 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -907,6 +907,7 @@ config CFS_BANDWIDTH
+@@ -891,6 +891,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/series b/patches/series
index b78a5694f9b2..8ebfdfe74522 100644
--- a/patches/series
+++ b/patches/series
@@ -51,6 +51,7 @@ printk-kmsg_dump-remove-mutex-usage.patch
printk-devkmsg-read-Return-EPIPE-when-the-first-mess.patch
printk-handle-iterating-while-buffer-changing.patch
printk-hack-out-emergency-loglevel-usage.patch
+printk-Force-a-line-break-on-pr_cont-n.patch
serial-8250-only-atomic-lock-for-console.patch
serial-8250-fsl-ingenic-mtk-fix-atomic-console.patch
@@ -258,7 +259,7 @@ mm-Don-t-warn-about-atomic-memory-allocations-during.patch
mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
mm-memcontrol-do_not_disable_irq.patch
mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
-mm-zswap-Do-not-disable-preemption-in-zswap_frontswa.patch
+mm-zswap-Use-local-lock-to-protect-per-CPU-data.patch
# RADIX TREE
# Local lock use case ....
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 4d1a960d6edf..21e5c7eac378 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -10186,10 +10199,13 @@ static int dev_cpu_dead(unsigned int old
+@@ -10188,10 +10201,13 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -10502,8 +10518,9 @@ static int __init net_dev_init(void)
+@@ -10504,8 +10520,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 509e4aef5e00..473802ed4d66 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1887,7 +1887,7 @@ config SHUFFLE_PAGE_ALLOCATOR
+@@ -1869,7 +1869,7 @@ config SHUFFLE_PAGE_ALLOCATOR
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index e8483ccdcc0a..83fe32d12396 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -10168,6 +10174,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -10170,6 +10176,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 8a2eea952cc5..ffea54287267 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -7333,6 +7333,14 @@ int kvm_arch_init(void *opaque)
+@@ -7350,6 +7350,14 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-stackprot-no-random-on-rt.patch b/patches/x86-stackprot-no-random-on-rt.patch
index 32419846de56..f5f4521f92a1 100644
--- a/patches/x86-stackprot-no-random-on-rt.patch
+++ b/patches/x86-stackprot-no-random-on-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
-@@ -60,7 +60,7 @@
+@@ -65,7 +65,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
u64 tsc;
#ifdef CONFIG_X86_64
-@@ -71,8 +71,14 @@ static __always_inline void boot_init_st
+@@ -76,8 +76,14 @@ static __always_inline void boot_init_st
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.