author      Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2020-10-30 20:01:51 +0100
committer   Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2020-10-30 20:01:51 +0100
commit      5b60e72400e039e1d6dd473222a2dd7c302c5c96 (patch)
tree        d4995cefbb1902545266c33651096fe0853bf610
parent      b1e652e1e3b664c72e163c36d036821d6bfed746 (diff)
download    linux-rt-5b60e72400e039e1d6dd473222a2dd7c302c5c96.tar.gz
[ANNOUNCE] v5.10-rc1-rt2   (v5.10-rc1-rt2-patches)
Dear RT folks!

I'm pleased to announce the v5.10-rc1-rt2 patch set.

Changes since v5.10-rc1-rt1:

  - Add Thomas Gleixner's "Preemptible variant of kmap_atomic & friends"
    series.

  - Apply a patch by Paul E. McKenney which avoids a warning while an RCU
    stall is printed.

  - The last update of the block-mq patches can trigger a warning if used
    by some drivers (USB storage, for instance). Reported by Mike Galbraith.

Known issues

  - It has been pointed out that due to changes to the printk code the
    internal buffer representation changed. This is only an issue if tools
    like `crash' are used to extract the printk buffer from a kernel memory
    image.

The delta patch against v5.10-rc1-rt1 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/incr/patch-5.10-rc1-rt1-rt2.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.10-rc1-rt2

The RT patch against v5.10-rc1 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patch-5.10-rc1-rt2.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10-rc1-rt2.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
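For orientation only (this is neither part of the announcement nor of the patch
queue below): the point of the kmap series can be sketched roughly as follows.
kmap_atomic()/kunmap_atomic() are taken from the current kernel; the
kmap_local_page()/kunmap_local() names are introduced later in this series
(patches 0016/0017) and are used here purely as an illustration, not as the
final interface of this queue.

    /* Illustrative sketch only -- not taken from the patches below. */
    #include <linux/highmem.h>
    #include <linux/string.h>

    /*
     * Today: kmap_atomic() disables pagefaults and preemption for as long
     * as the mapping exists, so nothing in between may sleep.
     */
    static void copy_from_highpage_atomic(struct page *page, void *dst, size_t len)
    {
    	void *src = kmap_atomic(page);	/* preemption + pagefaults disabled */

    	memcpy(dst, src, len);		/* must not sleep in this section */
    	kunmap_atomic(src);		/* preemption + pagefaults enabled again */
    }

    /*
     * With the generic, per-task kmap bookkeeping added by this series the
     * mapping stays valid across preemption, so the mapped section no longer
     * has to be atomic (assumed names, see patches 0016/0017).
     */
    static void copy_from_highpage_local(struct page *page, void *dst, size_t len)
    {
    	void *src = kmap_local_page(page);	/* task remains preemptible */

    	memcpy(dst, src, len);			/* may be preempted here */
    	kunmap_local(src);
    }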
-rw-r--r--  patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch  274
-rw-r--r--  patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch  25
-rw-r--r--  patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch  292
-rw-r--r--  patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch  283
-rw-r--r--  patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch  108
-rw-r--r--  patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch  206
-rw-r--r--  patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch  136
-rw-r--r--  patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch  157
-rw-r--r--  patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch  144
-rw-r--r--  patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch  119
-rw-r--r--  patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch  155
-rw-r--r--  patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch  189
-rw-r--r--  patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch  115
-rw-r--r--  patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch  147
-rw-r--r--  patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch  10
-rw-r--r--  patches/0015-io-mapping-Cleanup-atomic-iomap.patch  78
-rw-r--r--  patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch  12
-rw-r--r--  patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch  292
-rw-r--r--  patches/0017-mm-highmem-Provide-kmap_local.patch  177
-rw-r--r--  patches/0018-io-mapping-Provide-iomap_local-variant.patch  69
-rw-r--r--  patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch  20
-rw-r--r--  patches/arm-enable-highmem-for-rt.patch  160
-rw-r--r--  patches/arm-highmem-flush-tlb-on-unmap.patch  27
-rw-r--r--  patches/block-mq-Disable-preemption-in-blk_mq_complete_reque.patch  31
-rw-r--r--  patches/block-mq-drop-preempt-disable.patch  2
-rw-r--r--  patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch  114
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch  2
-rw-r--r--  patches/localversion.patch  2
-rw-r--r--  patches/mips-disable-highmem-on-rt.patch  22
-rw-r--r--  patches/mm-rt-kmap-atomic-scheduling.patch  287
-rw-r--r--  patches/oleg-signal-rt-fix.patch  4
-rw-r--r--  patches/power-disable-highmem-on-rt.patch  22
-rw-r--r--  patches/preempt-lazy-support.patch  22
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch  10
-rw-r--r--  patches/rcu-Don-t-invoke-try_invoke_on_locked_down_task-with.patch  97
-rw-r--r--  patches/rt-local-irq-lock.patch  42
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch  2
-rw-r--r--  patches/sched-mmdrop-delayed.patch  4
-rw-r--r--  patches/series  38
-rw-r--r--  patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch  4
-rw-r--r--  patches/softirq--Add-RT-variant.patch  2
-rw-r--r--  patches/x86-Enable-RT-also-on-32bit.patch  4
-rw-r--r--  patches/x86-Enable-RT.patch  2
-rw-r--r--  patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch  2
-rw-r--r--  patches/x86-highmem-add-a-already-used-pte-check.patch  22
-rw-r--r--  patches/x86-preempt-lazy.patch  4
46 files changed, 3317 insertions, 619 deletions
diff --git a/patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch b/patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch
new file mode 100644
index 000000000000..34317954e82a
--- /dev/null
+++ b/patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch
@@ -0,0 +1,274 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 22 Sep 2020 15:52:06 +0200
+Subject: [PATCH 01/18] sched: Make migrate_disable/enable() independent of RT
+
+Now that the scheduler can deal with migrate disable properly, there is no
+real compelling reason to make it only available for RT.
+
+There are quite some code pathes which needlessly disable preemption in
+order to prevent migration and some constructs like kmap_atomic() enforce
+it implicitly.
+
+Making it available independent of RT allows to provide a preemptible
+variant of kmap_atomic() and makes the code more consistent in general.
+
+FIXME: Rework the comment in preempt.h
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Juri Lelli <juri.lelli@redhat.com>
+Cc: Vincent Guittot <vincent.guittot@linaro.org>
+Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Ben Segall <bsegall@google.com>
+Cc: Mel Gorman <mgorman@suse.de>
+Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+---
+ include/linux/kernel.h | 21 ++++++++++++++-------
+ include/linux/preempt.h | 38 +++-----------------------------------
+ include/linux/sched.h | 2 +-
+ kernel/sched/core.c | 45 +++++++++++++++++++++++++++++++++++----------
+ kernel/sched/sched.h | 4 ++--
+ lib/smp_processor_id.c | 2 +-
+ 6 files changed, 56 insertions(+), 56 deletions(-)
+
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -204,6 +204,7 @@ extern int _cond_resched(void);
+ extern void ___might_sleep(const char *file, int line, int preempt_offset);
+ extern void __might_sleep(const char *file, int line, int preempt_offset);
+ extern void __cant_sleep(const char *file, int line, int preempt_offset);
++extern void __cant_migrate(const char *file, int line);
+
+ /**
+ * might_sleep - annotation for functions that can sleep
+@@ -227,6 +228,18 @@ extern void __cant_sleep(const char *fil
+ # define cant_sleep() \
+ do { __cant_sleep(__FILE__, __LINE__, 0); } while (0)
+ # define sched_annotate_sleep() (current->task_state_change = 0)
++
++/**
++ * cant_migrate - annotation for functions that cannot migrate
++ *
++ * Will print a stack trace if executed in code which is migratable
++ */
++# define cant_migrate() \
++ do { \
++ if (IS_ENABLED(CONFIG_SMP)) \
++ __cant_migrate(__FILE__, __LINE__); \
++ } while (0)
++
+ /**
+ * non_block_start - annotate the start of section where sleeping is prohibited
+ *
+@@ -251,6 +264,7 @@ extern void __cant_sleep(const char *fil
+ int preempt_offset) { }
+ # define might_sleep() do { might_resched(); } while (0)
+ # define cant_sleep() do { } while (0)
++# define cant_migrate() do { } while (0)
+ # define sched_annotate_sleep() do { } while (0)
+ # define non_block_start() do { } while (0)
+ # define non_block_end() do { } while (0)
+@@ -258,13 +272,6 @@ extern void __cant_sleep(const char *fil
+
+ #define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
+-#ifndef CONFIG_PREEMPT_RT
+-# define cant_migrate() cant_sleep()
+-#else
+- /* Placeholder for now */
+-# define cant_migrate() do { } while (0)
+-#endif
+-
+ /**
+ * abs - return absolute value of an argument
+ * @x: the value. If it is unsigned type, it is converted to signed type first.
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -322,7 +322,7 @@ static inline void preempt_notifier_init
+
+ #endif
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++#ifdef CONFIG_SMP
+
+ /*
+ * Migrate-Disable and why it is undesired.
+@@ -382,43 +382,11 @@ static inline void preempt_notifier_init
+ extern void migrate_disable(void);
+ extern void migrate_enable(void);
+
+-#elif defined(CONFIG_PREEMPT_RT)
++#else
+
+ static inline void migrate_disable(void) { }
+ static inline void migrate_enable(void) { }
+
+-#else /* !CONFIG_PREEMPT_RT */
+-
+-/**
+- * migrate_disable - Prevent migration of the current task
+- *
+- * Maps to preempt_disable() which also disables preemption. Use
+- * migrate_disable() to annotate that the intent is to prevent migration,
+- * but not necessarily preemption.
+- *
+- * Can be invoked nested like preempt_disable() and needs the corresponding
+- * number of migrate_enable() invocations.
+- */
+-static __always_inline void migrate_disable(void)
+-{
+- preempt_disable();
+-}
+-
+-/**
+- * migrate_enable - Allow migration of the current task
+- *
+- * Counterpart to migrate_disable().
+- *
+- * As migrate_disable() can be invoked nested, only the outermost invocation
+- * reenables migration.
+- *
+- * Currently mapped to preempt_enable().
+- */
+-static __always_inline void migrate_enable(void)
+-{
+- preempt_enable();
+-}
+-
+-#endif /* CONFIG_SMP && CONFIG_PREEMPT_RT */
++#endif /* CONFIG_SMP */
+
+ #endif /* __LINUX_PREEMPT_H */
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -715,7 +715,7 @@ struct task_struct {
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+ void *migration_pending;
+-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++#ifdef CONFIG_SMP
+ unsigned short migration_disabled;
+ #endif
+ unsigned short migration_flags;
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1696,8 +1696,6 @@ void check_preempt_curr(struct rq *rq, s
+
+ #ifdef CONFIG_SMP
+
+-#ifdef CONFIG_PREEMPT_RT
+-
+ static void
+ __do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask, u32 flags);
+
+@@ -1772,8 +1770,6 @@ static inline bool rq_has_pinned_tasks(s
+ return rq->nr_pinned;
+ }
+
+-#endif
+-
+ /*
+ * Per-CPU kthreads are allowed to run on !active && online CPUs, see
+ * __set_cpus_allowed_ptr() and select_fallback_rq().
+@@ -2841,7 +2837,7 @@ void sched_set_stop_task(int cpu, struct
+ }
+ }
+
+-#else
++#else /* CONFIG_SMP */
+
+ static inline int __set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask,
+@@ -2850,10 +2846,6 @@ static inline int __set_cpus_allowed_ptr
+ return set_cpus_allowed_ptr(p, new_mask);
+ }
+
+-#endif /* CONFIG_SMP */
+-
+-#if !defined(CONFIG_SMP) || !defined(CONFIG_PREEMPT_RT)
+-
+ static inline void migrate_disable_switch(struct rq *rq, struct task_struct *p) { }
+
+ static inline bool rq_has_pinned_tasks(struct rq *rq)
+@@ -2861,7 +2853,7 @@ static inline bool rq_has_pinned_tasks(s
+ return false;
+ }
+
+-#endif
++#endif /* !CONFIG_SMP */
+
+ static void
+ ttwu_stat(struct task_struct *p, int cpu, int wake_flags)
+@@ -7886,6 +7878,39 @@ void __cant_sleep(const char *file, int
+ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
+ }
+ EXPORT_SYMBOL_GPL(__cant_sleep);
++
++#ifdef CONFIG_SMP
++void __cant_migrate(const char *file, int line)
++{
++ static unsigned long prev_jiffy;
++
++ if (irqs_disabled())
++ return;
++
++ if (is_migration_disabled(current))
++ return;
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_COUNT))
++ return;
++
++ if (preempt_count() > 0)
++ return;
++
++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
++ return;
++ prev_jiffy = jiffies;
++
++ pr_err("BUG: assuming non migratable context at %s:%d\n", file, line);
++ pr_err("in_atomic(): %d, irqs_disabled(): %d, migration_disabled() %u pid: %d, name: %s\n",
++ in_atomic(), irqs_disabled(), is_migration_disabled(current),
++ current->pid, current->comm);
++
++ debug_show_held_locks(current);
++ dump_stack();
++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
++}
++EXPORT_SYMBOL_GPL(__cant_migrate);
++#endif
+ #endif
+
+ #ifdef CONFIG_MAGIC_SYSRQ
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1054,7 +1054,7 @@ struct rq {
+ struct cpuidle_state *idle_state;
+ #endif
+
+-#if defined(CONFIG_PREEMPT_RT) && defined(CONFIG_SMP)
++#ifdef CONFIG_SMP
+ unsigned int nr_pinned;
+ #endif
+ unsigned int push_busy;
+@@ -1090,7 +1090,7 @@ static inline int cpu_of(struct rq *rq)
+
+ static inline bool is_migration_disabled(struct task_struct *p)
+ {
+-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++#ifdef CONFIG_SMP
+ return p->migration_disabled;
+ #else
+ return false;
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -26,7 +26,7 @@ unsigned int check_preemption_disabled(c
+ if (current->nr_cpus_allowed == 1)
+ goto out;
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT)
++#ifdef CONFIG_SMP
+ if (current->migration_disabled)
+ goto out;
+ #endif
diff --git a/patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch b/patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
new file mode 100644
index 000000000000..2bdfa461d755
--- /dev/null
+++ b/patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
@@ -0,0 +1,25 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 20:38:45 +0200
+Subject: [PATCH 02/18] mm/highmem: Un-EXPORT __kmap_atomic_idx()
+
+Nothing in modules can use that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+---
+ mm/highmem.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -108,8 +108,6 @@ static inline wait_queue_head_t *get_pkm
+ atomic_long_t _totalhigh_pages __read_mostly;
+ EXPORT_SYMBOL(_totalhigh_pages);
+
+-EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
+-
+ unsigned int nr_free_highpages (void)
+ {
+ struct zone *zone;
diff --git a/patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch b/patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch
new file mode 100644
index 000000000000..fcbf4a88c524
--- /dev/null
+++ b/patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch
@@ -0,0 +1,292 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 09:30:47 +0200
+Subject: [PATCH 03/18] highmem: Provide generic variant of kmap_atomic*
+
+The kmap_atomic* interfaces in all architectures are pretty much the same
+except for post map operations (flush) and pre- and post unmap operations.
+
+Provide a generic variant for that.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: linux-mm@kvack.org
+---
+ include/linux/highmem.h | 79 ++++++++++++++++++++++++++------
+ mm/Kconfig | 3 +
+ mm/highmem.c | 118 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 183 insertions(+), 17 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -31,9 +31,16 @@ static inline void invalidate_kernel_vma
+
+ #include <asm/kmap_types.h>
+
++/*
++ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
++ */
++#ifdef CONFIG_KMAP_LOCAL
++void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
++void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
++void kunmap_local_indexed(void *vaddr);
++#endif
++
+ #ifdef CONFIG_HIGHMEM
+-extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+-extern void kunmap_atomic_high(void *kvaddr);
+ #include <asm/highmem.h>
+
+ #ifndef ARCH_HAS_KMAP_FLUSH_TLB
+@@ -81,6 +88,11 @@ static inline void kunmap(struct page *p
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
++
++#ifndef CONFIG_KMAP_LOCAL
++void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
++void kunmap_atomic_high(void *kvaddr);
++
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+ preempt_disable();
+@@ -89,7 +101,38 @@ static inline void *kmap_atomic_prot(str
+ return page_address(page);
+ return kmap_atomic_high_prot(page, prot);
+ }
+-#define kmap_atomic(page) kmap_atomic_prot(page, kmap_prot)
++
++static inline void __kunmap_atomic(void *vaddr)
++{
++ kunmap_atomic_high(vaddr);
++}
++#else /* !CONFIG_KMAP_LOCAL */
++
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++ preempt_disable();
++ pagefault_disable();
++ return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++ preempt_disable();
++ pagefault_disable();
++ return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_atomic(void *addr)
++{
++ kunmap_local_indexed(addr);
++}
++
++#endif /* CONFIG_KMAP_LOCAL */
++
++static inline void *kmap_atomic(struct page *page)
++{
++ return kmap_atomic_prot(page, kmap_prot);
++}
+
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
+@@ -157,21 +200,28 @@ static inline void *kmap_atomic(struct p
+ pagefault_disable();
+ return page_address(page);
+ }
+-#define kmap_atomic_prot(page, prot) kmap_atomic(page)
+
+-static inline void kunmap_atomic_high(void *addr)
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++ return kmap_atomic(page);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++ return kmap_atomic(pfn_to_page(pfn));
++}
++
++static inline void __kunmap_atomic(void *addr)
+ {
+ /*
+ * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+- * handles re-enabling faults + preemption
++ * handles re-enabling faults and preemption
+ */
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+ #endif
+ }
+
+-#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
+-
+ #define kmap_flush_unused() do {} while(0)
+
+ #endif /* CONFIG_HIGHMEM */
+@@ -213,15 +263,14 @@ static inline void kmap_atomic_idx_pop(v
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+ */
+-#define kunmap_atomic(addr) \
+-do { \
+- BUILD_BUG_ON(__same_type((addr), struct page *)); \
+- kunmap_atomic_high(addr); \
+- pagefault_enable(); \
+- preempt_enable(); \
++#define kunmap_atomic(__addr) \
++do { \
++ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
++ __kunmap_atomic(__addr); \
++ pagefault_enable(); \
++ preempt_enable(); \
+ } while (0)
+
+-
+ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+ #ifndef clear_user_highpage
+ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -872,4 +872,7 @@ config ARCH_HAS_HUGEPD
+ config MAPPING_DIRTY_HELPERS
+ bool
+
++config KMAP_LOCAL
++ bool
++
+ endmenu
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -30,6 +30,7 @@
+ #include <linux/kgdb.h>
+ #include <asm/tlbflush.h>
+ #include <linux/vmalloc.h>
++#include <asm/fixmap.h>
+
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+@@ -365,9 +366,122 @@ void kunmap_high(struct page *page)
+ if (need_wakeup)
+ wake_up(pkmap_map_wait);
+ }
+-
+ EXPORT_SYMBOL(kunmap_high);
+-#endif /* CONFIG_HIGHMEM */
++#endif /* CONFIG_HIGHMEM */
++
++#ifdef CONFIG_KMAP_LOCAL
++#ifndef arch_kmap_local_post_map
++# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
++#endif
++
++#ifndef arch_kmap_local_pre_unmap
++# define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
++#endif
++
++#ifndef arch_kmap_local_post_unmap
++# define arch_kmap_local_post_unmap(vaddr) do { } while (0)
++#endif
++
++#ifndef arch_kmap_local_map_idx
++#define arch_kmap_local_map_idx(type, pfn) kmap_local_calc_idx(type)
++#endif
++
++#ifndef arch_kmap_local_unmap_idx
++#define arch_kmap_local_unmap_idx(type, vaddr) kmap_local_calc_idx(type)
++#endif
++
++#ifndef arch_kmap_local_high_get
++static inline void *arch_kmap_local_high_get(struct page *page)
++{
++ return NULL;
++}
++#endif
++
++/* Unmap a local mapping which was obtained by kmap_high_get() */
++static inline void kmap_high_unmap_local(unsigned long vaddr)
++{
++#ifdef ARCH_NEEDS_KMAP_HIGH_GET
++ if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
++ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
++#endif
++}
++
++static inline int kmap_local_calc_idx(int type)
++{
++ return type + KM_TYPE_NR * smp_processor_id();
++}
++
++static pte_t *__kmap_pte;
++
++static pte_t *kmap_get_pte(void)
++{
++ if (!__kmap_pte)
++ __kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
++ return __kmap_pte;
++}
++
++void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
++{
++ pte_t pteval, *kmap_pte = kmap_get_pte();
++ unsigned long vaddr;
++ int idx;
++
++ preempt_disable();
++ idx = arch_kmap_local_map_idx(kmap_atomic_idx_push(), pfn);
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ BUG_ON(!pte_none(*(kmap_pte - idx)));
++ pteval = pfn_pte(pfn, prot);
++ set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
++ arch_kmap_local_post_map(vaddr, pteval);
++ preempt_enable();
++
++ return (void *)vaddr;
++}
++EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
++
++void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ void *kmap;
++
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ /* Try kmap_high_get() if architecture has it enabled */
++ kmap = arch_kmap_local_high_get(page);
++ if (kmap)
++ return kmap;
++
++ return __kmap_local_pfn_prot(page_to_pfn(page), prot);
++}
++EXPORT_SYMBOL(__kmap_local_page_prot);
++
++void kunmap_local_indexed(void *vaddr)
++{
++ unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
++ pte_t *kmap_pte = kmap_get_pte();
++ int idx;
++
++ if (addr < __fix_to_virt(FIX_KMAP_END) ||
++ addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
++ WARN_ON_ONCE(addr < PAGE_OFFSET);
++
++ /* Handle mappings which were obtained by kmap_high_get() */
++ kmap_high_unmap_local(addr);
++ return;
++ }
++
++ preempt_disable();
++ idx = arch_kmap_local_unmap_idx(kmap_atomic_idx(), addr);
++ WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
++
++ arch_kmap_local_pre_unmap(addr);
++ pte_clear(&init_mm, addr, kmap_pte - idx);
++ arch_kmap_local_post_unmap(addr);
++ kmap_atomic_idx_pop();
++ preempt_enable();
++}
++EXPORT_SYMBOL(kunmap_local_indexed);
++#endif
+
+ #if defined(HASHED_PAGE_VIRTUAL)
+
diff --git a/patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
new file mode 100644
index 000000000000..2bc27f02ac5e
--- /dev/null
+++ b/patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
@@ -0,0 +1,283 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 10:42:27 +0200
+Subject: [PATCH 04/18] x86/mm/highmem: Use generic kmap atomic implementation
+
+Convert X86 to the generic kmap atomic implementation and make the
+iomap_atomic() naming convention consistent while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: x86@kernel.org
+---
+ arch/x86/Kconfig | 3 +-
+ arch/x86/include/asm/fixmap.h | 1
+ arch/x86/include/asm/highmem.h | 12 ++++++--
+ arch/x86/include/asm/iomap.h | 18 ++++++------
+ arch/x86/mm/highmem_32.c | 59 -----------------------------------------
+ arch/x86/mm/init_32.c | 15 ----------
+ arch/x86/mm/iomap_32.c | 59 +++--------------------------------------
+ include/linux/io-mapping.h | 2 -
+ 8 files changed, 27 insertions(+), 142 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -14,10 +14,11 @@ config X86_32
+ select ARCH_WANT_IPC_PARSE_VERSION
+ select CLKSRC_I8253
+ select CLONE_BACKWARDS
++ select GENERIC_VDSO_32
+ select HAVE_DEBUG_STACKOVERFLOW
++ select KMAP_LOCAL
+ select MODULES_USE_ELF_REL
+ select OLD_SIGACTION
+- select GENERIC_VDSO_32
+
+ config X86_64
+ def_bool y
+--- a/arch/x86/include/asm/fixmap.h
++++ b/arch/x86/include/asm/fixmap.h
+@@ -151,7 +151,6 @@ extern void reserve_top_address(unsigned
+
+ extern int fixmaps_set;
+
+-extern pte_t *kmap_pte;
+ extern pte_t *pkmap_page_table;
+
+ void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
+--- a/arch/x86/include/asm/highmem.h
++++ b/arch/x86/include/asm/highmem.h
+@@ -58,11 +58,17 @@ extern unsigned long highstart_pfn, high
+ #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+ #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+
+-void *kmap_atomic_pfn(unsigned long pfn);
+-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
+-
+ #define flush_cache_kmaps() do { } while (0)
+
++#define arch_kmap_local_post_map(vaddr, pteval) \
++ arch_flush_lazy_mmu_mode()
++
++#define arch_kmap_local_post_unmap(vaddr) \
++ do { \
++ flush_tlb_one_kernel((vaddr)); \
++ arch_flush_lazy_mmu_mode(); \
++ } while (0)
++
+ extern void add_highpages_with_active_regions(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+
+--- a/arch/x86/include/asm/iomap.h
++++ b/arch/x86/include/asm/iomap.h
+@@ -9,19 +9,21 @@
+ #include <linux/fs.h>
+ #include <linux/mm.h>
+ #include <linux/uaccess.h>
++#include <linux/highmem.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+
+-void __iomem *
+-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
++void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+-void
+-iounmap_atomic(void __iomem *kvaddr);
++static inline void iounmap_atomic(void __iomem *vaddr)
++{
++ kunmap_local_indexed((void __force *)vaddr);
++ pagefault_enable();
++ preempt_enable();
++}
+
+-int
+-iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
++int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+-void
+-iomap_free(resource_size_t base, unsigned long size);
++void iomap_free(resource_size_t base, unsigned long size);
+
+ #endif /* _ASM_X86_IOMAP_H */
+--- a/arch/x86/mm/highmem_32.c
++++ b/arch/x86/mm/highmem_32.c
+@@ -4,65 +4,6 @@
+ #include <linux/swap.h> /* for totalram_pages */
+ #include <linux/memblock.h>
+
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
+- arch_flush_lazy_mmu_mode();
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-/*
+- * This is the same as kmap_atomic() but can map memory that doesn't
+- * have a struct page associated with it.
+- */
+-void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- return kmap_atomic_prot_pfn(pfn, kmap_prot);
+-}
+-EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+-
+- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+- int idx, type;
+-
+- type = kmap_atomic_idx();
+- idx = type + KM_TYPE_NR * smp_processor_id();
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-#endif
+- /*
+- * Force other mappings to Oops if they'll try to access this
+- * pte without first remap it. Keeping stale mappings around
+- * is a bad idea also, in case the page changes cacheability
+- * attributes or becomes a protected page in a hypervisor.
+- */
+- kpte_clear_flush(kmap_pte-idx, vaddr);
+- kmap_atomic_idx_pop();
+- arch_flush_lazy_mmu_mode();
+- }
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- else {
+- BUG_ON(vaddr < PAGE_OFFSET);
+- BUG_ON(vaddr >= (unsigned long)high_memory);
+- }
+-#endif
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+-
+ void __init set_highmem_pages_init(void)
+ {
+ struct zone *zone;
+--- a/arch/x86/mm/init_32.c
++++ b/arch/x86/mm/init_32.c
+@@ -394,19 +394,6 @@ kernel_physical_mapping_init(unsigned lo
+ return last_map_addr;
+ }
+
+-pte_t *kmap_pte;
+-
+-static void __init kmap_init(void)
+-{
+- unsigned long kmap_vstart;
+-
+- /*
+- * Cache the first kmap pte:
+- */
+- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+- kmap_pte = virt_to_kpte(kmap_vstart);
+-}
+-
+ #ifdef CONFIG_HIGHMEM
+ static void __init permanent_kmaps_init(pgd_t *pgd_base)
+ {
+@@ -712,8 +699,6 @@ void __init paging_init(void)
+
+ __flush_tlb_all();
+
+- kmap_init();
+-
+ /*
+ * NOTE: at this point the bootmem allocator is fully available.
+ */
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -44,28 +44,7 @@ void iomap_free(resource_size_t base, un
+ }
+ EXPORT_SYMBOL_GPL(iomap_free);
+
+-void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- preempt_disable();
+- pagefault_disable();
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR * smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+- arch_flush_lazy_mmu_mode();
+-
+- return (void *)vaddr;
+-}
+-
+-/*
+- * Map 'pfn' using protections 'prot'
+- */
+-void __iomem *
+-iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
++void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
+ {
+ /*
+ * For non-PAT systems, translate non-WB request to UC- just in
+@@ -81,36 +60,8 @@ iomap_atomic_prot_pfn(unsigned long pfn,
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(prot) &= __default_kernel_pte_mask;
+
+- return (void __force __iomem *) kmap_atomic_prot_pfn(pfn, prot);
+-}
+-EXPORT_SYMBOL_GPL(iomap_atomic_prot_pfn);
+-
+-void
+-iounmap_atomic(void __iomem *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+-
+- if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
+- vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
+- int idx, type;
+-
+- type = kmap_atomic_idx();
+- idx = type + KM_TYPE_NR * smp_processor_id();
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-#endif
+- /*
+- * Force other mappings to Oops if they'll try to access this
+- * pte without first remap it. Keeping stale mappings around
+- * is a bad idea also, in case the page changes cacheability
+- * attributes or becomes a protected page in a hypervisor.
+- */
+- kpte_clear_flush(kmap_pte-idx, vaddr);
+- kmap_atomic_idx_pop();
+- }
+-
+- pagefault_enable();
+- preempt_enable();
++ preempt_disable();
++ pagefault_disable();
++ return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
+ }
+-EXPORT_SYMBOL_GPL(iounmap_atomic);
++EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -69,7 +69,7 @@ io_mapping_map_atomic_wc(struct io_mappi
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+- return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
++ return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+ }
+
+ static inline void
diff --git a/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
new file mode 100644
index 000000000000..61218ed26449
--- /dev/null
+++ b/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
@@ -0,0 +1,108 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 11:14:55 +0200
+Subject: [PATCH 05/18] arc/mm/highmem: Use generic kmap atomic implementation
+
+Adopt the map ordering to match the other architectures and the generic
+code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: linux-snps-arc@lists.infradead.org
+---
+ arch/arc/Kconfig | 1
+ arch/arc/include/asm/highmem.h | 8 ++++++-
+ arch/arc/mm/highmem.c | 44 -----------------------------------------
+ 3 files changed, 9 insertions(+), 44 deletions(-)
+
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -507,6 +507,7 @@ config LINUX_RAM_BASE
+ config HIGHMEM
+ bool "High Memory Support"
+ select ARCH_DISCONTIGMEM_ENABLE
++ select KMAP_LOCAL
+ help
+ With ARC 2G:2G address split, only upper 2G is directly addressable by
+ kernel. Enable this to potentially allow access to rest of 2G and PAE
+--- a/arch/arc/include/asm/highmem.h
++++ b/arch/arc/include/asm/highmem.h
+@@ -15,7 +15,10 @@
+ #define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+ #define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
+ #define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+-#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
++
++#define FIX_KMAP_BEGIN (0)
++#define FIX_KMAP_END ((FIXMAP_SIZE >> PAGE_SHIFT) - 1)
++#define FIXADDR_TOP (FIXMAP_BASE + FIXMAP_SIZE - PAGE_SIZE)
+
+ /* start after fixmap area */
+ #define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+@@ -29,6 +32,9 @@
+
+ extern void kmap_init(void);
+
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
++
+ static inline void flush_cache_kmaps(void)
+ {
+ flush_cache_all();
+--- a/arch/arc/mm/highmem.c
++++ b/arch/arc/mm/highmem.c
+@@ -47,48 +47,6 @@
+ */
+
+ extern pte_t * pkmap_page_table;
+-static pte_t * fixmap_page_table;
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- int idx, cpu_idx;
+- unsigned long vaddr;
+-
+- cpu_idx = kmap_atomic_idx_push();
+- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+- vaddr = FIXMAP_ADDR(idx);
+-
+- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+- mk_pte(page, prot));
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kv)
+-{
+- unsigned long kvaddr = (unsigned long)kv;
+-
+- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+-
+- /*
+- * Because preemption is disabled, this vaddr can be associated
+- * with the current allocated index.
+- * But in case of multiple live kmap_atomic(), it still relies on
+- * callers to unmap in right order.
+- */
+- int cpu_idx = kmap_atomic_idx();
+- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+-
+- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+-
+- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+-
+- kmap_atomic_idx_pop();
+- }
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+
+ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+ {
+@@ -113,5 +71,5 @@ void __init kmap_init(void)
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+
+ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
++ alloc_kmap_pgtable(FIXMAP_BASE);
+ }
diff --git a/patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..776381fcb798
--- /dev/null
+++ b/patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,206 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 12:05:18 +0200
+Subject: [PATCH 06/18] ARM: highmem: Switch to generic kmap atomic
+
+No reason having the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: linux-arm-kernel@lists.infradead.org
+---
+ arch/arm/Kconfig | 1
+ arch/arm/include/asm/highmem.h | 31 +++++++---
+ arch/arm/mm/Makefile | 1
+ arch/arm/mm/highmem.c | 121 -----------------------------------------
+ 4 files changed, 23 insertions(+), 131 deletions(-)
+ delete mode 100644 arch/arm/mm/highmem.c
+
+--- a/arch/arm/Kconfig
++++ b/arch/arm/Kconfig
+@@ -1498,6 +1498,7 @@ config HAVE_ARCH_PFN_VALID
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU
++ select KMAP_LOCAL
+ help
+ The address space of ARM processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+--- a/arch/arm/include/asm/highmem.h
++++ b/arch/arm/include/asm/highmem.h
+@@ -46,19 +46,32 @@ extern pte_t *pkmap_page_table;
+
+ #ifdef ARCH_NEEDS_KMAP_HIGH_GET
+ extern void *kmap_high_get(struct page *page);
+-#else
++
++static inline void *arch_kmap_local_high_get(struct page *page)
++{
++ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !cache_is_vivt())
++ return NULL;
++ return kmap_high_get(page);
++}
++#define arch_kmap_local_high_get arch_kmap_local_high_get
++
++#else /* ARCH_NEEDS_KMAP_HIGH_GET */
+ static inline void *kmap_high_get(struct page *page)
+ {
+ return NULL;
+ }
+-#endif
++#endif /* !ARCH_NEEDS_KMAP_HIGH_GET */
+
+-/*
+- * The following functions are already defined by <linux/highmem.h>
+- * when CONFIG_HIGHMEM is not set.
+- */
+-#ifdef CONFIG_HIGHMEM
+-extern void *kmap_atomic_pfn(unsigned long pfn);
+-#endif
++#define arch_kmap_local_post_map(vaddr, pteval) \
++ local_flush_tlb_kernel_page(vaddr)
++
++#define arch_kmap_local_pre_unmap(vaddr) \
++do { \
++ if (cache_is_vivt()) \
++ __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); \
++} while (0)
++
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_kernel_page(vaddr)
+
+ #endif
+--- a/arch/arm/mm/Makefile
++++ b/arch/arm/mm/Makefile
+@@ -19,7 +19,6 @@ obj-$(CONFIG_MODULES) += proc-syms.o
+ obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
+
+ obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+-obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_ARM_PV_FIXUP) += pv-fixup-asm.o
+
+--- a/arch/arm/mm/highmem.c
++++ /dev/null
+@@ -1,121 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0-only
+-/*
+- * arch/arm/mm/highmem.c -- ARM highmem support
+- *
+- * Author: Nicolas Pitre
+- * Created: september 8, 2008
+- * Copyright: Marvell Semiconductors Inc.
+- */
+-
+-#include <linux/module.h>
+-#include <linux/highmem.h>
+-#include <linux/interrupt.h>
+-#include <asm/fixmap.h>
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-#include "mm.h"
+-
+-static inline void set_fixmap_pte(int idx, pte_t pte)
+-{
+- unsigned long vaddr = __fix_to_virt(idx);
+- pte_t *ptep = virt_to_kpte(vaddr);
+-
+- set_pte_ext(ptep, pte, 0);
+- local_flush_tlb_kernel_page(vaddr);
+-}
+-
+-static inline pte_t get_fixmap_pte(unsigned long vaddr)
+-{
+- pte_t *ptep = virt_to_kpte(vaddr);
+-
+- return *ptep;
+-}
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned int idx;
+- unsigned long vaddr;
+- void *kmap;
+- int type;
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- /*
+- * There is no cache coherency issue when non VIVT, so force the
+- * dedicated kmap usage for better debugging purposes in that case.
+- */
+- if (!cache_is_vivt())
+- kmap = NULL;
+- else
+-#endif
+- kmap = kmap_high_get(page);
+- if (kmap)
+- return kmap;
+-
+- type = kmap_atomic_idx_push();
+-
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+- vaddr = __fix_to_virt(idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- /*
+- * With debugging enabled, kunmap_atomic forces that entry to 0.
+- * Make sure it was indeed properly unmapped.
+- */
+- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+-#endif
+- /*
+- * When debugging is off, kunmap_atomic leaves the previous mapping
+- * in place, so the contained TLB flush ensures the TLB is updated
+- * with the new mapping.
+- */
+- set_fixmap_pte(idx, mk_pte(page, prot));
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int idx, type;
+-
+- if (kvaddr >= (void *)FIXADDR_START) {
+- type = kmap_atomic_idx();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+-
+- if (cache_is_vivt())
+- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(vaddr != __fix_to_virt(idx));
+- set_fixmap_pte(idx, __pte(0));
+-#else
+- (void) idx; /* to kill a warning */
+-#endif
+- kmap_atomic_idx_pop();
+- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+- /* this address was obtained through kmap_high_get() */
+- kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
+- }
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+-
+-void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- unsigned long vaddr;
+- int idx, type;
+- struct page *page = pfn_to_page(pfn);
+-
+- preempt_disable();
+- pagefault_disable();
+- if (!PageHighMem(page))
+- return page_address(page);
+-
+- type = kmap_atomic_idx_push();
+- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
+- vaddr = __fix_to_virt(idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
+-#endif
+- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+-
+- return (void *)vaddr;
+-}
diff --git a/patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..3482405a77df
--- /dev/null
+++ b/patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,136 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 13:01:33 +0200
+Subject: [PATCH 07/18] csky/mm/highmem: Switch to generic kmap atomic
+
+No reason having the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Guo Ren <guoren@kernel.org>
+Cc: linux-csky@vger.kernel.org
+---
+ arch/csky/Kconfig | 1
+ arch/csky/include/asm/highmem.h | 4 +-
+ arch/csky/mm/highmem.c | 75 ----------------------------------------
+ 3 files changed, 5 insertions(+), 75 deletions(-)
+
+--- a/arch/csky/Kconfig
++++ b/arch/csky/Kconfig
+@@ -286,6 +286,7 @@ config NR_CPUS
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on !CPU_CK610
++ select KMAP_LOCAL
+ default y
+
+ config FORCE_MAX_ZONEORDER
+--- a/arch/csky/include/asm/highmem.h
++++ b/arch/csky/include/asm/highmem.h
+@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
+
+ #define ARCH_HAS_KMAP_FLUSH_TLB
+ extern void kmap_flush_tlb(unsigned long addr);
+-extern void *kmap_atomic_pfn(unsigned long pfn);
+
+ #define flush_cache_kmaps() do {} while (0)
+
++#define arch_kmap_local_post_map(vaddr, pteval) kmap_flush_tlb(vaddr)
++#define arch_kmap_local_post_unmap(vaddr) kmap_flush_tlb(vaddr)
++
+ extern void kmap_init(void);
+
+ #endif /* __KERNEL__ */
+--- a/arch/csky/mm/highmem.c
++++ b/arch/csky/mm/highmem.c
+@@ -9,8 +9,6 @@
+ #include <asm/tlbflush.h>
+ #include <asm/cacheflush.h>
+
+-static pte_t *kmap_pte;
+-
+ unsigned long highstart_pfn, highend_pfn;
+
+ void kmap_flush_tlb(unsigned long addr)
+@@ -19,67 +17,7 @@ void kmap_flush_tlb(unsigned long addr)
+ }
+ EXPORT_SYMBOL(kmap_flush_tlb);
+
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte - idx)));
+-#endif
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
+- flush_tlb_one((unsigned long)vaddr);
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int idx;
+-
+- if (vaddr < FIXADDR_START)
+- return;
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- idx = KM_TYPE_NR*smp_processor_id() + kmap_atomic_idx();
+-
+- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-
+- pte_clear(&init_mm, vaddr, kmap_pte - idx);
+- flush_tlb_one(vaddr);
+-#else
+- (void) idx; /* to kill a warning */
+-#endif
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+-
+-/*
+- * This is the same as kmap_atomic() but can map memory that doesn't
+- * have a struct page associated with it.
+- */
+-void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- pagefault_disable();
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+- flush_tlb_one(vaddr);
+-
+- return (void *) vaddr;
+-}
+-
+-static void __init kmap_pages_init(void)
++void __init kmap_init(void)
+ {
+ unsigned long vaddr;
+ pgd_t *pgd;
+@@ -96,14 +34,3 @@ static void __init kmap_pages_init(void)
+ pte = pte_offset_kernel(pmd, vaddr);
+ pkmap_page_table = pte;
+ }
+-
+-void __init kmap_init(void)
+-{
+- unsigned long vaddr;
+-
+- kmap_pages_init();
+-
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
+-
+- kmap_pte = pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+-}
diff --git a/patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..dd4c0486ae6b
--- /dev/null
+++ b/patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,157 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 13:06:09 +0200
+Subject: [PATCH 08/18] microblaze/mm/highmem: Switch to generic kmap atomic
+
+No reason having the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Michal Simek <monstr@monstr.eu>
+---
+ arch/microblaze/Kconfig | 1
+ arch/microblaze/include/asm/highmem.h | 6 ++
+ arch/microblaze/mm/Makefile | 1
+ arch/microblaze/mm/highmem.c | 78 ----------------------------------
+ arch/microblaze/mm/init.c | 6 --
+ 5 files changed, 6 insertions(+), 86 deletions(-)
+ delete mode 100644 arch/microblaze/mm/highmem.c
+
+--- a/arch/microblaze/Kconfig
++++ b/arch/microblaze/Kconfig
+@@ -155,6 +155,7 @@ config XILINX_UNCACHED_SHADOW
+ config HIGHMEM
+ bool "High memory support"
+ depends on MMU
++ select KMAP_LOCAL
+ help
+ The address space of Microblaze processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+--- a/arch/microblaze/include/asm/highmem.h
++++ b/arch/microblaze/include/asm/highmem.h
+@@ -25,7 +25,6 @@
+ #include <linux/uaccess.h>
+ #include <asm/fixmap.h>
+
+-extern pte_t *kmap_pte;
+ extern pte_t *pkmap_page_table;
+
+ /*
+@@ -52,6 +51,11 @@ extern pte_t *pkmap_page_table;
+
+ #define flush_cache_kmaps() { flush_icache(); flush_dcache(); }
+
++#define arch_kmap_local_post_map(vaddr, pteval) \
++ local_flush_tlb_page(NULL, vaddr);
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_page(NULL, vaddr);
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_HIGHMEM_H */
+--- a/arch/microblaze/mm/Makefile
++++ b/arch/microblaze/mm/Makefile
+@@ -6,4 +6,3 @@
+ obj-y := consistent.o init.o
+
+ obj-$(CONFIG_MMU) += pgtable.o mmu_context.o fault.o
+-obj-$(CONFIG_HIGHMEM) += highmem.o
+--- a/arch/microblaze/mm/highmem.c
++++ /dev/null
+@@ -1,78 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * highmem.c: virtual kernel memory mappings for high memory
+- *
+- * PowerPC version, stolen from the i386 version.
+- *
+- * Used in CONFIG_HIGHMEM systems for memory pages which
+- * are not addressable by direct kernel virtual addresses.
+- *
+- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+- * Gerhard.Wichert@pdb.siemens.de
+- *
+- *
+- * Redesigned the x86 32-bit VM architecture to deal with
+- * up to 16 Terrabyte physical memory. With current x86 CPUs
+- * we now support up to 64 Gigabytes physical RAM.
+- *
+- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+- *
+- * Reworked for PowerPC by various contributors. Moved from
+- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+- */
+-
+-#include <linux/export.h>
+-#include <linux/highmem.h>
+-
+-/*
+- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+- * gives a more generic (and caching) interface. But kmap_atomic can
+- * be used in IRQ contexts, so in some (very limited) cases we need
+- * it.
+- */
+-#include <asm/tlbflush.h>
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+-
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
+-#endif
+- set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+- local_flush_tlb_page(NULL, vaddr);
+-
+- return (void *) vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int type;
+- unsigned int idx;
+-
+- if (vaddr < __fix_to_virt(FIX_KMAP_END))
+- return;
+-
+- type = kmap_atomic_idx();
+-
+- idx = type + KM_TYPE_NR * smp_processor_id();
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-#endif
+- /*
+- * force other mappings to Oops if they'll try to access
+- * this pte without first remap it
+- */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+- local_flush_tlb_page(NULL, vaddr);
+-
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+--- a/arch/microblaze/mm/init.c
++++ b/arch/microblaze/mm/init.c
+@@ -49,17 +49,11 @@ unsigned long lowmem_size;
+ EXPORT_SYMBOL(min_low_pfn);
+ EXPORT_SYMBOL(max_low_pfn);
+
+-#ifdef CONFIG_HIGHMEM
+-pte_t *kmap_pte;
+-EXPORT_SYMBOL(kmap_pte);
+-
+ static void __init highmem_init(void)
+ {
+ pr_debug("%x\n", (u32)PKMAP_BASE);
+ map_page(PKMAP_BASE, 0, 0); /* XXX gross */
+ pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+-
+- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+ }
+
+ static void highmem_setup(void)
diff --git a/patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..696751587c72
--- /dev/null
+++ b/patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,144 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 13:30:40 +0200
+Subject: [PATCH 09/18] mips/mm/highmem: Switch to generic kmap atomic
+
+No reason having the same code in every architecture
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: linux-mips@vger.kernel.org
+---
+ arch/mips/Kconfig | 1
+ arch/mips/include/asm/highmem.h | 4 +-
+ arch/mips/mm/highmem.c | 77 ----------------------------------------
+ arch/mips/mm/init.c | 3 -
+ 4 files changed, 3 insertions(+), 82 deletions(-)
+
+--- a/arch/mips/Kconfig
++++ b/arch/mips/Kconfig
+@@ -2719,6 +2719,7 @@ config WAR_MIPS34K_MISSED_ITLB
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
++ select KMAP_LOCAL
+
+ config CPU_SUPPORTS_HIGHMEM
+ bool
+--- a/arch/mips/include/asm/highmem.h
++++ b/arch/mips/include/asm/highmem.h
+@@ -48,11 +48,11 @@ extern pte_t *pkmap_page_table;
+
+ #define ARCH_HAS_KMAP_FLUSH_TLB
+ extern void kmap_flush_tlb(unsigned long addr);
+-extern void *kmap_atomic_pfn(unsigned long pfn);
+
+ #define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
+
+-extern void kmap_init(void);
++#define arch_kmap_local_post_map(vaddr, pteval) local_flush_tlb_one(vaddr)
++#define arch_kmap_local_post_unmap(vaddr) local_flush_tlb_one(vaddr)
+
+ #endif /* __KERNEL__ */
+
+--- a/arch/mips/mm/highmem.c
++++ b/arch/mips/mm/highmem.c
+@@ -8,8 +8,6 @@
+ #include <asm/fixmap.h>
+ #include <asm/tlbflush.h>
+
+-static pte_t *kmap_pte;
+-
+ unsigned long highstart_pfn, highend_pfn;
+
+ void kmap_flush_tlb(unsigned long addr)
+@@ -17,78 +15,3 @@ void kmap_flush_tlb(unsigned long addr)
+ flush_tlb_one(addr);
+ }
+ EXPORT_SYMBOL(kmap_flush_tlb);
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte - idx)));
+-#endif
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
+- local_flush_tlb_one((unsigned long)vaddr);
+-
+- return (void*) vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int type __maybe_unused;
+-
+- if (vaddr < FIXADDR_START)
+- return;
+-
+- type = kmap_atomic_idx();
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- {
+- int idx = type + KM_TYPE_NR * smp_processor_id();
+-
+- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-
+- /*
+- * force other mappings to Oops if they'll try to access
+- * this pte without first remap it
+- */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+- local_flush_tlb_one(vaddr);
+- }
+-#endif
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+-
+-/*
+- * This is the same as kmap_atomic() but can map memory that doesn't
+- * have a struct page associated with it.
+- */
+-void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- preempt_disable();
+- pagefault_disable();
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
+- flush_tlb_one(vaddr);
+-
+- return (void*) vaddr;
+-}
+-
+-void __init kmap_init(void)
+-{
+- unsigned long kmap_vstart;
+-
+- /* cache the first kmap pte */
+- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+- kmap_pte = virt_to_kpte(kmap_vstart);
+-}
+--- a/arch/mips/mm/init.c
++++ b/arch/mips/mm/init.c
+@@ -402,9 +402,6 @@ void __init paging_init(void)
+
+ pagetable_init();
+
+-#ifdef CONFIG_HIGHMEM
+- kmap_init();
+-#endif
+ #ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+ #endif
diff --git a/patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..b696d1fafdf4
--- /dev/null
+++ b/patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,119 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 13:33:51 +0200
+Subject: [PATCH 10/18] nds32/mm/highmem: Switch to generic kmap atomic
+
+The mapping code is odd and looks broken. See FIXME in the comment.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Nick Hu <nickhu@andestech.com>
+Cc: Greentime Hu <green.hu@gmail.com>
+Cc: Vincent Chen <deanbo422@gmail.com>
+---
+ arch/nds32/Kconfig.cpu | 1
+ arch/nds32/include/asm/highmem.h | 21 +++++++++++++----
+ arch/nds32/mm/Makefile | 1
+ arch/nds32/mm/highmem.c | 48 ---------------------------------------
+ 4 files changed, 17 insertions(+), 54 deletions(-)
+ delete mode 100644 arch/nds32/mm/highmem.c
+
+--- a/arch/nds32/Kconfig.cpu
++++ b/arch/nds32/Kconfig.cpu
+@@ -157,6 +157,7 @@ config HW_SUPPORT_UNALIGNMENT_ACCESS
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU && !CPU_CACHE_ALIASING
++ select KMAP_LOCAL
+ help
+ The address space of Andes processors is only 4 Gigabytes large
+ and it has to accommodate user address space, kernel address
+--- a/arch/nds32/include/asm/highmem.h
++++ b/arch/nds32/include/asm/highmem.h
+@@ -45,11 +45,22 @@ extern pte_t *pkmap_page_table;
+ extern void kmap_init(void);
+
+ /*
+- * The following functions are already defined by <linux/highmem.h>
+- * when CONFIG_HIGHMEM is not set.
++ * FIXME: The below looks broken vs. a kmap_atomic() in task context which
++ * is interupted and another kmap_atomic() happens in interrupt context.
++ * But what do I know about nds32. -- tglx
+ */
+-#ifdef CONFIG_HIGHMEM
+-extern void *kmap_atomic_pfn(unsigned long pfn);
+-#endif
++#define arch_kmap_local_post_map(vaddr, pteval) \
++ do { \
++ __nds32__tlbop_inv(vaddr); \
++ __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN); \
++ __nds32__tlbop_rwr(pteval); \
++ __nds32__isb(); \
++ } while (0)
++
++#define arch_kmap_local_pre_unmap(vaddr, pte) \
++ do { \
++ __nds32__tlbop_inv(vaddr); \
++ __nds32__isb(); \
++ } while (0)
+
+ #endif
+--- a/arch/nds32/mm/Makefile
++++ b/arch/nds32/mm/Makefile
+@@ -3,7 +3,6 @@ obj-y := extable.o tlb.o fault.o init
+ mm-nds32.o cacheflush.o proc.o
+
+ obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
+-obj-$(CONFIG_HIGHMEM) += highmem.o
+
+ ifdef CONFIG_FUNCTION_TRACER
+ CFLAGS_REMOVE_proc.o = $(CC_FLAGS_FTRACE)
+--- a/arch/nds32/mm/highmem.c
++++ /dev/null
+@@ -1,48 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-// Copyright (C) 2005-2017 Andes Technology Corporation
+-
+-#include <linux/export.h>
+-#include <linux/highmem.h>
+-#include <linux/sched.h>
+-#include <linux/smp.h>
+-#include <linux/interrupt.h>
+-#include <linux/memblock.h>
+-#include <asm/fixmap.h>
+-#include <asm/tlbflush.h>
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned int idx;
+- unsigned long vaddr, pte;
+- int type;
+- pte_t *ptep;
+-
+- type = kmap_atomic_idx_push();
+-
+- idx = type + KM_TYPE_NR * smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- pte = (page_to_pfn(page) << PAGE_SHIFT) | prot;
+- ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+- set_pte(ptep, pte);
+-
+- __nds32__tlbop_inv(vaddr);
+- __nds32__mtsr_dsb(vaddr, NDS32_SR_TLB_VPN);
+- __nds32__tlbop_rwr(pte);
+- __nds32__isb();
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- if (kvaddr >= (void *)FIXADDR_START) {
+- unsigned long vaddr = (unsigned long)kvaddr;
+- pte_t *ptep;
+- kmap_atomic_idx_pop();
+- __nds32__tlbop_inv(vaddr);
+- __nds32__isb();
+- ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
+- set_pte(ptep, 0);
+- }
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
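
The nds32 conversion keeps only the TLB maintenance hooks above; the PTE
handling itself moves into the generic kmap_local code. As a condensed
sketch (not the verbatim generic implementation, whose context is visible
further down in the sched/highmem patch), the generic map path consumes
those hooks roughly like this:

	void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
	{
		unsigned long vaddr;
		pte_t pteval;
		int idx;

		preempt_disable();
		idx = arch_kmap_local_map_idx(kmap_atomic_idx_push(), pfn);
		vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		pteval = pfn_pte(pfn, prot);
		set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
		arch_kmap_local_post_map(vaddr, pteval); /* nds32: TLB insert */
		preempt_enable();

		return (void *)vaddr;
	}
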
diff --git a/patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..2f072ccc8ed1
--- /dev/null
+++ b/patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,155 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 13:45:08 +0200
+Subject: [PATCH 11/18] powerpc/mm/highmem: Switch to generic kmap atomic
+
+No reason to have the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: linuxppc-dev@lists.ozlabs.org
+---
+ arch/powerpc/Kconfig | 1
+ arch/powerpc/include/asm/highmem.h | 6 ++-
+ arch/powerpc/mm/Makefile | 1
+ arch/powerpc/mm/highmem.c | 67 -------------------------------------
+ arch/powerpc/mm/mem.c | 7 ---
+ 5 files changed, 6 insertions(+), 76 deletions(-)
+ delete mode 100644 arch/powerpc/mm/highmem.c
+
+--- a/arch/powerpc/Kconfig
++++ b/arch/powerpc/Kconfig
+@@ -409,6 +409,7 @@ menu "Kernel options"
+ config HIGHMEM
+ bool "High memory support"
+ depends on PPC32
++ select KMAP_LOCAL
+
+ source "kernel/Kconfig.hz"
+
+--- a/arch/powerpc/include/asm/highmem.h
++++ b/arch/powerpc/include/asm/highmem.h
+@@ -29,7 +29,6 @@
+ #include <asm/page.h>
+ #include <asm/fixmap.h>
+
+-extern pte_t *kmap_pte;
+ extern pte_t *pkmap_page_table;
+
+ /*
+@@ -60,6 +59,11 @@ extern pte_t *pkmap_page_table;
+
+ #define flush_cache_kmaps() flush_cache_all()
+
++#define arch_kmap_local_post_map(vaddr, pteval) \
++ local_flush_tlb_page(NULL, vaddr)
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_page(NULL, vaddr)
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_HIGHMEM_H */
+--- a/arch/powerpc/mm/Makefile
++++ b/arch/powerpc/mm/Makefile
+@@ -16,7 +16,6 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += num
+ obj-$(CONFIG_PPC_MM_SLICES) += slice.o
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_NOT_COHERENT_CACHE) += dma-noncoherent.o
+-obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_PPC_COPRO_BASE) += copro_fault.o
+ obj-$(CONFIG_PPC_PTDUMP) += ptdump/
+ obj-$(CONFIG_KASAN) += kasan/
+--- a/arch/powerpc/mm/highmem.c
++++ /dev/null
+@@ -1,67 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * highmem.c: virtual kernel memory mappings for high memory
+- *
+- * PowerPC version, stolen from the i386 version.
+- *
+- * Used in CONFIG_HIGHMEM systems for memory pages which
+- * are not addressable by direct kernel virtual addresses.
+- *
+- * Copyright (C) 1999 Gerhard Wichert, Siemens AG
+- * Gerhard.Wichert@pdb.siemens.de
+- *
+- *
+- * Redesigned the x86 32-bit VM architecture to deal with
+- * up to 16 Terrabyte physical memory. With current x86 CPUs
+- * we now support up to 64 Gigabytes physical RAM.
+- *
+- * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
+- *
+- * Reworked for PowerPC by various contributors. Moved from
+- * highmem.h by Benjamin Herrenschmidt (c) 2009 IBM Corp.
+- */
+-
+-#include <linux/highmem.h>
+-#include <linux/module.h>
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- int idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+- WARN_ON(IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !pte_none(*(kmap_pte - idx)));
+- __set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
+- local_flush_tlb_page(NULL, vaddr);
+-
+- return (void*) vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+-
+- if (vaddr < __fix_to_virt(FIX_KMAP_END))
+- return;
+-
+- if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM)) {
+- int type = kmap_atomic_idx();
+- unsigned int idx;
+-
+- idx = type + KM_TYPE_NR * smp_processor_id();
+- WARN_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+-
+- /*
+- * force other mappings to Oops if they'll try to access
+- * this pte without first remap it
+- */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+- local_flush_tlb_page(NULL, vaddr);
+- }
+-
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+--- a/arch/powerpc/mm/mem.c
++++ b/arch/powerpc/mm/mem.c
+@@ -61,11 +61,6 @@
+ unsigned long long memory_limit;
+ bool init_mem_is_free;
+
+-#ifdef CONFIG_HIGHMEM
+-pte_t *kmap_pte;
+-EXPORT_SYMBOL(kmap_pte);
+-#endif
+-
+ pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
+ unsigned long size, pgprot_t vma_prot)
+ {
+@@ -235,8 +230,6 @@ void __init paging_init(void)
+
+ map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
+ pkmap_page_table = virt_to_kpte(PKMAP_BASE);
+-
+- kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
+ #endif /* CONFIG_HIGHMEM */
+
+ printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
diff --git a/patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..6dfc05892133
--- /dev/null
+++ b/patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,189 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 14:00:29 +0200
+Subject: [PATCH 12/18] sparc/mm/highmem: Switch to generic kmap atomic
+
+No reason to have the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: sparclinux@vger.kernel.org
+---
+ arch/sparc/Kconfig | 1
+ arch/sparc/include/asm/highmem.h | 7 +-
+ arch/sparc/mm/Makefile | 3 -
+ arch/sparc/mm/highmem.c | 115 ---------------------------------------
+ arch/sparc/mm/srmmu.c | 2
+ 5 files changed, 6 insertions(+), 122 deletions(-)
+ delete mode 100644 arch/sparc/mm/highmem.c
+
+--- a/arch/sparc/Kconfig
++++ b/arch/sparc/Kconfig
+@@ -139,6 +139,7 @@ config MMU
+ config HIGHMEM
+ bool
+ default y if SPARC32
++ select KMAP_LOCAL
+
+ config ZONE_DMA
+ bool
+--- a/arch/sparc/include/asm/highmem.h
++++ b/arch/sparc/include/asm/highmem.h
+@@ -33,8 +33,6 @@ extern unsigned long highstart_pfn, high
+ #define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
+ extern pte_t *pkmap_page_table;
+
+-void kmap_init(void) __init;
+-
+ /*
+ * Right now we initialize only a single pte table. It can be extended
+ * easily, subsequent pte tables have to be allocated in one physical
+@@ -53,6 +51,11 @@ void kmap_init(void) __init;
+
+ #define flush_cache_kmaps() flush_cache_all()
+
++/* FIXME: Use __flush_tlb_one(vaddr) instead of flush_cache_all() -- Anton */
++#define arch_kmap_local_post_map(vaddr, pteval) flush_cache_all()
++#define arch_kmap_local_post_unmap(vaddr) flush_cache_all()
++
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ASM_HIGHMEM_H */
+--- a/arch/sparc/mm/Makefile
++++ b/arch/sparc/mm/Makefile
+@@ -15,6 +15,3 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
+
+ # Only used by sparc64
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+-
+-# Only used by sparc32
+-obj-$(CONFIG_HIGHMEM) += highmem.o
+--- a/arch/sparc/mm/highmem.c
++++ /dev/null
+@@ -1,115 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-/*
+- * highmem.c: virtual kernel memory mappings for high memory
+- *
+- * Provides kernel-static versions of atomic kmap functions originally
+- * found as inlines in include/asm-sparc/highmem.h. These became
+- * needed as kmap_atomic() and kunmap_atomic() started getting
+- * called from within modules.
+- * -- Tomas Szepe <szepe@pinerecords.com>, September 2002
+- *
+- * But kmap_atomic() and kunmap_atomic() cannot be inlined in
+- * modules because they are loaded with btfixup-ped functions.
+- */
+-
+-/*
+- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+- * gives a more generic (and caching) interface. But kmap_atomic can
+- * be used in IRQ contexts, so in some (very limited) cases we need it.
+- *
+- * XXX This is an old text. Actually, it's good to use atomic kmaps,
+- * provided you remember that they are atomic and not try to sleep
+- * with a kmap taken, much like a spinlock. Non-atomic kmaps are
+- * shared by CPUs, and so precious, and establishing them requires IPI.
+- * Atomic kmaps are lightweight and we may have NCPUS more of them.
+- */
+-#include <linux/highmem.h>
+-#include <linux/export.h>
+-#include <linux/mm.h>
+-
+-#include <asm/cacheflush.h>
+-#include <asm/tlbflush.h>
+-#include <asm/vaddrs.h>
+-
+-static pte_t *kmap_pte;
+-
+-void __init kmap_init(void)
+-{
+- unsigned long address = __fix_to_virt(FIX_KMAP_BEGIN);
+-
+- /* cache the first kmap pte */
+- kmap_pte = virt_to_kpte(address);
+-}
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- unsigned long vaddr;
+- long idx, type;
+-
+- type = kmap_atomic_idx_push();
+- idx = type + KM_TYPE_NR*smp_processor_id();
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-
+-/* XXX Fix - Anton */
+-#if 0
+- __flush_cache_one(vaddr);
+-#else
+- flush_cache_all();
+-#endif
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte-idx)));
+-#endif
+- set_pte(kmap_pte-idx, mk_pte(page, prot));
+-/* XXX Fix - Anton */
+-#if 0
+- __flush_tlb_one(vaddr);
+-#else
+- flush_tlb_all();
+-#endif
+-
+- return (void*) vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kvaddr)
+-{
+- unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
+- int type;
+-
+- if (vaddr < FIXADDR_START)
+- return;
+-
+- type = kmap_atomic_idx();
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- {
+- unsigned long idx;
+-
+- idx = type + KM_TYPE_NR * smp_processor_id();
+- BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx));
+-
+- /* XXX Fix - Anton */
+-#if 0
+- __flush_cache_one(vaddr);
+-#else
+- flush_cache_all();
+-#endif
+-
+- /*
+- * force other mappings to Oops if they'll try to access
+- * this pte without first remap it
+- */
+- pte_clear(&init_mm, vaddr, kmap_pte-idx);
+- /* XXX Fix - Anton */
+-#if 0
+- __flush_tlb_one(vaddr);
+-#else
+- flush_tlb_all();
+-#endif
+- }
+-#endif
+-
+- kmap_atomic_idx_pop();
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -971,8 +971,6 @@ void __init srmmu_paging_init(void)
+
+ sparc_context_init(num_contexts);
+
+- kmap_init();
+-
+ {
+ unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
+
diff --git a/patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
new file mode 100644
index 000000000000..8bab507b8dc8
--- /dev/null
+++ b/patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -0,0 +1,115 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 14:04:36 +0200
+Subject: [PATCH 13/18] xtensa/mm/highmem: Switch to generic kmap atomic
+
+No reason to have the same code in every architecture.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Chris Zankel <chris@zankel.net>
+Cc: Max Filippov <jcmvbkbc@gmail.com>
+Cc: linux-xtensa@linux-xtensa.org
+---
+ arch/xtensa/Kconfig | 1
+ arch/xtensa/include/asm/highmem.h | 9 +++++++
+ arch/xtensa/mm/highmem.c | 44 +++-----------------------------------
+ 3 files changed, 14 insertions(+), 40 deletions(-)
+
+--- a/arch/xtensa/Kconfig
++++ b/arch/xtensa/Kconfig
+@@ -666,6 +666,7 @@ endchoice
+ config HIGHMEM
+ bool "High Memory Support"
+ depends on MMU
++ select KMAP_LOCAL
+ help
+ Linux can use the full amount of RAM in the system by
+ default. However, the default MMUv2 setup only maps the
+--- a/arch/xtensa/include/asm/highmem.h
++++ b/arch/xtensa/include/asm/highmem.h
+@@ -68,6 +68,15 @@ static inline void flush_cache_kmaps(voi
+ flush_cache_all();
+ }
+
++enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn);
++#define arch_kmap_local_map_idx kmap_local_map_idx
++
++enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr);
++#define arch_kmap_local_unmap_idx kmap_local_unmap_idx
++
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
++
+ void kmap_init(void);
+
+ #endif
+--- a/arch/xtensa/mm/highmem.c
++++ b/arch/xtensa/mm/highmem.c
+@@ -12,8 +12,6 @@
+ #include <linux/highmem.h>
+ #include <asm/tlbflush.h>
+
+-static pte_t *kmap_pte;
+-
+ #if DCACHE_WAY_SIZE > PAGE_SIZE
+ unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
+ wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
+@@ -37,55 +35,21 @@ static inline enum fixed_addresses kmap_
+ color;
+ }
+
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
++enum fixed_addresses kmap_local_map_idx(int type, unsigned long pfn)
+ {
+- enum fixed_addresses idx;
+- unsigned long vaddr;
+-
+- idx = kmap_idx(kmap_atomic_idx_push(),
+- DCACHE_ALIAS(page_to_phys(page)));
+- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- BUG_ON(!pte_none(*(kmap_pte + idx)));
+-#endif
+- set_pte(kmap_pte + idx, mk_pte(page, prot));
+-
+- return (void *)vaddr;
++	return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
+ }
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+
+-void kunmap_atomic_high(void *kvaddr)
++enum fixed_addresses kmap_local_unmap_idx(int type, unsigned long addr)
+ {
+- if (kvaddr >= (void *)FIXADDR_START &&
+- kvaddr < (void *)FIXADDR_TOP) {
+- int idx = kmap_idx(kmap_atomic_idx(),
+- DCACHE_ALIAS((unsigned long)kvaddr));
+-
+- /*
+- * Force other mappings to Oops if they'll try to access this
+- * pte without first remap it. Keeping stale mappings around
+- * is a bad idea also, in case the page changes cacheability
+- * attributes or becomes a protected page in a hypervisor.
+- */
+- pte_clear(&init_mm, kvaddr, kmap_pte + idx);
+- local_flush_tlb_kernel_range((unsigned long)kvaddr,
+- (unsigned long)kvaddr + PAGE_SIZE);
+-
+- kmap_atomic_idx_pop();
+- }
++ return kmap_idx(type, DCACHE_ALIAS(addr));
+ }
+-EXPORT_SYMBOL(kunmap_atomic_high);
+
+ void __init kmap_init(void)
+ {
+- unsigned long kmap_vstart;
+-
+ /* Check if this memory layout is broken because PKMAP overlaps
+ * page table.
+ */
+ BUILD_BUG_ON(PKMAP_BASE < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
+- /* cache the first kmap pte */
+- kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
+- kmap_pte = virt_to_kpte(kmap_vstart);
+ kmap_waitqueues_init();
+ }
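
The xtensa hooks are needed because the D-cache can alias when
DCACHE_WAY_SIZE exceeds PAGE_SIZE (see the #if above), so the chosen
fixmap slot has to match the page's cache colour. A hypothetical sketch
of such a colour-aware index calculation (names and layout are
illustrative, not the actual xtensa macros):

	/* one fixmap slot per (nesting level, cache colour) pair */
	static inline int coloured_kmap_idx(int type, unsigned long paddr,
					    int ncolours)
	{
		int colour = (paddr >> PAGE_SHIFT) & (ncolours - 1);

		return type * ncolours + colour;
	}
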
diff --git a/patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch b/patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
new file mode 100644
index 000000000000..4c5f89936221
--- /dev/null
+++ b/patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
@@ -0,0 +1,147 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 14:58:47 +0200
+Subject: [PATCH 14/18] mm/highmem: Remove the old kmap_atomic cruft
+
+All users gone.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/highmem.h | 61 ++----------------------------------------------
+ mm/highmem.c | 28 ++++++++++++++++++----
+ 2 files changed, 27 insertions(+), 62 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -88,31 +88,16 @@ static inline void kunmap(struct page *p
+ * be used in IRQ contexts, so in some (very limited) cases we need
+ * it.
+ */
+-
+-#ifndef CONFIG_KMAP_LOCAL
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+-void kunmap_atomic_high(void *kvaddr);
+-
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+ preempt_disable();
+ pagefault_disable();
+- if (!PageHighMem(page))
+- return page_address(page);
+- return kmap_atomic_high_prot(page, prot);
+-}
+-
+-static inline void __kunmap_atomic(void *vaddr)
+-{
+- kunmap_atomic_high(vaddr);
++ return __kmap_local_page_prot(page, prot);
+ }
+-#else /* !CONFIG_KMAP_LOCAL */
+
+-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
+- pagefault_disable();
+- return __kmap_local_page_prot(page, prot);
++ return kmap_atomic_prot(page, kmap_prot);
+ }
+
+ static inline void *kmap_atomic_pfn(unsigned long pfn)
+@@ -127,13 +112,6 @@ static inline void __kunmap_atomic(void
+ kunmap_local_indexed(addr);
+ }
+
+-#endif /* CONFIG_KMAP_LOCAL */
+-
+-static inline void *kmap_atomic(struct page *page)
+-{
+- return kmap_atomic_prot(page, kmap_prot);
+-}
+-
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
+ extern atomic_long_t _totalhigh_pages;
+@@ -226,39 +204,6 @@ static inline void __kunmap_atomic(void
+
+ #endif /* CONFIG_HIGHMEM */
+
+-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+-
+-DECLARE_PER_CPU(int, __kmap_atomic_idx);
+-
+-static inline int kmap_atomic_idx_push(void)
+-{
+- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- WARN_ON_ONCE(in_irq() && !irqs_disabled());
+- BUG_ON(idx >= KM_TYPE_NR);
+-#endif
+- return idx;
+-}
+-
+-static inline int kmap_atomic_idx(void)
+-{
+- return __this_cpu_read(__kmap_atomic_idx) - 1;
+-}
+-
+-static inline void kmap_atomic_idx_pop(void)
+-{
+-#ifdef CONFIG_DEBUG_HIGHMEM
+- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+-
+- BUG_ON(idx < 0);
+-#else
+- __this_cpu_dec(__kmap_atomic_idx);
+-#endif
+-}
+-
+-#endif
+-
+ /*
+ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -32,10 +32,6 @@
+ #include <linux/vmalloc.h>
+ #include <asm/fixmap.h>
+
+-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+-DEFINE_PER_CPU(int, __kmap_atomic_idx);
+-#endif
+-
+ /*
+ * Virtual_count is not a pure "count".
+ * 0 means that it is not mapped, and has not been mapped
+@@ -370,6 +366,30 @@ EXPORT_SYMBOL(kunmap_high);
+ #endif /* CONFIG_HIGHMEM */
+
+ #ifdef CONFIG_KMAP_LOCAL
++
++static DEFINE_PER_CPU(int, __kmap_atomic_idx);
++
++static inline int kmap_atomic_idx_push(void)
++{
++ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
++
++ WARN_ON_ONCE(in_irq() && !irqs_disabled());
++ BUG_ON(idx >= KM_TYPE_NR);
++ return idx;
++}
++
++static inline int kmap_atomic_idx(void)
++{
++ return __this_cpu_read(__kmap_atomic_idx) - 1;
++}
++
++static inline void kmap_atomic_idx_pop(void)
++{
++ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
++
++ BUG_ON(idx < 0);
++}
++
+ #ifndef arch_kmap_local_post_map
+ # define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
+ #endif
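
The index handling above is a per-CPU stack, so nested atomic kmaps must
be released in reverse order. A minimal usage sketch (the helper is made
up for illustration):

	static void copy_highpage_example(struct page *dst, struct page *src)
	{
		void *d = kmap_atomic(dst);
		void *s = kmap_atomic(src);

		memcpy(d, s, PAGE_SIZE);

		/* stack order: drop the most recently taken mapping first */
		kunmap_atomic(s);
		kunmap_atomic(d);
	}
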
diff --git a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
index 49d6be82b4c8..81b40d586222 100644
--- a/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
+++ b/patches/0014-sched-Add-saved_state-for-tasks-blocked-on-sleeping-.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -639,6 +639,8 @@ struct task_struct {
+@@ -647,6 +647,8 @@ struct task_struct {
#endif
/* -1 unrunnable, 0 runnable, >0 stopped: */
volatile long state;
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1741,6 +1743,7 @@ extern struct task_struct *find_get_task
+@@ -1750,6 +1752,7 @@ extern struct task_struct *find_get_task
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3274,7 +3274,7 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3266,7 +3266,7 @@ try_to_wake_up(struct task_struct *p, un
int cpu, success = 0;
preempt_disable();
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We're waking current, this means 'p->on_rq' and 'task_cpu(p)
* == smp_processor_id()'. Together this means we can special
-@@ -3304,8 +3304,26 @@ try_to_wake_up(struct task_struct *p, un
+@@ -3296,8 +3296,26 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -3495,6 +3513,18 @@ int wake_up_process(struct task_struct *
+@@ -3487,6 +3505,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/0015-io-mapping-Cleanup-atomic-iomap.patch b/patches/0015-io-mapping-Cleanup-atomic-iomap.patch
new file mode 100644
index 000000000000..99f5130a9781
--- /dev/null
+++ b/patches/0015-io-mapping-Cleanup-atomic-iomap.patch
@@ -0,0 +1,78 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Oct 2020 14:38:06 +0100
+Subject: [PATCH 15/18] io-mapping: Cleanup atomic iomap
+
+Switch the atomic iomap implementation over to kmap_local and stick the
+preempt/pagefault mechanics into the generic code similar to the
+kmap_atomic variants.
+
+Rename the x86 map function in preparation for a non-atomic variant.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/x86/include/asm/iomap.h | 9 +--------
+ arch/x86/mm/iomap_32.c | 6 ++----
+ include/linux/io-mapping.h | 8 ++++++--
+ 3 files changed, 9 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/include/asm/iomap.h
++++ b/arch/x86/include/asm/iomap.h
+@@ -13,14 +13,7 @@
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
+
+-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot);
+-
+-static inline void iounmap_atomic(void __iomem *vaddr)
+-{
+- kunmap_local_indexed((void __force *)vaddr);
+- pagefault_enable();
+- preempt_enable();
+-}
++void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+
+ int iomap_create_wc(resource_size_t base, unsigned long size, pgprot_t *prot);
+
+--- a/arch/x86/mm/iomap_32.c
++++ b/arch/x86/mm/iomap_32.c
+@@ -44,7 +44,7 @@ void iomap_free(resource_size_t base, un
+ }
+ EXPORT_SYMBOL_GPL(iomap_free);
+
+-void __iomem *iomap_atomic_pfn_prot(unsigned long pfn, pgprot_t prot)
++void __iomem *__iomap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
+ {
+ /*
+ * For non-PAT systems, translate non-WB request to UC- just in
+@@ -60,8 +60,6 @@ void __iomem *iomap_atomic_pfn_prot(unsi
+ /* Filter out unsupported __PAGE_KERNEL* bits: */
+ pgprot_val(prot) &= __default_kernel_pte_mask;
+
+- preempt_disable();
+- pagefault_disable();
+ return (void __force __iomem *)__kmap_local_pfn_prot(pfn, prot);
+ }
+-EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
++EXPORT_SYMBOL_GPL(__iomap_local_pfn_prot);
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -69,13 +69,17 @@ io_mapping_map_atomic_wc(struct io_mappi
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+- return iomap_atomic_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
++ preempt_disable();
++ pagefault_disable();
++ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+ }
+
+ static inline void
+ io_mapping_unmap_atomic(void __iomem *vaddr)
+ {
+- iounmap_atomic(vaddr);
++ kunmap_local_indexed((void __force *)vaddr);
++ pagefault_enable();
++ preempt_enable();
+ }
+
+ static inline void __iomem *
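
For reference, a driver-style usage sketch of the atomic io-mapping API
whose preempt/pagefault handling now sits in the generic wrapper (the
helper and its register layout are made up for illustration):

	static void write_reg32(struct io_mapping *map, unsigned long offset,
				u32 val)
	{
		void __iomem *p;

		p = io_mapping_map_atomic_wc(map, offset & PAGE_MASK);
		writel(val, p + (offset & ~PAGE_MASK));
		io_mapping_unmap_atomic(p);
	}
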
diff --git a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
index e88ab534c6d4..f61b2556937f 100644
--- a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -219,6 +219,10 @@ extern void __cant_sleep(const char *fil
+@@ -220,6 +220,10 @@ extern void __cant_migrate(const char *f
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -34,14 +34,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* cant_sleep - annotation for functions that cannot sleep
*
-@@ -250,6 +254,7 @@ extern void __cant_sleep(const char *fil
+@@ -263,6 +267,7 @@ extern void __cant_migrate(const char *f
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
+# define might_sleep_no_state_check() do { might_resched(); } while (0)
# define cant_sleep() do { } while (0)
+ # define cant_migrate() do { } while (0)
# define sched_annotate_sleep() do { } while (0)
- # define non_block_start() do { } while (0)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -118,7 +118,11 @@
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -140,6 +140,9 @@ struct io_uring_task;
+@@ -141,6 +141,9 @@ struct io_uring_task;
smp_store_mb(current->state, (state_value)); \
} while (0)
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define set_special_state(state_value) \
do { \
unsigned long flags; /* may shadow */ \
-@@ -193,6 +196,9 @@ struct io_uring_task;
+@@ -194,6 +197,9 @@ struct io_uring_task;
#define set_current_state(state_value) \
smp_store_mb(current->state, (state_value))
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* set_special_state() should be used for those states when the blocking task
* can not use the regular condition based wait-loop. In that case we must
-@@ -984,6 +990,7 @@ struct task_struct {
+@@ -992,6 +998,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
diff --git a/patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch b/patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch
new file mode 100644
index 000000000000..4849c7ea2d4f
--- /dev/null
+++ b/patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch
@@ -0,0 +1,292 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 09:27:11 +0200
+Subject: [PATCH 16/18] sched: highmem: Store local kmaps in task struct
+
+Instead of storing the map per CPU provide and use per task storage. That
+prepares for local kmaps which are preemptible.
+
+The context switch code is preparatory and not yet in use because
+kmap_atomic() runs with preemption disabled. Will be made usable in the
+next step.
+
+The context switch logic is safe even when an interrupt happens after
+clearing or before restoring the kmaps. The kmap index in task struct is
+not modified so any nesting kmap in an interrupt will use unused indices
+and on return the counter is the same as before.
+
+Also add an assert into the return to user space code. Going back to user
+space with an active kmap local is a no-no.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/highmem.h | 10 +++++
+ include/linux/sched.h | 9 ++++
+ kernel/entry/common.c | 2 +
+ kernel/fork.c | 1
+ kernel/sched/core.c | 18 +++++++++
+ mm/highmem.c | 96 +++++++++++++++++++++++++++++++++++++++++-------
+ 6 files changed, 123 insertions(+), 13 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -38,6 +38,16 @@ static inline void invalidate_kernel_vma
+ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+ void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+ void kunmap_local_indexed(void *vaddr);
++void kmap_local_fork(struct task_struct *tsk);
++void __kmap_local_sched_out(void);
++void __kmap_local_sched_in(void);
++static inline void kmap_assert_nomap(void)
++{
++ DEBUG_LOCKS_WARN_ON(current->kmap_ctrl.idx);
++}
++#else
++static inline void kmap_local_fork(struct task_struct *tsk) { }
++static inline void kmap_assert_nomap(void) { }
+ #endif
+
+ #ifdef CONFIG_HIGHMEM
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -34,6 +34,7 @@
+ #include <linux/rseq.h>
+ #include <linux/seqlock.h>
+ #include <linux/kcsan.h>
++#include <asm/kmap_types.h>
+
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -629,6 +630,13 @@ struct wake_q_node {
+ struct wake_q_node *next;
+ };
+
++struct kmap_ctrl {
++#ifdef CONFIG_KMAP_LOCAL
++ int idx;
++ pte_t pteval[KM_TYPE_NR];
++#endif
++};
++
+ struct task_struct {
+ #ifdef CONFIG_THREAD_INFO_IN_TASK
+ /*
+@@ -1294,6 +1302,7 @@ struct task_struct {
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
+ #endif
++ struct kmap_ctrl kmap_ctrl;
+ #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
+ unsigned long task_state_change;
+ #endif
+--- a/kernel/entry/common.c
++++ b/kernel/entry/common.c
+@@ -2,6 +2,7 @@
+
+ #include <linux/context_tracking.h>
+ #include <linux/entry-common.h>
++#include <linux/highmem.h>
+ #include <linux/livepatch.h>
+ #include <linux/audit.h>
+
+@@ -194,6 +195,7 @@ static void exit_to_user_mode_prepare(st
+
+ /* Ensure that the address limit is intact and no locks are held */
+ addr_limit_user_check();
++ kmap_assert_nomap();
+ lockdep_assert_irqs_disabled();
+ lockdep_sys_exit();
+ }
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -930,6 +930,7 @@ static struct task_struct *dup_task_stru
+ account_kernel_stack(tsk, 1);
+
+ kcov_task_init(tsk);
++ kmap_local_fork(tsk);
+
+ #ifdef CONFIG_FAULT_INJECTION
+ tsk->fail_nth = 0;
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -4053,6 +4053,22 @@ static inline void finish_lock_switch(st
+ # define finish_arch_post_lock_switch() do { } while (0)
+ #endif
+
++static inline void kmap_local_sched_out(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++ if (unlikely(current->kmap_ctrl.idx))
++ __kmap_local_sched_out();
++#endif
++}
++
++static inline void kmap_local_sched_in(void)
++{
++#ifdef CONFIG_KMAP_LOCAL
++ if (unlikely(current->kmap_ctrl.idx))
++ __kmap_local_sched_in();
++#endif
++}
++
+ /**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+@@ -4075,6 +4091,7 @@ prepare_task_switch(struct rq *rq, struc
+ perf_event_task_sched_out(prev, next);
+ rseq_preempt(prev);
+ fire_sched_out_preempt_notifiers(prev, next);
++ kmap_local_sched_out();
+ prepare_task(next);
+ prepare_arch_switch(next);
+ }
+@@ -4141,6 +4158,7 @@ static struct rq *finish_task_switch(str
+ finish_lock_switch(rq);
+ finish_arch_post_lock_switch();
+ kcov_finish_switch(current);
++ kmap_local_sched_in();
+
+ fire_sched_in_preempt_notifiers(current);
+ /*
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -367,27 +367,24 @@ EXPORT_SYMBOL(kunmap_high);
+
+ #ifdef CONFIG_KMAP_LOCAL
+
+-static DEFINE_PER_CPU(int, __kmap_atomic_idx);
+-
+-static inline int kmap_atomic_idx_push(void)
++static inline int kmap_local_idx_push(void)
+ {
+- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
++ int idx = current->kmap_ctrl.idx++;
+
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx >= KM_TYPE_NR);
+ return idx;
+ }
+
+-static inline int kmap_atomic_idx(void)
++static inline int kmap_local_idx(void)
+ {
+- return __this_cpu_read(__kmap_atomic_idx) - 1;
++ return current->kmap_ctrl.idx - 1;
+ }
+
+-static inline void kmap_atomic_idx_pop(void)
++static inline void kmap_local_idx_pop(void)
+ {
+- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+-
+- BUG_ON(idx < 0);
++ current->kmap_ctrl.idx--;
++ BUG_ON(current->kmap_ctrl.idx < 0);
+ }
+
+ #ifndef arch_kmap_local_post_map
+@@ -447,12 +444,13 @@ void *__kmap_local_pfn_prot(unsigned lon
+ int idx;
+
+ preempt_disable();
+- idx = arch_kmap_local_map_idx(kmap_atomic_idx_push(), pfn);
++ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+ pteval = pfn_pte(pfn, prot);
+ set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
+ arch_kmap_local_post_map(vaddr, pteval);
++ current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
+ preempt_enable();
+
+ return (void *)vaddr;
+@@ -491,16 +489,88 @@ void kunmap_local_indexed(void *vaddr)
+ }
+
+ preempt_disable();
+- idx = arch_kmap_local_unmap_idx(kmap_atomic_idx(), addr);
++ idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
+ WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ arch_kmap_local_pre_unmap(addr);
+ pte_clear(&init_mm, addr, kmap_pte - idx);
+ arch_kmap_local_post_unmap(addr);
+- kmap_atomic_idx_pop();
++ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
++ kmap_local_idx_pop();
+ preempt_enable();
+ }
+ EXPORT_SYMBOL(kunmap_local_indexed);
++
++/*
++ * Invoked before switch_to(). This is safe even when during or after
++ * clearing the maps an interrupt which needs a kmap_local happens because
++ * the task::kmap_ctrl.idx is not modified by the unmapping code so a
++ * nested kmap_local will use the next unused index and restore the index
++ * on unmap. The already cleared kmaps of the outgoing task are irrelevant
++ * because the interrupt context does not know about them. The same applies
++ * when scheduling back in for an interrupt which happens before the
++ * restore is complete.
++ */
++void __kmap_local_sched_out(void)
++{
++ struct task_struct *tsk = current;
++ pte_t *kmap_pte = kmap_get_pte();
++ int i;
++
++ /* Clear kmaps */
++ for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
++ pte_t pteval = tsk->kmap_ctrl.pteval[i];
++ unsigned long addr;
++ int idx;
++
++ if (WARN_ON_ONCE(pte_none(pteval)))
++ continue;
++
++ /*
++ * This is a horrible hack for XTENSA to calculate the
++ * coloured PTE index. Uses the PFN encoded into the pteval
++ * and the map index calculation because the actual mapped
++ * virtual address is not stored in task::kmap_ctrl.
++ * For any sane architecture this is optimized out.
++ */
++ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
++
++ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ arch_kmap_local_pre_unmap(addr);
++ pte_clear(&init_mm, addr, kmap_pte - idx);
++ arch_kmap_local_post_unmap(addr);
++ }
++}
++
++void __kmap_local_sched_in(void)
++{
++ struct task_struct *tsk = current;
++ pte_t *kmap_pte = kmap_get_pte();
++ int i;
++
++ /* Restore kmaps */
++ for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
++ pte_t pteval = tsk->kmap_ctrl.pteval[i];
++ unsigned long addr;
++ int idx;
++
++ if (WARN_ON_ONCE(pte_none(pteval)))
++ continue;
++
++ /* See comment in __kmap_local_sched_out() */
++ idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
++ addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte_at(&init_mm, addr, kmap_pte - idx, pteval);
++ arch_kmap_local_post_map(addr, pteval);
++ }
++}
++
++void kmap_local_fork(struct task_struct *tsk)
++{
++ if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
++ memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
++}
++
+ #endif
+
+ #if defined(HASHED_PAGE_VIRTUAL)
diff --git a/patches/0017-mm-highmem-Provide-kmap_local.patch b/patches/0017-mm-highmem-Provide-kmap_local.patch
new file mode 100644
index 000000000000..405744c5dd58
--- /dev/null
+++ b/patches/0017-mm-highmem-Provide-kmap_local.patch
@@ -0,0 +1,177 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Fri, 18 Sep 2020 21:29:41 +0200
+Subject: [PATCH 17/18] mm/highmem: Provide kmap_local*
+
+Now that the kmap atomic index is stored in task struct provide a
+preemptible variant. On context switch the maps of an outgoing task are
+removed and the maps of the incoming task are restored. That's obviously
+slow, but highmem is slow anyway.
+
+The kmap_local.*() functions can be invoked from both preemptible and
+atomic context. kmap local sections disable migration to keep the resulting
+virtual mapping address correct, but disable neither pagefaults nor
+preemption.
+
+A wholesale conversion of kmap_atomic to be fully preemptible is not
+possible because some of the usage sites might rely on the preemption
+disable for serialization or on the implicit pagefault disable. Needs to be
+done on a case by case basis.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/highmem.h | 117 +++++++++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 101 insertions(+), 16 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -86,17 +86,56 @@ static inline void kunmap(struct page *p
+ }
+
+ /*
+- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+- * no global lock is needed and because the kmap code must perform a global TLB
+- * invalidation when the kmap pool wraps.
+- *
+- * However when holding an atomic kmap it is not legal to sleep, so atomic
+- * kmaps are appropriate for short, tight code paths only.
+- *
+- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+- * gives a more generic (and caching) interface. But kmap_atomic can
+- * be used in IRQ contexts, so in some (very limited) cases we need
+- * it.
++ * For highmem systems it is required to temporarily map pages
++ * which reside in the portion of memory which is not covered
++ * by the permanent kernel mapping.
++ *
++ * This comes in three flavors:
++ *
++ * 1) kmap/kunmap:
++ *
++ * An interface to acquire longer term mappings with no restrictions
++ * on preemption and migration. This comes with an overhead as the
++ * mapping space is restricted and protected by a global lock. It
++ * also requires global TLB invalidation when the kmap pool wraps.
++ *
++ * kmap() might block when the mapping space is fully utilized until a
++ * slot becomes available. Only callable from preemptible thread
++ * context.
++ *
++ * 2) kmap_local.*()/kunmap_local.*()
++ *
++ * An interface to acquire short term mappings. Can be invoked from any
++ * context including interrupts. The mapping is per thread, CPU local
++ * and not globally visible. It can only be used in the context which
++ * acquired the mapping. Nesting kmap_local.*() and kmap_atomic.*()
++ * mappings is allowed to a certain extent (up to KMAP_TYPE_NR).
++ *
++ * Nested kmap_local.*() and kunmap_local.*() invocations have to be
++ * strictly ordered because the map implementation is stack based.
++ *
++ * kmap_local.*() disables migration, but keeps preemption enabled. It's
++ * valid to take pagefaults in a kmap_local region unless the context in
++ * which the local kmap is acquired does not allow it for other reasons.
++ *
++ * If a task holding local kmaps is preempted, the maps are removed on
++ * context switch and restored when the task comes back on the CPU. As
++ * the maps are strictly CPU local it is guaranteed that the task stays
++ * on the CPU and the CPU cannot be unplugged until the local kmaps are
++ * released.
++ *
++ * 3) kmap_atomic.*()/kunmap_atomic.*()
++ *
++ * Based on the same mechanism as kmap local. Atomic kmap disables
++ * preemption and pagefaults. Only use if absolutely required, use
++ * the corresponding kmap_local variant if possible.
++ *
++ * Local and atomic kmaps are faster than kmap/kunmap, but impose
++ * restrictions. Only use them when required.
++ *
++ * For !HIGHMEM enabled systems the kmap flavours are not doing any mapping
++ * operation and kmap() won't sleep, but the kmap local and atomic variants
++ * still disable migration and pagefaults/preemption, respectively.
+ */
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+@@ -122,6 +161,28 @@ static inline void __kunmap_atomic(void
+ kunmap_local_indexed(addr);
+ }
+
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ migrate_disable();
++ return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_local_page(struct page *page)
++{
++ return kmap_local_page_prot(page, kmap_prot);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ migrate_disable();
++ return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_local(void *vaddr)
++{
++ kunmap_local_indexed(vaddr);
++}
++
+ /* declarations for linux/mm/highmem.c */
+ unsigned int nr_free_highpages(void);
+ extern atomic_long_t _totalhigh_pages;
+@@ -199,17 +260,34 @@ static inline void *kmap_atomic_pfn(unsi
+ return kmap_atomic(pfn_to_page(pfn));
+ }
+
+-static inline void __kunmap_atomic(void *addr)
++static inline void __kunmap_local(void *addr)
+ {
+- /*
+- * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+- * handles re-enabling faults and preemption
+- */
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+ kunmap_flush_on_unmap(addr);
+ #endif
+ }
+
++static inline void __kunmap_atomic(void *addr)
++{
++ __kunmap_local(addr);
++}
++
++static inline void *kmap_local_page(struct page *page)
++{
++ migrate_disable();
++ return page_address(page);
++}
++
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ return kmap_local_page(page);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ return kmap_local_page(pfn_to_page(pfn));
++}
++
+ #define kmap_flush_unused() do {} while(0)
+
+ #endif /* CONFIG_HIGHMEM */
+@@ -226,6 +304,13 @@ do { \
+ preempt_enable(); \
+ } while (0)
+
++#define kunmap_local(__addr) \
++do { \
++ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
++ __kunmap_local(__addr); \
++ migrate_enable(); \
++} while (0)
++
+ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+ #ifndef clear_user_highpage
+ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
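
A short usage sketch of the new interface (the helper is made up for
illustration): unlike a kmap_atomic() section, the code below may take
pagefaults and be preempted; only migration is disabled so the returned
address stays valid.

	static void zero_highpage_local(struct page *page)
	{
		void *addr = kmap_local_page(page);

		memset(addr, 0, PAGE_SIZE);
		kunmap_local(addr);
	}
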
diff --git a/patches/0018-io-mapping-Provide-iomap_local-variant.patch b/patches/0018-io-mapping-Provide-iomap_local-variant.patch
new file mode 100644
index 000000000000..2f00fd9b99a3
--- /dev/null
+++ b/patches/0018-io-mapping-Provide-iomap_local-variant.patch
@@ -0,0 +1,69 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 29 Oct 2020 21:59:08 +0100
+Subject: [PATCH 18/18] io-mapping: Provide iomap_local variant
+
+Similar to kmap local provide a iomap local variant which only disables
+migration, but neither disables pagefaults nor preemption.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/io-mapping.h | 34 ++++++++++++++++++++++++++++++++--
+ 1 file changed, 32 insertions(+), 2 deletions(-)
+
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -83,6 +83,23 @@ io_mapping_unmap_atomic(void __iomem *va
+ }
+
+ static inline void __iomem *
++io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
++{
++ resource_size_t phys_addr;
++
++ BUG_ON(offset >= mapping->size);
++ phys_addr = mapping->base + offset;
++ migrate_disable();
++ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
++}
++
++static inline void io_mapping_unmap_local(void __iomem *vaddr)
++{
++ kunmap_local_indexed((void __force *)vaddr);
++ migrate_enable();
++}
++
++static inline void __iomem *
+ io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
+@@ -101,7 +118,7 @@ io_mapping_unmap(void __iomem *vaddr)
+ iounmap(vaddr);
+ }
+
+-#else
++#else /* HAVE_ATOMIC_IOMAP */
+
+ #include <linux/uaccess.h>
+
+@@ -166,7 +183,20 @@ io_mapping_unmap_atomic(void __iomem *va
+ preempt_enable();
+ }
+
+-#endif /* HAVE_ATOMIC_IOMAP */
++static inline void __iomem *
++io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
++{
++ migrate_disable();
++ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
++}
++
++static inline void io_mapping_unmap_local(void __iomem *vaddr)
++{
++ io_mapping_unmap(vaddr);
++ migrate_enable();
++}
++
++#endif /* !HAVE_ATOMIC_IOMAP */
+
+ static inline struct io_mapping *
+ io_mapping_create_wc(resource_size_t base,
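
And the matching usage sketch for the local io-mapping variant (again a
made-up helper): pagefaults and preemption stay enabled, only migration
is disabled across the section.

	static void clear_window(struct io_mapping *map, unsigned long offset)
	{
		void __iomem *p = io_mapping_map_local_wc(map, offset);

		memset_io(p, 0, PAGE_SIZE);
		io_mapping_unmap_local(p);
	}
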
diff --git a/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch b/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
index a19d4ea656a0..d2c3c57cbea5 100644
--- a/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
+++ b/patches/0022-locking-rtmutex-Use-custom-scheduling-function-for-s.patch
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4953,7 +4953,7 @@ pick_next_task(struct rq *rq, struct tas
+@@ -4963,7 +4963,7 @@ pick_next_task(struct rq *rq, struct tas
*
* WARNING: must be called with preemption disabled!
*/
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *prev, *next;
unsigned long *switch_count;
-@@ -5006,7 +5006,7 @@ static void __sched notrace __schedule(b
+@@ -5016,7 +5016,7 @@ static void __sched notrace __schedule(b
* - ptrace_{,un}freeze_traced() can change ->state underneath us.
*/
prev_state = prev->state;
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (signal_pending_state(prev_state, prev)) {
prev->state = TASK_RUNNING;
} else {
-@@ -5090,7 +5090,7 @@ void __noreturn do_task_dead(void)
+@@ -5100,7 +5100,7 @@ void __noreturn do_task_dead(void)
/* Tell freezer to ignore us: */
current->flags |= PF_NOFREEZE;
@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
BUG();
/* Avoid "noreturn function does return" - but don't continue if BUG() is a NOP: */
-@@ -5123,9 +5123,6 @@ static inline void sched_submit_work(str
+@@ -5133,9 +5133,6 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -5151,7 +5148,7 @@ asmlinkage __visible void __sched schedu
+@@ -5161,7 +5158,7 @@ asmlinkage __visible void __sched schedu
sched_submit_work(tsk);
do {
preempt_disable();
@@ -166,7 +166,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sched_preempt_enable_no_resched();
} while (need_resched());
sched_update_worker(tsk);
-@@ -5179,7 +5176,7 @@ void __sched schedule_idle(void)
+@@ -5189,7 +5186,7 @@ void __sched schedule_idle(void)
*/
WARN_ON_ONCE(current->state);
do {
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} while (need_resched());
}
-@@ -5232,7 +5229,7 @@ static void __sched notrace preempt_sche
+@@ -5242,7 +5239,7 @@ static void __sched notrace preempt_sche
*/
preempt_disable_notrace();
preempt_latency_start(1);
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_latency_stop(1);
preempt_enable_no_resched_notrace();
-@@ -5262,6 +5259,19 @@ asmlinkage __visible void __sched notrac
+@@ -5272,6 +5269,19 @@ asmlinkage __visible void __sched notrac
NOKPROBE_SYMBOL(preempt_schedule);
EXPORT_SYMBOL(preempt_schedule);
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* preempt_schedule_notrace - preempt_schedule called by tracing
*
-@@ -5305,7 +5315,7 @@ asmlinkage __visible void __sched notrac
+@@ -5315,7 +5325,7 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
exception_exit(prev_ctx);
preempt_latency_stop(1);
-@@ -5334,7 +5344,7 @@ asmlinkage __visible void __sched preemp
+@@ -5344,7 +5354,7 @@ asmlinkage __visible void __sched preemp
do {
preempt_disable();
local_irq_enable();
diff --git a/patches/arm-enable-highmem-for-rt.patch b/patches/arm-enable-highmem-for-rt.patch
deleted file mode 100644
index 83fd112763dd..000000000000
--- a/patches/arm-enable-highmem-for-rt.patch
+++ /dev/null
@@ -1,160 +0,0 @@
-Subject: arm: Enable highmem for rt
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Wed, 13 Feb 2013 11:03:11 +0100
-
-fixup highmem for ARM.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/arm/include/asm/switch_to.h | 8 +++++
- arch/arm/mm/highmem.c | 53 ++++++++++++++++++++++++++++++++++-----
- include/linux/highmem.h | 1
- 3 files changed, 56 insertions(+), 6 deletions(-)
-
---- a/arch/arm/include/asm/switch_to.h
-+++ b/arch/arm/include/asm/switch_to.h
-@@ -4,6 +4,13 @@
-
- #include <linux/thread_info.h>
-
-+#if defined CONFIG_PREEMPT_RT && defined CONFIG_HIGHMEM
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
- /*
- * For v7 SMP cores running a preemptible kernel we may be pre-empted
- * during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(s
- #define switch_to(prev,next,last) \
- do { \
- __complete_pending_tlbi(); \
-+ switch_kmaps(prev, next); \
- last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
- } while (0)
-
---- a/arch/arm/mm/highmem.c
-+++ b/arch/arm/mm/highmem.c
-@@ -31,8 +31,14 @@ static inline pte_t get_fixmap_pte(unsig
- return *ptep;
- }
-
-+static unsigned int fixmap_idx(int type)
-+{
-+ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+}
-+
- void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
- {
-+ pte_t pte = mk_pte(page, kmap_prot);
- unsigned int idx;
- unsigned long vaddr;
- void *kmap;
-@@ -53,7 +59,7 @@ void *kmap_atomic_high_prot(struct page
-
- type = kmap_atomic_idx_push();
-
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- /*
-@@ -62,12 +68,15 @@ void *kmap_atomic_high_prot(struct page
- */
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = pte;
-+#endif
- /*
- * When debugging is off, kunmap_atomic leaves the previous mapping
- * in place, so the contained TLB flush ensures the TLB is updated
- * with the new mapping.
- */
-- set_fixmap_pte(idx, mk_pte(page, prot));
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-@@ -80,10 +89,13 @@ void kunmap_atomic_high(void *kvaddr)
-
- if (kvaddr >= (void *)FIXADDR_START) {
- type = kmap_atomic_idx();
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
-
- if (cache_is_vivt())
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
- #else
-@@ -100,22 +112,51 @@ EXPORT_SYMBOL(kunmap_atomic_high);
-
- void *kmap_atomic_pfn(unsigned long pfn)
- {
-+ pte_t pte = pfn_pte(pfn, kmap_prot);
- unsigned long vaddr;
- int idx, type;
- struct page *page = pfn_to_page(pfn);
-
-- preempt_disable();
-+ migrate_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-
- type = kmap_atomic_idx_push();
-- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id();
-+ idx = fixmap_idx(type);
- vaddr = __fix_to_virt(idx);
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
- #endif
-- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_fixmap_pte(idx, pte);
-
- return (void *)vaddr;
- }
-+
-+#if defined CONFIG_PREEMPT_RT
-+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = fixmap_idx(i);
-+
-+ set_fixmap_pte(idx, __pte(0));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = fixmap_idx(i);
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_fixmap_pte(idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#endif
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -8,6 +8,7 @@
- #include <linux/mm.h>
- #include <linux/uaccess.h>
- #include <linux/hardirq.h>
-+#include <linux/sched.h>
-
- #include <asm/cacheflush.h>
-
diff --git a/patches/arm-highmem-flush-tlb-on-unmap.patch b/patches/arm-highmem-flush-tlb-on-unmap.patch
deleted file mode 100644
index 1db1dd614e83..000000000000
--- a/patches/arm-highmem-flush-tlb-on-unmap.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 11 Mar 2013 21:37:27 +0100
-Subject: arm/highmem: Flush tlb on unmap
-
-The tlb should be flushed on unmap and thus make the mapping entry
-invalid. This is only done in the non-debug case which does not look
-right.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/mm/highmem.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/arm/mm/highmem.c
-+++ b/arch/arm/mm/highmem.c
-@@ -86,10 +86,10 @@ void kunmap_atomic_high(void *kvaddr)
- __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
- #ifdef CONFIG_DEBUG_HIGHMEM
- BUG_ON(vaddr != __fix_to_virt(idx));
-- set_fixmap_pte(idx, __pte(0));
- #else
- (void) idx; /* to kill a warning */
- #endif
-+ set_fixmap_pte(idx, __pte(0));
- kmap_atomic_idx_pop();
- } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
- /* this address was obtained through kmap_high_get() */
diff --git a/patches/block-mq-Disable-preemption-in-blk_mq_complete_reque.patch b/patches/block-mq-Disable-preemption-in-blk_mq_complete_reque.patch
new file mode 100644
index 000000000000..2b296a5435ba
--- /dev/null
+++ b/patches/block-mq-Disable-preemption-in-blk_mq_complete_reque.patch
@@ -0,0 +1,31 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2020 19:23:14 +0100
+Subject: [PATCH] block-mq: Disable preemption in
+ blk_mq_complete_request_remote()
+
+There are callers of blk_mq_complete_request() which invoke it in
+preemptible context.
+
+Disable preemption while an item is added to the local CPU's list to ensure
+that the softirq is fired on the same CPU.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ block/blk-mq.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/block/blk-mq.c
++++ b/block/blk-mq.c
+@@ -644,9 +644,12 @@ bool blk_mq_complete_request_remote(stru
+ } else {
+ if (rq->q->nr_hw_queues > 1)
+ return false;
++
++ preempt_disable();
+ cpu_list = this_cpu_ptr(&blk_cpu_done);
+ if (llist_add(&rq->ipi_list, cpu_list))
+ raise_softirq(BLOCK_SOFTIRQ);
++ preempt_enable();
+ }
+
+ return true;
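
To illustrate why the added preempt_disable() matters (schematic only,
not kernel code): without it the task can migrate between the per-CPU
lookup and the softirq raise, so the completion ends up on one CPU's
list while the softirq fires on another:

	cpu_list = this_cpu_ptr(&blk_cpu_done);	/* CPU0's list */
	/* preemption and migration to CPU1 can happen here */
	llist_add(&rq->ipi_list, cpu_list);	/* adds to CPU0's list */
	raise_softirq(BLOCK_SOFTIRQ);		/* raised on CPU1 instead */
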
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 74111952e3f2..c24219d2f069 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -1572,14 +1572,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1575,14 +1575,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch b/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
new file mode 100644
index 000000000000..72c60a32e97b
--- /dev/null
+++ b/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -0,0 +1,114 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 30 Oct 2020 13:59:06 +0100
+Subject: [PATCH] highmem: Don't disable preemption on RT in kmap_atomic()
+
+Disabling preemption makes it impossible to acquire sleeping locks within
+a kmap_atomic() section.
+For PREEMPT_RT it is sufficient to disable migration.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem.h | 20 ++++++++++++++++----
+ include/linux/io-mapping.h | 20 ++++++++++++++++----
+ 2 files changed, 32 insertions(+), 8 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -139,7 +139,10 @@ static inline void kunmap(struct page *p
+ */
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_page_prot(page, prot);
+ }
+@@ -151,7 +154,10 @@ static inline void *kmap_atomic(struct p
+
+ static inline void *kmap_atomic_pfn(unsigned long pfn)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return __kmap_local_pfn_prot(pfn, kmap_prot);
+ }
+@@ -245,7 +251,10 @@ static inline void kunmap(struct page *p
+
+ static inline void *kmap_atomic(struct page *page)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return page_address(page);
+ }
+@@ -301,7 +310,10 @@ do { \
+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+ __kunmap_atomic(__addr); \
+ pagefault_enable(); \
+- preempt_enable(); \
++ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
++ migrate_enable(); \
++ else \
++ preempt_enable(); \
+ } while (0)
+
+ #define kunmap_local(__addr) \
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mappi
+
+ BUG_ON(offset >= mapping->size);
+ phys_addr = mapping->base + offset;
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+ }
+@@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *va
+ {
+ kunmap_local_indexed((void __force *)vaddr);
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ static inline void __iomem *
+@@ -170,7 +176,10 @@ static inline void __iomem *
+ io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset)
+ {
+- preempt_disable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_disable();
++ else
++ preempt_disable();
+ pagefault_disable();
+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+ }
+@@ -180,7 +189,10 @@ io_mapping_unmap_atomic(void __iomem *va
+ {
+ io_mapping_unmap(vaddr);
+ pagefault_enable();
+- preempt_enable();
++ if (IS_ENABLED(CONFIG_PREEMPT_RT))
++ migrate_enable();
++ else
++ preempt_enable();
+ }
+
+ static inline void __iomem *
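For orientation, a minimal sketch of the enter/exit pairing the patch above establishes for kmap_atomic()/kunmap_atomic() and the io-mapping atomic helpers. The two helpers below are hypothetical; the real changes are open-coded in each function, as the hunks show.

	/* Sketch only: RT keeps the task on its CPU but stays preemptible,
	 * !RT keeps the historical preempt-disabled semantics.
	 */
	static inline void kmap_atomic_section_enter(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			migrate_disable();
		else
			preempt_disable();
		pagefault_disable();
	}

	static inline void kmap_atomic_section_exit(void)
	{
		pagefault_enable();
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			migrate_enable();
		else
			preempt_enable();
	}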
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index d6ef3ca112db..1f58e19127f8 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
task_numa_free(tsk, true);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4232,15 +4232,6 @@ static struct rq *finish_task_switch(str
+@@ -4242,15 +4242,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index a02382e6df70..279489a1d145 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt1
++-rt2
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
deleted file mode 100644
index 8756cef5b6de..000000000000
--- a/patches/mips-disable-highmem-on-rt.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-Subject: mips: Disable highmem on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 18 Jul 2011 17:10:12 +0200
-
-The current highmem handling on -RT is not compatible and needs fixups.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/mips/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/mips/Kconfig
-+++ b/arch/mips/Kconfig
-@@ -2718,7 +2718,7 @@ config WAR_MIPS34K_MISSED_ITLB
- #
- config HIGHMEM
- bool "High Memory Support"
-- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
-+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT
-
- config CPU_SUPPORTS_HIGHMEM
- bool
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
deleted file mode 100644
index 152bbec84b34..000000000000
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ /dev/null
@@ -1,287 +0,0 @@
-Subject: mm, rt: kmap_atomic scheduling
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Thu, 28 Jul 2011 10:43:51 +0200
-
-In fact, with migrate_disable() existing one could play games with
-kmap_atomic. You could save/restore the kmap_atomic slots on context
-switch (if there are any in use of course), this should be esp easy now
-that we have a kmap_atomic stack.
-
-Something like the below.. it wants replacing all the preempt_disable()
-stuff with pagefault_disable() && migrate_disable() of course, but then
-you can flip kmaps around like below.
-
-Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
-[dvhart@linux.intel.com: build fix]
-Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
-
-[tglx@linutronix.de: Get rid of the per cpu variable and store the idx
- and the pte content right away in the task struct.
- Shortens the context switch code. ]
----
- arch/x86/kernel/process_32.c | 32 ++++++++++++++++++++++++++++++++
- arch/x86/mm/highmem_32.c | 10 +++++++++-
- arch/x86/mm/iomap_32.c | 13 ++++++++++---
- include/linux/highmem.h | 33 ++++++++++++++++++++++++++-------
- include/linux/sched.h | 7 +++++++
- mm/highmem.c | 5 +++--
- 6 files changed, 87 insertions(+), 13 deletions(-)
-
---- a/arch/x86/kernel/process_32.c
-+++ b/arch/x86/kernel/process_32.c
-@@ -38,6 +38,7 @@
- #include <linux/io.h>
- #include <linux/kdebug.h>
- #include <linux/syscalls.h>
-+#include <linux/highmem.h>
-
- #include <asm/ldt.h>
- #include <asm/processor.h>
-@@ -126,6 +127,35 @@ start_thread(struct pt_regs *regs, unsig
- }
- EXPORT_SYMBOL_GPL(start_thread);
-
-+#ifdef CONFIG_PREEMPT_RT
-+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
-+{
-+ int i;
-+
-+ /*
-+ * Clear @prev's kmap_atomic mappings
-+ */
-+ for (i = 0; i < prev_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+ pte_t *ptep = kmap_pte - idx;
-+
-+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
-+ }
-+ /*
-+ * Restore @next_p's kmap_atomic mappings
-+ */
-+ for (i = 0; i < next_p->kmap_idx; i++) {
-+ int idx = i + KM_TYPE_NR * smp_processor_id();
-+
-+ if (!pte_none(next_p->kmap_pte[i]))
-+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
-+ }
-+}
-+#else
-+static inline void
-+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
-+#endif
-+
-
- /*
- * switch_to(x,y) should switch tasks from x to y.
-@@ -187,6 +217,8 @@ EXPORT_SYMBOL_GPL(start_thread);
-
- switch_to_extra(prev_p, next_p);
-
-+ switch_kmaps(prev_p, next_p);
-+
- /*
- * Leave lazy mode, flushing any hypercalls made here.
- * This must be done before restoring TLS segments so
---- a/arch/x86/mm/highmem_32.c
-+++ b/arch/x86/mm/highmem_32.c
-@@ -8,12 +8,17 @@ void *kmap_atomic_high_prot(struct page
- {
- unsigned long vaddr;
- int idx, type;
-+ pte_t pte;
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR*smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte-idx)));
-- set_pte(kmap_pte-idx, mk_pte(page, prot));
-+ pte = mk_pte(page, prot);
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte-idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -50,6 +55,9 @@ void kunmap_atomic_high(void *kvaddr)
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- arch_flush_lazy_mmu_mode();
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -46,16 +46,20 @@ EXPORT_SYMBOL_GPL(iomap_free);
-
- void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
- {
-+ pte_t pte = pfn_pte(pfn, prot);
- unsigned long vaddr;
- int idx, type;
-
-- preempt_disable();
-+ migrate_disable();
- pagefault_disable();
-
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-- set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = pte;
-+#endif
-+ set_pte(kmap_pte - idx, pte);
- arch_flush_lazy_mmu_mode();
-
- return (void *)vaddr;
-@@ -106,11 +110,14 @@ iounmap_atomic(void __iomem *kvaddr)
- * is a bad idea also, in case the page changes cacheability
- * attributes or becomes a protected page in a hypervisor.
- */
-+#ifdef CONFIG_PREEMPT_RT
-+ current->kmap_pte[type] = __pte(0);
-+#endif
- kpte_clear_flush(kmap_pte-idx, vaddr);
- kmap_atomic_idx_pop();
- }
-
- pagefault_enable();
-- preempt_enable();
-+ migrate_enable();
- }
- EXPORT_SYMBOL_GPL(iounmap_atomic);
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -83,7 +83,7 @@ static inline void kunmap(struct page *p
- */
- static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-- preempt_disable();
-+ migrate_disable();
- pagefault_disable();
- if (!PageHighMem(page))
- return page_address(page);
-@@ -153,7 +153,7 @@ static inline void kunmap(struct page *p
-
- static inline void *kmap_atomic(struct page *page)
- {
-- preempt_disable();
-+ migrate_disable();
- pagefault_disable();
- return page_address(page);
- }
-@@ -178,32 +178,51 @@ static inline void kunmap_atomic_high(vo
-
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
-
-+#ifndef CONFIG_PREEMPT_RT
- DECLARE_PER_CPU(int, __kmap_atomic_idx);
-+#endif
-
- static inline int kmap_atomic_idx_push(void)
- {
-+#ifndef CONFIG_PREEMPT_RT
- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-
--#ifdef CONFIG_DEBUG_HIGHMEM
-+# ifdef CONFIG_DEBUG_HIGHMEM
- WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
--#endif
-+# endif
- return idx;
-+#else
-+ current->kmap_idx++;
-+ BUG_ON(current->kmap_idx > KM_TYPE_NR);
-+ return current->kmap_idx - 1;
-+#endif
- }
-
- static inline int kmap_atomic_idx(void)
- {
-+#ifndef CONFIG_PREEMPT_RT
- return __this_cpu_read(__kmap_atomic_idx) - 1;
-+#else
-+ return current->kmap_idx - 1;
-+#endif
- }
-
- static inline void kmap_atomic_idx_pop(void)
- {
--#ifdef CONFIG_DEBUG_HIGHMEM
-+#ifndef CONFIG_PREEMPT_RT
-+# ifdef CONFIG_DEBUG_HIGHMEM
- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-
- BUG_ON(idx < 0);
--#else
-+# else
- __this_cpu_dec(__kmap_atomic_idx);
-+# endif
-+#else
-+ current->kmap_idx--;
-+# ifdef CONFIG_DEBUG_HIGHMEM
-+ BUG_ON(current->kmap_idx < 0);
-+# endif
- #endif
- }
-
-@@ -218,7 +237,7 @@ do {
- BUILD_BUG_ON(__same_type((addr), struct page *)); \
- kunmap_atomic_high(addr); \
- pagefault_enable(); \
-- preempt_enable(); \
-+ migrate_enable(); \
- } while (0)
-
-
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -34,6 +34,7 @@
- #include <linux/rseq.h>
- #include <linux/seqlock.h>
- #include <linux/kcsan.h>
-+#include <asm/kmap_types.h>
-
- /* task_struct member predeclarations (sorted alphabetically): */
- struct audit_context;
-@@ -1306,6 +1307,12 @@ struct task_struct {
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
- #endif
-+#ifdef CONFIG_PREEMPT_RT
-+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
-+ int kmap_idx;
-+ pte_t kmap_pte[KM_TYPE_NR];
-+# endif
-+#endif
- #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
- #endif
---- a/mm/highmem.c
-+++ b/mm/highmem.c
-@@ -31,8 +31,11 @@
- #include <asm/tlbflush.h>
- #include <linux/vmalloc.h>
-
-+#ifndef CONFIG_PREEMPT_RT
- #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
- DEFINE_PER_CPU(int, __kmap_atomic_idx);
-+EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
-+#endif
- #endif
-
- /*
-@@ -108,8 +111,6 @@ static inline wait_queue_head_t *get_pkm
- atomic_long_t _totalhigh_pages __read_mostly;
- EXPORT_SYMBOL(_totalhigh_pages);
-
--EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx);
--
- unsigned int nr_free_highpages (void)
- {
- struct zone *zone;
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 62bf87e62cf9..3feb85349ffe 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -964,6 +964,10 @@ struct task_struct {
+@@ -972,6 +972,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int sas_ss_flags;
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -151,6 +151,14 @@ static unsigned long exit_to_user_mode_l
+@@ -152,6 +152,14 @@ static unsigned long exit_to_user_mode_l
if (ti_work & _TIF_NEED_RESCHED)
schedule();
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
deleted file mode 100644
index cb4ff3e5c7d2..000000000000
--- a/patches/power-disable-highmem-on-rt.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-Subject: powerpc: Disable highmem on RT
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Mon, 18 Jul 2011 17:08:34 +0200
-
-The current highmem handling on -RT is not compatible and needs fixups.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- arch/powerpc/Kconfig | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/powerpc/Kconfig
-+++ b/arch/powerpc/Kconfig
-@@ -409,7 +409,7 @@ menu "Kernel options"
-
- config HIGHMEM
- bool "High memory support"
-- depends on PPC32
-+ depends on PPC32 && !PREEMPT_RT
-
- source "kernel/Kconfig.hz"
-
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index d408628b6469..cec724bd7030 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -146,7 +146,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -410,8 +448,15 @@ extern void migrate_enable(void);
- #elif defined(CONFIG_PREEMPT_RT)
+ #else
-static inline void migrate_disable(void) { }
-static inline void migrate_enable(void) { }
@@ -160,11 +160,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_lazy_enable();
+}
- #else /* !CONFIG_PREEMPT_RT */
+ #endif /* CONFIG_SMP */
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1848,6 +1848,44 @@ static inline int test_tsk_need_resched(
+@@ -1850,6 +1850,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -305,7 +305,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -1760,6 +1802,7 @@ void migrate_disable(void)
+@@ -1758,6 +1800,7 @@ void migrate_disable(void)
preempt_disable();
this_rq()->nr_pinned++;
p->migration_disabled = 1;
@@ -313,7 +313,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
}
EXPORT_SYMBOL_GPL(migrate_disable);
-@@ -1788,6 +1831,7 @@ void migrate_enable(void)
+@@ -1786,6 +1829,7 @@ void migrate_enable(void)
barrier();
p->migration_disabled = 0;
this_rq()->nr_pinned--;
@@ -321,7 +321,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
trace_sched_migrate_enable_tp(p);
-@@ -3812,6 +3856,9 @@ int sched_fork(unsigned long clone_flags
+@@ -3804,6 +3848,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -5053,6 +5100,7 @@ static void __sched notrace __schedule(b
+@@ -5063,6 +5110,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -339,7 +339,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -5252,6 +5300,30 @@ static void __sched notrace preempt_sche
+@@ -5262,6 +5310,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPTION
/*
* This is the entry point to schedule() from in-kernel preemption
-@@ -5265,7 +5337,8 @@ asmlinkage __visible void __sched notrac
+@@ -5275,7 +5347,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -5305,6 +5378,9 @@ asmlinkage __visible void __sched notrac
+@@ -5315,6 +5388,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -390,7 +390,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -7143,7 +7219,9 @@ void init_idle(struct task_struct *idle,
+@@ -7153,7 +7229,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 3cc8696628ec..eb56dd23b97d 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -111,12 +111,8 @@ struct io_uring_task;
+@@ -112,12 +112,8 @@ struct io_uring_task;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
/*
-@@ -1845,6 +1841,51 @@ static inline int test_tsk_need_resched(
+@@ -1854,6 +1850,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2564,6 +2564,18 @@ int migrate_swap(struct task_struct *cur
+@@ -2560,6 +2560,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -2608,7 +2620,7 @@ unsigned long wait_task_inactive(struct
+@@ -2604,7 +2616,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -2623,7 +2635,8 @@ unsigned long wait_task_inactive(struct
+@@ -2619,7 +2631,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/rcu-Don-t-invoke-try_invoke_on_locked_down_task-with.patch b/patches/rcu-Don-t-invoke-try_invoke_on_locked_down_task-with.patch
new file mode 100644
index 000000000000..6b74a5d3d357
--- /dev/null
+++ b/patches/rcu-Don-t-invoke-try_invoke_on_locked_down_task-with.patch
@@ -0,0 +1,97 @@
+From: "Paul E. McKenney" <paulmck@kernel.org>
+Date: Thu, 24 Sep 2020 15:11:55 -0700
+Subject: [PATCH] rcu: Don't invoke try_invoke_on_locked_down_task() with
+ irqs disabled
+
+The try_invoke_on_locked_down_task() function requires that
+interrupts be enabled, but it is called with interrupts disabled from
+rcu_print_task_stall(), resulting in an "IRQs not enabled as expected"
+diagnostic. This commit therefore updates rcu_print_task_stall()
+to accumulate a list of the first few tasks while holding the current
+leaf rcu_node structure's ->lock, then releases that lock and only then
+uses try_invoke_on_locked_down_task() to attempt to obtain per-task
+detailed information. Of course, as soon as ->lock is released, the
+task might exit, so the get_task_struct() function is used to prevent
+the task structure from going away in the meantime.
+
+Link: https://lore.kernel.org/lkml/000000000000903d5805ab908fc4@google.com/
+Reported-by: syzbot+cb3b69ae80afd6535b0e@syzkaller.appspotmail.com
+Reported-by: syzbot+f04854e1c5c9e913cc27@syzkaller.appspotmail.com
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/rcu/tree_stall.h | 22 +++++++++++++++++-----
+ 1 file changed, 17 insertions(+), 5 deletions(-)
+
+--- a/kernel/rcu/tree_stall.h
++++ b/kernel/rcu/tree_stall.h
+@@ -249,13 +249,16 @@ static bool check_slow_task(struct task_
+
+ /*
+ * Scan the current list of tasks blocked within RCU read-side critical
+- * sections, printing out the tid of each.
++ * sections, printing out the tid of each of the first few of them.
+ */
+-static int rcu_print_task_stall(struct rcu_node *rnp)
++static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
++ __releases(rnp->lock)
+ {
++ int i = 0;
+ int ndetected = 0;
+ struct rcu_stall_chk_rdr rscr;
+ struct task_struct *t;
++ struct task_struct *ts[8];
+
+ if (!rcu_preempt_blocked_readers_cgp(rnp))
+ return 0;
+@@ -264,6 +267,14 @@ static int rcu_print_task_stall(struct r
+ t = list_entry(rnp->gp_tasks->prev,
+ struct task_struct, rcu_node_entry);
+ list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
++ get_task_struct(t);
++ ts[i++] = t;
++ if (i >= ARRAY_SIZE(ts))
++ break;
++ }
++ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++ for (i--; i; i--) {
++ t = ts[i];
+ if (!try_invoke_on_locked_down_task(t, check_slow_task, &rscr))
+ pr_cont(" P%d", t->pid);
+ else
+@@ -273,6 +284,7 @@ static int rcu_print_task_stall(struct r
+ ".q"[rscr.rs.b.need_qs],
+ ".e"[rscr.rs.b.exp_hint],
+ ".l"[rscr.on_blkd_list]);
++ put_task_struct(t);
+ ndetected++;
+ }
+ pr_cont("\n");
+@@ -293,8 +305,9 @@ static void rcu_print_detail_task_stall_
+ * Because preemptible RCU does not exist, we never have to check for
+ * tasks blocked within RCU read-side critical sections.
+ */
+-static int rcu_print_task_stall(struct rcu_node *rnp)
++static int rcu_print_task_stall(struct rcu_node *rnp, unsigned long flags)
+ {
++ raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+ return 0;
+ }
+ #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+@@ -472,7 +485,6 @@ static void print_other_cpu_stall(unsign
+ pr_err("INFO: %s detected stalls on CPUs/tasks:\n", rcu_state.name);
+ rcu_for_each_leaf_node(rnp) {
+ raw_spin_lock_irqsave_rcu_node(rnp, flags);
+- ndetected += rcu_print_task_stall(rnp);
+ if (rnp->qsmask != 0) {
+ for_each_leaf_node_possible_cpu(rnp, cpu)
+ if (rnp->qsmask & leaf_node_cpu_bit(rnp, cpu)) {
+@@ -480,7 +492,7 @@ static void print_other_cpu_stall(unsign
+ ndetected++;
+ }
+ }
+- raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
++ ndetected += rcu_print_task_stall(rnp, flags); // Releases rnp->lock.
+ }
+
+ for_each_possible_cpu(cpu)
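The patch above follows the usual "snapshot under the lock, work after the unlock" idiom: each task is pinned with get_task_struct() while rnp->lock is held, the lock is dropped, and only then is try_invoke_on_locked_down_task() invoked, since it requires interrupts to be enabled. A simplified sketch of that idiom, using illustrative names rather than the exact tree_stall.h code:

	struct task_struct *snap[8];
	int i = 0, n;

	raw_spin_lock_irqsave_rcu_node(rnp, flags);
	list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
		get_task_struct(t);		/* keep the task alive past the unlock */
		snap[i++] = t;
		if (i >= ARRAY_SIZE(snap))
			break;
	}
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

	n = i;
	for (i = 0; i < n; i++) {		/* now safe to run with irqs enabled */
		try_invoke_on_locked_down_task(snap[i], check_slow_task, &rscr);
		put_task_struct(snap[i]);	/* drop the reference taken above */
	}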
diff --git a/patches/rt-local-irq-lock.patch b/patches/rt-local-irq-lock.patch
index 5cabef82aada..5a2744d22b2c 100644
--- a/patches/rt-local-irq-lock.patch
+++ b/patches/rt-local-irq-lock.patch
@@ -12,8 +12,8 @@ is held and the owner is preempted.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/local_lock_internal.h | 118 +++++++++++++++++++++++++++++++-----
- 1 file changed, 103 insertions(+), 15 deletions(-)
+ include/linux/local_lock_internal.h | 126 ++++++++++++++++++++++++++++++++----
+ 1 file changed, 113 insertions(+), 13 deletions(-)
--- a/include/linux/local_lock_internal.h
+++ b/include/linux/local_lock_internal.h
@@ -118,8 +118,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void local_lock_acquire(local_lock_t *l)
{
lock_map_acquire(&l->dep_map);
-@@ -55,26 +112,55 @@ static inline void local_lock_release(lo
+@@ -53,21 +110,50 @@ static inline void local_lock_acquire(lo
+ static inline void local_lock_release(local_lock_t *l) { }
+ #endif /* !CONFIG_DEBUG_LOCK_ALLOC */
++#ifdef CONFIG_PREEMPT_RT
++
#define __local_lock(lock) \
do { \
- preempt_disable(); \
@@ -133,8 +137,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ migrate_enable(); \
+ } while (0)
+
-+#ifdef CONFIG_PREEMPT_RT
-+
#define __local_lock_irq(lock) \
do { \
- local_irq_disable(); \
@@ -147,10 +149,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- local_irq_save(flags); \
+ migrate_disable(); \
+ flags = 0; \
- local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
--#define __local_unlock(lock) \
++ local_lock_acquire(this_cpu_ptr(lock)); \
++ } while (0)
++
+#define __local_unlock_irq(lock) \
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
@@ -158,14 +159,23 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ } while (0)
+
+#define __local_unlock_irqrestore(lock, flags) \
- do { \
- local_lock_release(this_cpu_ptr(lock)); \
-- preempt_enable(); \
++ do { \
++ local_lock_release(this_cpu_ptr(lock)); \
+ migrate_enable(); \
+ } while (0)
+
+#else
+
++#define __local_lock(lock) \
++ do { \
++ preempt_disable(); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
+ } while (0)
+
+@@ -77,6 +163,18 @@ static inline void local_lock_release(lo
+ preempt_enable(); \
+ } while (0)
+
+#define __local_lock_irq(lock) \
+ do { \
+ local_irq_disable(); \
@@ -176,10 +186,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ do { \
+ local_irq_save(flags); \
+ local_lock_acquire(this_cpu_ptr(lock)); \
- } while (0)
-
++ } while (0)
++
#define __local_unlock_irq(lock) \
-@@ -88,3 +174,5 @@ static inline void local_lock_release(lo
+ do { \
+ local_lock_release(this_cpu_ptr(lock)); \
+@@ -88,3 +186,5 @@ static inline void local_lock_release(lo
local_lock_release(this_cpu_ptr(lock)); \
local_irq_restore(flags); \
} while (0)
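For orientation, the refreshed hunks above end up defining the PREEMPT_RT variant of the basic local_lock operations roughly as follows (an illustrative reconstruction, simplified; local_lock_acquire()/local_lock_release() are the lockdep annotation helpers already present in local_lock_internal.h):

	#ifdef CONFIG_PREEMPT_RT

	#define __local_lock(lock)					\
		do {							\
			migrate_disable();				\
			local_lock_acquire(this_cpu_ptr(lock));		\
		} while (0)

	#define __local_unlock(lock)					\
		do {							\
			local_lock_release(this_cpu_ptr(lock));		\
			migrate_enable();				\
		} while (0)

	#endif /* CONFIG_PREEMPT_RT */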
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 07813312c4f6..ff647402b2b4 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7852,7 +7852,7 @@ void __init sched_init(void)
+@@ -7862,7 +7862,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 278b14d358b6..fc553be015ee 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4220,9 +4220,13 @@ static struct rq *finish_task_switch(str
+@@ -4230,9 +4230,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -7240,6 +7244,7 @@ void sched_setnuma(struct task_struct *p
+@@ -7250,6 +7254,7 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/series b/patches/series
index 5836fb6dd9d0..fca447ae6c24 100644
--- a/patches/series
+++ b/patches/series
@@ -32,6 +32,33 @@
0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
0019-sched-Comment-affine_move_task.patch
+# 2020-10-29 23:18 Thomas Gleixner ( 113) [patch V2 00/18] mm/highmem: Preemptible variant of kmap_atomic & friend
+# 20201029221806.189523375@linutronix.de
+# + fixes
+0001-sched-Make-migrate_disable-enable-independent-of-RT.patch
+0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
+0003-highmem-Provide-generic-variant-of-kmap_atomic.patch
+0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch
+0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
+0015-io-mapping-Cleanup-atomic-iomap.patch
+0016-sched-highmem-Store-local-kmaps-in-task-struct.patch
+0017-mm-highmem-Provide-kmap_local.patch
+0018-io-mapping-Provide-iomap_local-variant.patch
+#
+highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
+
+# 8a26c219cafe66431da3350da1687a50f635f3c2
+rcu-Don-t-invoke-try_invoke_on_locked_down_task-with.patch
+
############################################################
# POSTED
############################################################
@@ -42,6 +69,7 @@ Use-CONFIG_PREEMPTION.patch
0001-blk-mq-Don-t-complete-on-a-remote-CPU-in-force-threa.patch
0002-blk-mq-Always-complete-remote-completions-requests-i.patch
0003-blk-mq-Use-llist_head-for-blk_cpu_done.patch
+block-mq-Disable-preemption-in-blk_mq_complete_reque.patch
# 20201028181041.xyeothhkouc3p4md@linutronix.de
lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch
@@ -334,12 +362,6 @@ x86-Enable-RT.patch
################################################################################
# KMAP/HIGHMEM
-mm-rt-kmap-atomic-scheduling.patch
-x86-highmem-add-a-already-used-pte-check.patch
-arm-highmem-flush-tlb-on-unmap.patch
-arm-enable-highmem-for-rt.patch
-# Rewrite as pagefault disabled is upstream splitted already. The problem comes
-# with the highmem pieces.
mm-scatterlist-dont-disable-irqs-on-RT.patch
# PREEMPT LAZY
@@ -380,13 +402,9 @@ ARM64-Allow-to-enable-RT.patch
# PowerPC
powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
-power-disable-highmem-on-rt.patch
powerpc-stackprotector-work-around-stack-guard-init-.patch
POWERPC-Allow-to-enable-RT.patch
-# MIPS
-mips-disable-highmem-on-rt.patch
-
# DRIVERS
# Postpone, disable
drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 61a1b37a122b..67b9b3560baf 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -956,6 +956,7 @@ struct task_struct {
+@@ -963,6 +963,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct __rcu *sighand;
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -2015,6 +2015,7 @@ static __latent_entropy struct task_stru
+@@ -2016,6 +2016,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/patches/softirq--Add-RT-variant.patch b/patches/softirq--Add-RT-variant.patch
index d2a36d19b0fc..76cbdb8c185d 100644
--- a/patches/softirq--Add-RT-variant.patch
+++ b/patches/softirq--Add-RT-variant.patch
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* The preempt_count offset after preempt_disable();
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1009,6 +1009,9 @@ struct task_struct {
+@@ -1017,6 +1017,9 @@ struct task_struct {
int softirq_context;
int irq_config;
#endif
diff --git a/patches/x86-Enable-RT-also-on-32bit.patch b/patches/x86-Enable-RT-also-on-32bit.patch
index bc10ed609c6f..b00d91a3827e 100644
--- a/patches/x86-Enable-RT-also-on-32bit.patch
+++ b/patches/x86-Enable-RT-also-on-32bit.patch
@@ -9,7 +9,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -25,7 +25,6 @@ config X86_64
+@@ -26,7 +26,6 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
select ARCH_USE_CMPXCHG_LOCKREF
select HAVE_ARCH_SOFT_DIRTY
select MODULES_USE_ELF_RELA
-@@ -93,6 +92,7 @@ config X86
+@@ -94,6 +93,7 @@ config X86
select ARCH_SUPPORTS_ACPI
select ARCH_SUPPORTS_ATOMIC_RMW
select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
diff --git a/patches/x86-Enable-RT.patch b/patches/x86-Enable-RT.patch
index ea8565518dbc..85676261018a 100644
--- a/patches/x86-Enable-RT.patch
+++ b/patches/x86-Enable-RT.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -25,6 +25,7 @@ config X86_64
+@@ -26,6 +26,7 @@ config X86_64
# Options that are inherently 64-bit kernel only:
select ARCH_HAS_GIGANTIC_PAGE
select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
diff --git a/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch b/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
index 91233e18d04d..b307f49cb45f 100644
--- a/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
+++ b/patches/x86-entry-Use-should_resched-in-idtentry_exit_cond_r.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -361,7 +361,7 @@ void irqentry_exit_cond_resched(void)
+@@ -363,7 +363,7 @@ void irqentry_exit_cond_resched(void)
rcu_irq_exit_check_preempt();
if (IS_ENABLED(CONFIG_DEBUG_ENTRY))
WARN_ON_ONCE(!on_thread_stack());
diff --git a/patches/x86-highmem-add-a-already-used-pte-check.patch b/patches/x86-highmem-add-a-already-used-pte-check.patch
deleted file mode 100644
index a703fa27c667..000000000000
--- a/patches/x86-highmem-add-a-already-used-pte-check.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 11 Mar 2013 17:09:55 +0100
-Subject: x86/highmem: Add a "already used pte" check
-
-This is a copy from kmap_atomic_prot().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/mm/iomap_32.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/x86/mm/iomap_32.c
-+++ b/arch/x86/mm/iomap_32.c
-@@ -56,6 +56,8 @@ void *kmap_atomic_prot_pfn(unsigned long
- type = kmap_atomic_idx_push();
- idx = type + KM_TYPE_NR * smp_processor_id();
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-+ WARN_ON(!pte_none(*(kmap_pte - idx)));
-+
- #ifdef CONFIG_PREEMPT_RT
- current->kmap_pte[type] = pte;
- #endif
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 0b3d5f831f10..c6a0828f6c2b 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -209,6 +209,7 @@ config X86
+@@ -210,6 +210,7 @@ config X86
select HAVE_PCI
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/kernel/entry/common.c
+++ b/kernel/entry/common.c
-@@ -148,7 +148,7 @@ static unsigned long exit_to_user_mode_l
+@@ -149,7 +149,7 @@ static unsigned long exit_to_user_mode_l
local_irq_enable_exit_to_user(ti_work);