-rw-r--r--patches/0001-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch (renamed from patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch)5
-rw-r--r--patches/0002-highmem-Remove-unused-functions.patch36
-rw-r--r--patches/0003-fs-Remove-asm-kmap_types.h-includes.patch41
-rw-r--r--patches/0004-sh-highmem-Remove-all-traces-of-unused-cruft.patch82
-rw-r--r--patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch108
-rw-r--r--patches/0005-asm-generic-Provide-kmap_size.h.patch60
-rw-r--r--patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch12
-rw-r--r--patches/0006-highmem-Provide-generic-variant-of-kmap_atomic.patch (renamed from patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch)81
-rw-r--r--patches/0007-highmem-Make-DEBUG_HIGHMEM-functional.patch54
-rw-r--r--patches/0008-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch (renamed from patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch)104
-rw-r--r--patches/0009-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch197
-rw-r--r--patches/0010-ARM-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch)62
-rw-r--r--patches/0011-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch)40
-rw-r--r--patches/0012-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch)28
-rw-r--r--patches/0013-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch)73
-rw-r--r--patches/0014-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch)44
-rw-r--r--patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch6
-rw-r--r--patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch)65
-rw-r--r--patches/0016-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch)64
-rw-r--r--patches/0017-mm-highmem-Provide-kmap_local.patch177
-rw-r--r--patches/0017-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch (renamed from patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch)54
-rw-r--r--patches/0018-highmem-Get-rid-of-kmap_types.h.patch159
-rw-r--r--patches/0018-io-mapping-Provide-iomap_local-variant.patch69
-rw-r--r--patches/0019-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch (renamed from patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch)73
-rw-r--r--patches/0020-io-mapping-Cleanup-atomic-iomap.patch (renamed from patches/0015-io-mapping-Cleanup-atomic-iomap.patch)5
-rw-r--r--patches/0021-Documentation-io-mapping-Remove-outdated-blurb.patch41
-rw-r--r--patches/0022-highmem-High-implementation-details-and-document-API.patch534
-rw-r--r--patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch (renamed from patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch)5
-rw-r--r--patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch (renamed from patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch)98
-rw-r--r--patches/0025-mm-highmem-Provide-kmap_local.patch196
-rw-r--r--patches/0026-io-mapping-Provide-iomap_local-variant.patch171
-rw-r--r--patches/0027-x86-crashdump-32-Simplify-copy_oldmem_page.patch92
-rw-r--r--patches/0028-mips-crashdump-Simplify-copy_oldmem_page.patch88
-rw-r--r--patches/0029-ARM-mm-Replace-kmap_atomic_pfn.patch62
-rw-r--r--patches/0030-highmem-Remove-kmap_atomic_pfn.patch40
-rw-r--r--patches/0031-drm-ttm-Replace-kmap_atomic-usage.patch67
-rw-r--r--patches/0032-drm-vmgfx-Replace-kmap_atomic.patch97
-rw-r--r--patches/0033-highmem-Remove-kmap_atomic_prot.patch45
-rw-r--r--patches/0034-drm-qxl-Replace-io_mapping_map_atomic_wc.patch242
-rw-r--r--patches/0035-drm-nouveau-device-Replace-io_mapping_map_atomic_wc.patch46
-rw-r--r--patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch160
-rw-r--r--patches/0037-io-mapping-Remove-io_mapping_map_atomic_wc.patch131
-rw-r--r--patches/arch-arm64-Add-lazy-preempt-support.patch2
-rw-r--r--patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch87
-rw-r--r--patches/hrtimer-Allow-raw-wakeups-during-boot.patch2
-rw-r--r--patches/irqwork-push_most_work_into_softirq_context.patch2
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/lockdep-no-softirq-accounting-on-rt.patch4
-rw-r--r--patches/preempt-lazy-support.patch18
-rw-r--r--patches/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch57
-rw-r--r--patches/rt-introduce-cpu-chill.patch2
-rw-r--r--patches/series64
-rw-r--r--patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch2
-rw-r--r--patches/timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch52
54 files changed, 3454 insertions, 654 deletions
diff --git a/patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch b/patches/0001-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
index 2bdfa461d755..f267aaed9685 100644
--- a/patches/0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
+++ b/patches/0001-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 20:38:45 +0200
-Subject: [PATCH 02/18] mm/highmem: Un-EXPORT __kmap_atomic_idx()
+Date: Tue, 3 Nov 2020 10:27:13 +0100
+Subject: [PATCH 01/37] mm/highmem: Un-EXPORT __kmap_atomic_idx()
Nothing in modules can use that.
@@ -8,6 +8,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
mm/highmem.c | 2 --
1 file changed, 2 deletions(-)
diff --git a/patches/0002-highmem-Remove-unused-functions.patch b/patches/0002-highmem-Remove-unused-functions.patch
new file mode 100644
index 000000000000..8860fd43fbdd
--- /dev/null
+++ b/patches/0002-highmem-Remove-unused-functions.patch
@@ -0,0 +1,36 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:14 +0100
+Subject: [PATCH 02/37] highmem: Remove unused functions
+
+Nothing uses totalhigh_pages_dec() and totalhigh_pages_set().
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem.h | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -104,21 +104,11 @@ static inline void totalhigh_pages_inc(v
+ atomic_long_inc(&_totalhigh_pages);
+ }
+
+-static inline void totalhigh_pages_dec(void)
+-{
+- atomic_long_dec(&_totalhigh_pages);
+-}
+-
+ static inline void totalhigh_pages_add(long count)
+ {
+ atomic_long_add(count, &_totalhigh_pages);
+ }
+
+-static inline void totalhigh_pages_set(long val)
+-{
+- atomic_long_set(&_totalhigh_pages, val);
+-}
+-
+ void kmap_flush_unused(void);
+
+ struct page *kmap_to_page(void *addr);
diff --git a/patches/0003-fs-Remove-asm-kmap_types.h-includes.patch b/patches/0003-fs-Remove-asm-kmap_types.h-includes.patch
new file mode 100644
index 000000000000..524a83e3d6b4
--- /dev/null
+++ b/patches/0003-fs-Remove-asm-kmap_types.h-includes.patch
@@ -0,0 +1,41 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:15 +0100
+Subject: [PATCH 03/37] fs: Remove asm/kmap_types.h includes
+
+Historical leftovers from the time where kmap() had fixed slots.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Benjamin LaHaise <bcrl@kvack.org>
+Cc: linux-fsdevel@vger.kernel.org
+Cc: linux-aio@kvack.org
+Cc: Chris Mason <clm@fb.com>
+Cc: Josef Bacik <josef@toxicpanda.com>
+Cc: David Sterba <dsterba@suse.com>
+Cc: linux-btrfs@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ fs/aio.c | 1 -
+ fs/btrfs/ctree.h | 1 -
+ 2 files changed, 2 deletions(-)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -43,7 +43,6 @@
+ #include <linux/mount.h>
+ #include <linux/pseudo_fs.h>
+
+-#include <asm/kmap_types.h>
+ #include <linux/uaccess.h>
+ #include <linux/nospec.h>
+
+--- a/fs/btrfs/ctree.h
++++ b/fs/btrfs/ctree.h
+@@ -17,7 +17,6 @@
+ #include <linux/wait.h>
+ #include <linux/slab.h>
+ #include <trace/events/btrfs.h>
+-#include <asm/kmap_types.h>
+ #include <asm/unaligned.h>
+ #include <linux/pagemap.h>
+ #include <linux/btrfs.h>
diff --git a/patches/0004-sh-highmem-Remove-all-traces-of-unused-cruft.patch b/patches/0004-sh-highmem-Remove-all-traces-of-unused-cruft.patch
new file mode 100644
index 000000000000..9330c8b74ce4
--- /dev/null
+++ b/patches/0004-sh-highmem-Remove-all-traces-of-unused-cruft.patch
@@ -0,0 +1,82 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:16 +0100
+Subject: [PATCH 04/37] sh/highmem: Remove all traces of unused cruft
+
+For whatever reasons SH has highmem bits all over the place but does
+not enable it via Kconfig. Remove the bitrot.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/sh/include/asm/fixmap.h | 8 --------
+ arch/sh/include/asm/kmap_types.h | 15 ---------------
+ arch/sh/mm/init.c | 8 --------
+ 3 files changed, 31 deletions(-)
+ delete mode 100644 arch/sh/include/asm/kmap_types.h
+
+--- a/arch/sh/include/asm/fixmap.h
++++ b/arch/sh/include/asm/fixmap.h
+@@ -13,9 +13,6 @@
+ #include <linux/kernel.h>
+ #include <linux/threads.h>
+ #include <asm/page.h>
+-#ifdef CONFIG_HIGHMEM
+-#include <asm/kmap_types.h>
+-#endif
+
+ /*
+ * Here we define all the compile-time 'special' virtual
+@@ -53,11 +50,6 @@ enum fixed_addresses {
+ FIX_CMAP_BEGIN,
+ FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS) - 1,
+
+-#ifdef CONFIG_HIGHMEM
+- FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
+-#endif
+-
+ #ifdef CONFIG_IOREMAP_FIXED
+ /*
+ * FIX_IOREMAP entries are useful for mapping physical address
+--- a/arch/sh/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __SH_KMAP_TYPES_H
+-#define __SH_KMAP_TYPES_H
+-
+-/* Dummy header just to define km_type. */
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif
+--- a/arch/sh/mm/init.c
++++ b/arch/sh/mm/init.c
+@@ -362,9 +362,6 @@ void __init mem_init(void)
+ mem_init_print_info(NULL);
+ pr_info("virtual kernel memory layout:\n"
+ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+-#ifdef CONFIG_HIGHMEM
+- " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
+-#endif
+ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
+ " lowmem : 0x%08lx - 0x%08lx (%4ld MB) (cached)\n"
+ #ifdef CONFIG_UNCACHED_MAPPING
+@@ -376,11 +373,6 @@ void __init mem_init(void)
+ FIXADDR_START, FIXADDR_TOP,
+ (FIXADDR_TOP - FIXADDR_START) >> 10,
+
+-#ifdef CONFIG_HIGHMEM
+- PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
+- (LAST_PKMAP*PAGE_SIZE) >> 10,
+-#endif
+-
+ (unsigned long)VMALLOC_START, VMALLOC_END,
+ (VMALLOC_END - VMALLOC_START) >> 20,
+
diff --git a/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
deleted file mode 100644
index 61218ed26449..000000000000
--- a/patches/0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 11:14:55 +0200
-Subject: [PATCH 05/18] arc/mm/highmem: Use generic kmap atomic implementation
-
-Adopt the map ordering to match the other architectures and the generic
-code.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Cc: Vineet Gupta <vgupta@synopsys.com>
-Cc: linux-snps-arc@lists.infradead.org
----
- arch/arc/Kconfig | 1
- arch/arc/include/asm/highmem.h | 8 ++++++-
- arch/arc/mm/highmem.c | 44 -----------------------------------------
- 3 files changed, 9 insertions(+), 44 deletions(-)
-
---- a/arch/arc/Kconfig
-+++ b/arch/arc/Kconfig
-@@ -507,6 +507,7 @@ config LINUX_RAM_BASE
- config HIGHMEM
- bool "High Memory Support"
- select ARCH_DISCONTIGMEM_ENABLE
-+ select KMAP_LOCAL
- help
- With ARC 2G:2G address split, only upper 2G is directly addressable by
- kernel. Enable this to potentially allow access to rest of 2G and PAE
---- a/arch/arc/include/asm/highmem.h
-+++ b/arch/arc/include/asm/highmem.h
-@@ -15,7 +15,10 @@
- #define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
- #define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
- #define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
--#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
-+
-+#define FIX_KMAP_BEGIN (0)
-+#define FIX_KMAP_END ((FIXMAP_SIZE >> PAGE_SHIFT) - 1)
-+#define FIXADDR_TOP (FIXMAP_BASE + FIXMAP_SIZE - PAGE_SIZE)
-
- /* start after fixmap area */
- #define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
-@@ -29,6 +32,9 @@
-
- extern void kmap_init(void);
-
-+#define arch_kmap_local_post_unmap(vaddr) \
-+ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
-+
- static inline void flush_cache_kmaps(void)
- {
- flush_cache_all();
---- a/arch/arc/mm/highmem.c
-+++ b/arch/arc/mm/highmem.c
-@@ -47,48 +47,6 @@
- */
-
- extern pte_t * pkmap_page_table;
--static pte_t * fixmap_page_table;
--
--void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
--{
-- int idx, cpu_idx;
-- unsigned long vaddr;
--
-- cpu_idx = kmap_atomic_idx_push();
-- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
-- vaddr = FIXMAP_ADDR(idx);
--
-- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
-- mk_pte(page, prot));
--
-- return (void *)vaddr;
--}
--EXPORT_SYMBOL(kmap_atomic_high_prot);
--
--void kunmap_atomic_high(void *kv)
--{
-- unsigned long kvaddr = (unsigned long)kv;
--
-- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
--
-- /*
-- * Because preemption is disabled, this vaddr can be associated
-- * with the current allocated index.
-- * But in case of multiple live kmap_atomic(), it still relies on
-- * callers to unmap in right order.
-- */
-- int cpu_idx = kmap_atomic_idx();
-- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
--
-- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
--
-- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
-- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
--
-- kmap_atomic_idx_pop();
-- }
--}
--EXPORT_SYMBOL(kunmap_atomic_high);
-
- static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
- {
-@@ -113,5 +71,5 @@ void __init kmap_init(void)
- pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
-
- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
-- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
-+ alloc_kmap_pgtable(FIXMAP_BASE);
- }
diff --git a/patches/0005-asm-generic-Provide-kmap_size.h.patch b/patches/0005-asm-generic-Provide-kmap_size.h.patch
new file mode 100644
index 000000000000..def1d2e64ee0
--- /dev/null
+++ b/patches/0005-asm-generic-Provide-kmap_size.h.patch
@@ -0,0 +1,60 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:17 +0100
+Subject: [PATCH 05/37] asm-generic: Provide kmap_size.h
+
+kmap_types.h is a misnomer because the old atomic MAP based array does not
+exist anymore and the whole indirection of architectures including
+kmap_types.h is inconinstent and does not allow to provide guard page
+debugging for this misfeature.
+
+Add a common header file which defines the mapping stack size for all
+architectures. Will be used when converting architectures over to a
+generic kmap_local/atomic implementation.
+
+The array size is chosen with the following constraints in mind:
+
+ - The deepest nest level in one context is 3 according to code
+ inspection.
+
+ - The worst case nesting for the upcoming preemptible version would be:
+
+ 2 maps in task context and a fault inside
+ 2 maps in the fault handler
+ 3 maps in softirq
+ 2 maps in interrupt
+
+So a total of 16 is sufficient and probably overestimated.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/asm-generic/Kbuild | 1 +
+ include/asm-generic/kmap_size.h | 12 ++++++++++++
+ 2 files changed, 13 insertions(+)
+ create mode 100644 include/asm-generic/kmap_size.h
+
+--- a/include/asm-generic/Kbuild
++++ b/include/asm-generic/Kbuild
+@@ -31,6 +31,7 @@ mandatory-y += irq_regs.h
+ mandatory-y += irq_work.h
+ mandatory-y += kdebug.h
+ mandatory-y += kmap_types.h
++mandatory-y += kmap_size.h
+ mandatory-y += kprobes.h
+ mandatory-y += linkage.h
+ mandatory-y += local.h
+--- /dev/null
++++ b/include/asm-generic/kmap_size.h
+@@ -0,0 +1,12 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _ASM_GENERIC_KMAP_SIZE_H
++#define _ASM_GENERIC_KMAP_SIZE_H
++
++/* For debug this provides guard pages between the maps */
++#ifdef CONFIG_DEBUG_HIGHMEM
++# define KM_MAX_IDX 33
++#else
++# define KM_MAX_IDX 16
++#endif
++
++#endif
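The changelog's sizing argument can be written out as a quick sanity check. A
minimal sketch, using illustrative macro names that are not part of the patch:

    /* Worst-case nesting per CPU, as enumerated in the changelog: */
    #define MAPS_TASK     2   /* maps held in task context            */
    #define MAPS_FAULT    2   /* maps taken in the fault handler      */
    #define MAPS_SOFTIRQ  3   /* maps taken in softirq context        */
    #define MAPS_HARDIRQ  2   /* maps taken in hard interrupt context */

    /*
     * 2 + 2 + 3 + 2 = 9 nested maps worst case, so KM_MAX_IDX = 16 leaves
     * headroom. With CONFIG_DEBUG_HIGHMEM the stack is doubled plus one
     * (2 * 16 + 1 = 33) so that every second slot can serve as a guard.
     */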
diff --git a/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
index d9c26d949f95..3a0331d178d0 100644
--- a/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
+++ b/patches/0005-locking-rtmutex-Handle-the-various-new-futex-race-co.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2146,6 +2146,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2148,6 +2148,16 @@ static int futex_requeue(u32 __user *uad
*/
requeue_pi_wake_futex(this, &key2, hb2);
continue;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -3172,7 +3182,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3174,7 +3184,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3224,20 +3234,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3226,20 +3236,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3246,7 +3291,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3248,7 +3293,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3257,7 +3303,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3259,7 +3305,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -128,7 +128,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3271,7 +3317,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3273,7 +3319,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
diff --git a/patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch b/patches/0006-highmem-Provide-generic-variant-of-kmap_atomic.patch
index fcbf4a88c524..1d7948f65312 100644
--- a/patches/0003-highmem-Provide-generic-variant-of-kmap_atomic.patch
+++ b/patches/0006-highmem-Provide-generic-variant-of-kmap_atomic.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 09:30:47 +0200
-Subject: [PATCH 03/18] highmem: Provide generic variant of kmap_atomic*
+Date: Tue, 3 Nov 2020 10:27:18 +0100
+Subject: [PATCH 06/37] highmem: Provide generic variant of kmap_atomic*
The kmap_atomic* interfaces in all architectures are pretty much the same
except for post-map operations (flush) and pre- and post-unmap operations.
@@ -10,11 +10,12 @@ Provide a generic variant for that.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/highmem.h | 79 ++++++++++++++++++++++++++------
+ include/linux/highmem.h | 82 ++++++++++++++++++++++-----
mm/Kconfig | 3 +
- mm/highmem.c | 118 +++++++++++++++++++++++++++++++++++++++++++++++-
- 3 files changed, 183 insertions(+), 17 deletions(-)
+ mm/highmem.c | 144 +++++++++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 211 insertions(+), 18 deletions(-)
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -89,7 +90,7 @@ Cc: linux-mm@kvack.org
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
-@@ -157,21 +200,28 @@ static inline void *kmap_atomic(struct p
+@@ -147,25 +190,33 @@ static inline void *kmap_atomic(struct p
pagefault_disable();
return page_address(page);
}
@@ -123,7 +124,20 @@ Cc: linux-mm@kvack.org
#define kmap_flush_unused() do {} while(0)
#endif /* CONFIG_HIGHMEM */
-@@ -213,15 +263,14 @@ static inline void kmap_atomic_idx_pop(v
+
++#if !defined(CONFIG_KMAP_LOCAL)
+ #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
+@@ -196,22 +247,21 @@ static inline void kmap_atomic_idx_pop(v
+ __this_cpu_dec(__kmap_atomic_idx);
+ #endif
+ }
+-
++#endif
+ #endif
+
+ /*
* Prevent people trying to call kunmap_atomic() as if it were kunmap()
* kunmap_atomic() should get the return value of kmap_atomic, not the page.
*/
@@ -157,15 +171,19 @@ Cc: linux-mm@kvack.org
endmenu
--- a/mm/highmem.c
+++ b/mm/highmem.c
-@@ -30,6 +30,7 @@
- #include <linux/kgdb.h>
+@@ -31,9 +31,11 @@
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
-+#include <asm/fixmap.h>
++#ifndef CONFIG_KMAP_LOCAL
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
DEFINE_PER_CPU(int, __kmap_atomic_idx);
-@@ -365,9 +366,122 @@ void kunmap_high(struct page *page)
+ #endif
++#endif
+
+ /*
+ * Virtual_count is not a pure "count".
+@@ -365,9 +367,147 @@ void kunmap_high(struct page *page)
if (need_wakeup)
wake_up(pkmap_map_wait);
}
@@ -175,10 +193,35 @@ Cc: linux-mm@kvack.org
+#endif /* CONFIG_HIGHMEM */
+
+#ifdef CONFIG_KMAP_LOCAL
++
++#include <asm/kmap_size.h>
++
++static DEFINE_PER_CPU(int, __kmap_local_idx);
++
++static inline int kmap_local_idx_push(void)
++{
++ int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;
++
++ WARN_ON_ONCE(in_irq() && !irqs_disabled());
++ BUG_ON(idx >= KM_MAX_IDX);
++ return idx;
++}
++
++static inline int kmap_local_idx(void)
++{
++ return __this_cpu_read(__kmap_local_idx) - 1;
++}
++
++static inline void kmap_local_idx_pop(void)
++{
++ int idx = __this_cpu_dec_return(__kmap_local_idx);
++
++ BUG_ON(idx < 0);
++}
++
+#ifndef arch_kmap_local_post_map
+# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
+#endif
-+
+#ifndef arch_kmap_local_pre_unmap
+# define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
+#endif
@@ -188,11 +231,11 @@ Cc: linux-mm@kvack.org
+#endif
+
+#ifndef arch_kmap_local_map_idx
-+#define arch_kmap_local_map_idx(type, pfn) kmap_local_calc_idx(type)
++#define arch_kmap_local_map_idx(idx, pfn) kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_unmap_idx
-+#define arch_kmap_local_unmap_idx(type, vaddr) kmap_local_calc_idx(type)
++#define arch_kmap_local_unmap_idx(idx, vaddr) kmap_local_calc_idx(idx)
+#endif
+
+#ifndef arch_kmap_local_high_get
@@ -211,9 +254,9 @@ Cc: linux-mm@kvack.org
+#endif
+}
+
-+static inline int kmap_local_calc_idx(int type)
++static inline int kmap_local_calc_idx(int idx)
+{
-+ return type + KM_TYPE_NR * smp_processor_id();
++ return idx + KM_MAX_IDX * smp_processor_id();
+}
+
+static pte_t *__kmap_pte;
@@ -232,7 +275,7 @@ Cc: linux-mm@kvack.org
+ int idx;
+
+ preempt_disable();
-+ idx = arch_kmap_local_map_idx(kmap_atomic_idx_push(), pfn);
++ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+ BUG_ON(!pte_none(*(kmap_pte - idx)));
+ pteval = pfn_pte(pfn, prot);
@@ -276,13 +319,13 @@ Cc: linux-mm@kvack.org
+ }
+
+ preempt_disable();
-+ idx = arch_kmap_local_unmap_idx(kmap_atomic_idx(), addr);
++ idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
+ WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
+
+ arch_kmap_local_pre_unmap(addr);
+ pte_clear(&init_mm, addr, kmap_pte - idx);
+ arch_kmap_local_post_unmap(addr);
-+ kmap_atomic_idx_pop();
++ kmap_local_idx_pop();
+ preempt_enable();
+}
+EXPORT_SYMBOL(kunmap_local_indexed);
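All of the arch_kmap_local_* hooks above have generic defaults, so converting
an architecture amounts to selecting KMAP_LOCAL in Kconfig and overriding only
the hooks its hardware actually needs. A minimal sketch, modeled on the ARC
conversion later in this queue (local_flush_tlb_kernel_range() is that
architecture's flush primitive, not a generic API):

    /* arch/<arch>/include/asm/highmem.h -- illustrative only */

    /*
     * Flush the TLB entry after a local map is torn down. An architecture
     * that needs nothing here leaves the hook undefined and gets the
     * empty default from mm/highmem.c.
     */
    #define arch_kmap_local_post_unmap(vaddr) \
        local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)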
diff --git a/patches/0007-highmem-Make-DEBUG_HIGHMEM-functional.patch b/patches/0007-highmem-Make-DEBUG_HIGHMEM-functional.patch
new file mode 100644
index 000000000000..9735b2d349a5
--- /dev/null
+++ b/patches/0007-highmem-Make-DEBUG_HIGHMEM-functional.patch
@@ -0,0 +1,54 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:19 +0100
+Subject: [PATCH 07/37] highmem: Make DEBUG_HIGHMEM functional
+
+For some obscure reason when CONFIG_DEBUG_HIGHMEM is enabled the stack
+depth is increased from 20 to 41. But the only thing DEBUG_HIGHMEM does is
+to enable a few BUG_ON()'s in the mapping code.
+
+That's a leftover from the historical mapping code which had fixed entries
+for various purposes. DEBUG_HIGHMEM inserted guard mappings between the map
+types. But that was all ditched when kmap_atomic() switched to stack-based
+map management. The WITH_KM_FENCE magic survived without being functional,
+though. All it does today is increase the stack depth.
+
+Add a working implementation to the generic kmap_local* implementation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ mm/highmem.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -376,9 +376,19 @@ EXPORT_SYMBOL(kunmap_high);
+
+ static DEFINE_PER_CPU(int, __kmap_local_idx);
+
++/*
++ * With DEBUG_HIGHMEM the stack depth is doubled and every second
++ * slot is unused which acts as a guard page
++ */
++#ifdef CONFIG_DEBUG_HIGHMEM
++# define KM_INCR 2
++#else
++# define KM_INCR 1
++#endif
++
+ static inline int kmap_local_idx_push(void)
+ {
+- int idx = __this_cpu_inc_return(__kmap_local_idx) - 1;
++ int idx = __this_cpu_add_return(__kmap_local_idx, KM_INCR) - 1;
+
+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
+ BUG_ON(idx >= KM_MAX_IDX);
+@@ -392,7 +402,7 @@ static inline int kmap_local_idx(void)
+
+ static inline void kmap_local_idx_pop(void)
+ {
+- int idx = __this_cpu_dec_return(__kmap_local_idx);
++ int idx = __this_cpu_sub_return(__kmap_local_idx, KM_INCR);
+
+ BUG_ON(idx < 0);
+ }
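Tracing the per-CPU counter for two nested maps shows how the doubled
increment produces the guard slots. The arithmetic below is an illustrative
walk-through, not code from the patch:

    /*
     * With CONFIG_DEBUG_HIGHMEM: KM_INCR == 2, KM_MAX_IDX == 33.
     *
     *   push #1: __kmap_local_idx 0 -> 2, returned idx = 1
     *   push #2: __kmap_local_idx 2 -> 4, returned idx = 3
     *
     * Only odd slots are ever handed out; the even slots in between are
     * never mapped, so an access that runs past the end of a mapped page
     * faults in an unmapped guard slot instead of silently hitting the
     * neighbouring mapping.
     */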
diff --git a/patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/patches/0008-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
index 2bc27f02ac5e..c4a8fc7269c5 100644
--- a/patches/0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+++ b/patches/0008-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
@@ -1,22 +1,28 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 10:42:27 +0200
-Subject: [PATCH 04/18] x86/mm/highmem: Use generic kmap atomic implementation
+Date: Tue, 3 Nov 2020 10:27:20 +0100
+Subject: [PATCH 08/37] x86/mm/highmem: Use generic kmap atomic implementation
Convert X86 to the generic kmap atomic implementation and make the
iomap_atomic() naming convention consistent while at it.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: x86@kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/Kconfig | 3 +-
- arch/x86/include/asm/fixmap.h | 1
- arch/x86/include/asm/highmem.h | 12 ++++++--
- arch/x86/include/asm/iomap.h | 18 ++++++------
- arch/x86/mm/highmem_32.c | 59 -----------------------------------------
- arch/x86/mm/init_32.c | 15 ----------
- arch/x86/mm/iomap_32.c | 59 +++--------------------------------------
- include/linux/io-mapping.h | 2 -
- 8 files changed, 27 insertions(+), 142 deletions(-)
+ arch/x86/Kconfig | 3 +
+ arch/x86/include/asm/fixmap.h | 5 +-
+ arch/x86/include/asm/highmem.h | 13 +++++--
+ arch/x86/include/asm/iomap.h | 18 +++++-----
+ arch/x86/include/asm/kmap_types.h | 13 -------
+ arch/x86/include/asm/paravirt_types.h | 1
+ arch/x86/mm/highmem_32.c | 59 ----------------------------------
+ arch/x86/mm/init_32.c | 15 --------
+ arch/x86/mm/iomap_32.c | 59 ++--------------------------------
+ include/linux/highmem.h | 2 -
+ include/linux/io-mapping.h | 2 -
+ mm/highmem.c | 2 -
+ 12 files changed, 31 insertions(+), 161 deletions(-)
+ delete mode 100644 arch/x86/include/asm/kmap_types.h
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -35,6 +41,24 @@ Cc: x86@kernel.org
def_bool y
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
+@@ -31,7 +31,7 @@
+ #include <asm/pgtable_types.h>
+ #ifdef CONFIG_X86_32
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #else
+ #include <uapi/asm/vsyscall.h>
+ #endif
+@@ -94,7 +94,7 @@ enum fixed_addresses {
+ #endif
+ #ifdef CONFIG_X86_32
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #ifdef CONFIG_PCI_MMCONFIG
+ FIX_PCIE_MCFG,
+ #endif
@@ -151,7 +151,6 @@ extern void reserve_top_address(unsigned
extern int fixmaps_set;
@@ -45,7 +69,15 @@ Cc: x86@kernel.org
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte);
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
-@@ -58,11 +58,17 @@ extern unsigned long highstart_pfn, high
+@@ -23,7 +23,6 @@
+
+ #include <linux/interrupt.h>
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
+ #include <asm/tlbflush.h>
+ #include <asm/paravirt.h>
+ #include <asm/fixmap.h>
+@@ -58,11 +57,17 @@ extern unsigned long highstart_pfn, high
#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
@@ -98,6 +130,32 @@ Cc: x86@kernel.org
+void iomap_free(resource_size_t base, unsigned long size);
#endif /* _ASM_X86_IOMAP_H */
+--- a/arch/x86/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_X86_KMAP_TYPES_H
+-#define _ASM_X86_KMAP_TYPES_H
+-
+-#if defined(CONFIG_X86_32) && defined(CONFIG_DEBUG_HIGHMEM)
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif /* _ASM_X86_KMAP_TYPES_H */
+--- a/arch/x86/include/asm/paravirt_types.h
++++ b/arch/x86/include/asm/paravirt_types.h
+@@ -41,7 +41,6 @@
+ #ifndef __ASSEMBLY__
+
+ #include <asm/desc_defs.h>
+-#include <asm/kmap_types.h>
+ #include <asm/pgtable_types.h>
+ #include <asm/nospec-branch.h>
+
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -4,65 +4,6 @@
@@ -270,6 +328,17 @@ Cc: x86@kernel.org
}
-EXPORT_SYMBOL_GPL(iounmap_atomic);
+EXPORT_SYMBOL_GPL(iomap_atomic_pfn_prot);
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -217,7 +217,7 @@ static inline void __kunmap_atomic(void
+ #endif /* CONFIG_HIGHMEM */
+
+ #if !defined(CONFIG_KMAP_LOCAL)
+-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
++#if defined(CONFIG_HIGHMEM)
+
+ DECLARE_PER_CPU(int, __kmap_atomic_idx);
+
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -69,7 +69,7 @@ io_mapping_map_atomic_wc(struct io_mappi
@@ -281,3 +350,14 @@ Cc: x86@kernel.org
}
static inline void
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -32,7 +32,7 @@
+ #include <linux/vmalloc.h>
+
+ #ifndef CONFIG_KMAP_LOCAL
+-#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
++#ifdef CONFIG_HIGHMEM
+ DEFINE_PER_CPU(int, __kmap_atomic_idx);
+ #endif
+ #endif
diff --git a/patches/0009-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch b/patches/0009-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
new file mode 100644
index 000000000000..f4b22100c17e
--- /dev/null
+++ b/patches/0009-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
@@ -0,0 +1,197 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:21 +0100
+Subject: [PATCH 09/37] arc/mm/highmem: Use generic kmap atomic implementation
+
+Adopt the map ordering to match the other architectures and the generic
+code. Also make the maximum number of entries limited and not dependent
+on the number of CPUs. The original implementation did the following
+calculation:
+
+ nr_slots = mapsize >> PAGE_SHIFT;
+
+This results in either 512 or 1024 total slots depending on
+configuration. The total slots have to be divided by the number of CPUs to
+get the number of slots per CPU (former KM_TYPE_NR). ARC supports up to 4k
+CPUs, so this just falls apart in random ways depending on the number of
+CPUs and the actual kmap (atomic) nesting. The comment in highmem.c:
+
+ * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+ * slots across NR_CPUS would be more than sufficient (generic code defines
+ * KM_TYPE_NR as 20).
+
+is just wrong. KM_TYPE_NR (now KM_MAX_IDX) is the number of slots per CPU
+because kmap_local/atomic() needs to support nested mappings (thread,
+softirq, interrupt). While KM_MAX_IDX might be overestimated, the above
+reasoning is just wrong and clearly the highmem code was never tested with
+any system with more than a few CPUs.
+
+Use the default number of slots and fail the build when it does not
+fit. Randomly failing at runtime is not really a good option.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: linux-snps-arc@lists.infradead.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arc/Kconfig | 1
+ arch/arc/include/asm/highmem.h | 26 ++++++++++++++----
+ arch/arc/include/asm/kmap_types.h | 14 ---------
+ arch/arc/mm/highmem.c | 54 +++-----------------------------------
+ 4 files changed, 26 insertions(+), 69 deletions(-)
+ delete mode 100644 arch/arc/include/asm/kmap_types.h
+
+--- a/arch/arc/Kconfig
++++ b/arch/arc/Kconfig
+@@ -507,6 +507,7 @@ config LINUX_RAM_BASE
+ config HIGHMEM
+ bool "High Memory Support"
+ select ARCH_DISCONTIGMEM_ENABLE
++ select KMAP_LOCAL
+ help
+ With ARC 2G:2G address split, only upper 2G is directly addressable by
+ kernel. Enable this to potentially allow access to rest of 2G and PAE
+--- a/arch/arc/include/asm/highmem.h
++++ b/arch/arc/include/asm/highmem.h
+@@ -9,17 +9,29 @@
+ #ifdef CONFIG_HIGHMEM
+
+ #include <uapi/asm/page.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
++
++#define FIXMAP_SIZE PGDIR_SIZE
++#define PKMAP_SIZE PGDIR_SIZE
+
+ /* start after vmalloc area */
+ #define FIXMAP_BASE (PAGE_OFFSET - FIXMAP_SIZE - PKMAP_SIZE)
+-#define FIXMAP_SIZE PGDIR_SIZE /* only 1 PGD worth */
+-#define KM_TYPE_NR ((FIXMAP_SIZE >> PAGE_SHIFT)/NR_CPUS)
+-#define FIXMAP_ADDR(nr) (FIXMAP_BASE + ((nr) << PAGE_SHIFT))
++
++#define FIX_KMAP_SLOTS (KM_MAX_IDX * NR_CPUS)
++#define FIX_KMAP_BEGIN (0UL)
++#define FIX_KMAP_END ((FIX_KMAP_BEGIN + FIX_KMAP_SLOTS) - 1)
++
++#define FIXADDR_TOP (FIXMAP_BASE + (FIX_KMAP_END << PAGE_SHIFT))
++
++/*
++ * This should be converted to the asm-generic version, but of course this
++ * is needlessly different from all other architectures. Sigh - tglx
++ */
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x) (((FIXADDR_TOP - ((x) & PAGE_MASK))) >> PAGE_SHIFT)
+
+ /* start after fixmap area */
+ #define PKMAP_BASE (FIXMAP_BASE + FIXMAP_SIZE)
+-#define PKMAP_SIZE PGDIR_SIZE
+ #define LAST_PKMAP (PKMAP_SIZE >> PAGE_SHIFT)
+ #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+ #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
+@@ -29,11 +41,13 @@
+
+ extern void kmap_init(void);
+
++#define arch_kmap_local_post_unmap(vaddr) \
++ local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE)
++
+ static inline void flush_cache_kmaps(void)
+ {
+ flush_cache_all();
+ }
+-
+ #endif
+
+ #endif
+--- a/arch/arc/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-only */
+-/*
+- * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
+- */
+-
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-/*
+- * We primarily need to define KM_TYPE_NR here but that in turn
+- * is a function of PGDIR_SIZE etc.
+- * To avoid circular deps issue, put everything in asm/highmem.h
+- */
+-#endif
+--- a/arch/arc/mm/highmem.c
++++ b/arch/arc/mm/highmem.c
+@@ -36,9 +36,8 @@
+ * This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, which means
+ * 2M of kvaddr space for typical config (8K page and 11:8:13 traversal split)
+ *
+- * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
+- * slots across NR_CPUS would be more than sufficient (generic code defines
+- * KM_TYPE_NR as 20).
++ * - The fixed KMAP slots for kmap_local/atomic() require KM_MAX_IDX slots per
++ * CPU. So the number of CPUs sharing a single PTE page is limited.
+ *
+ * - pkmap being preemptible, in theory could do with more than 256 concurrent
+ * mappings. However, generic pkmap code: map_new_virtual(), doesn't traverse
+@@ -47,48 +46,6 @@
+ */
+
+ extern pte_t * pkmap_page_table;
+-static pte_t * fixmap_page_table;
+-
+-void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
+-{
+- int idx, cpu_idx;
+- unsigned long vaddr;
+-
+- cpu_idx = kmap_atomic_idx_push();
+- idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+- vaddr = FIXMAP_ADDR(idx);
+-
+- set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
+- mk_pte(page, prot));
+-
+- return (void *)vaddr;
+-}
+-EXPORT_SYMBOL(kmap_atomic_high_prot);
+-
+-void kunmap_atomic_high(void *kv)
+-{
+- unsigned long kvaddr = (unsigned long)kv;
+-
+- if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {
+-
+- /*
+- * Because preemption is disabled, this vaddr can be associated
+- * with the current allocated index.
+- * But in case of multiple live kmap_atomic(), it still relies on
+- * callers to unmap in right order.
+- */
+- int cpu_idx = kmap_atomic_idx();
+- int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
+-
+- WARN_ON(kvaddr != FIXMAP_ADDR(idx));
+-
+- pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
+- local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);
+-
+- kmap_atomic_idx_pop();
+- }
+-}
+-EXPORT_SYMBOL(kunmap_atomic_high);
+
+ static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
+ {
+@@ -108,10 +65,9 @@ void __init kmap_init(void)
+ {
+ /* Due to recursive include hell, we can't do this in processor.h */
+ BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));
++ BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
++ BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE);
+
+- BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
+ pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);
+-
+- BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
+- fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
++ alloc_kmap_pgtable(FIXMAP_BASE);
+ }
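The degenerate per-CPU math the changelog describes is easy to reproduce with
the numbers from the old comment. A worked example, assuming the typical
configuration (FIXMAP_SIZE = PGDIR_SIZE = 2M) and an illustrative CPU count:

    /*
     * Old scheme:
     *
     *   nr_slots   = FIXMAP_SIZE >> PAGE_SHIFT;  // 2M / 4K = 512 total
     *   KM_TYPE_NR = nr_slots / NR_CPUS;         // 512 / 64 = 8 per CPU
     *
     * With the up to 4k CPUs ARC supports, KM_TYPE_NR degenerates to 0,
     * so the usable nesting depth silently depended on NR_CPUS. The new
     * code fixes the per-CPU depth at KM_MAX_IDX instead and inverts the
     * check: BUILD_BUG_ON(FIX_KMAP_SLOTS > PTRS_PER_PTE) fails the build
     * when KM_MAX_IDX * NR_CPUS no longer fits into the single PTE page.
     */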
diff --git a/patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0010-ARM-highmem-Switch-to-generic-kmap-atomic.patch
index 776381fcb798..6a1c6d38afdb 100644
--- a/patches/0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0010-ARM-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 12:05:18 +0200
-Subject: [PATCH 06/18] ARM: highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:22 +0100
+Subject: [PATCH 10/37] ARM: highmem: Switch to generic kmap atomic
No reason having the same code in every architecture.
@@ -8,12 +8,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: linux-arm-kernel@lists.infradead.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/arm/Kconfig | 1
- arch/arm/include/asm/highmem.h | 31 +++++++---
- arch/arm/mm/Makefile | 1
- arch/arm/mm/highmem.c | 121 -----------------------------------------
- 4 files changed, 23 insertions(+), 131 deletions(-)
+ arch/arm/Kconfig | 1
+ arch/arm/include/asm/fixmap.h | 4 -
+ arch/arm/include/asm/highmem.h | 34 +++++++---
+ arch/arm/include/asm/kmap_types.h | 10 ---
+ arch/arm/mm/Makefile | 1
+ arch/arm/mm/highmem.c | 121 --------------------------------------
+ 6 files changed, 27 insertions(+), 144 deletions(-)
+ delete mode 100644 arch/arm/include/asm/kmap_types.h
delete mode 100644 arch/arm/mm/highmem.c
--- a/arch/arm/Kconfig
@@ -26,9 +30,38 @@ Cc: linux-arm-kernel@lists.infradead.org
help
The address space of ARM processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
+--- a/arch/arm/include/asm/fixmap.h
++++ b/arch/arm/include/asm/fixmap.h
+@@ -7,14 +7,14 @@
+ #define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
+
+ #include <linux/pgtable.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+
+ enum fixed_addresses {
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_permanent_fixed_addresses,
+
+ FIX_KMAP_BEGIN = __end_of_permanent_fixed_addresses,
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+
+ /* Support writing RO kernel text via kprobes, jump labels, etc. */
+ FIX_TEXT_POKE0,
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
-@@ -46,19 +46,32 @@ extern pte_t *pkmap_page_table;
+@@ -2,7 +2,8 @@
+ #ifndef _ASM_HIGHMEM_H
+ #define _ASM_HIGHMEM_H
+
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
++#include <asm/fixmap.h>
+
+ #define PKMAP_BASE (PAGE_OFFSET - PMD_SIZE)
+ #define LAST_PKMAP PTRS_PER_PTE
+@@ -46,19 +47,32 @@ extern pte_t *pkmap_page_table;
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
@@ -70,6 +103,19 @@ Cc: linux-arm-kernel@lists.infradead.org
+ local_flush_tlb_kernel_page(vaddr)
#endif
+--- a/arch/arm/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,10 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef __ARM_KMAP_TYPES_H
+-#define __ARM_KMAP_TYPES_H
+-
+-/*
+- * This is the "bare minimum". AIO seems to require this.
+- */
+-#define KM_TYPE_NR 16
+-
+-#endif
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -19,7 +19,6 @@ obj-$(CONFIG_MODULES) += proc-syms.o
diff --git a/patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0011-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
index 3482405a77df..e6673a12600e 100644
--- a/patches/0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0011-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,17 +1,18 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 13:01:33 +0200
-Subject: [PATCH 07/18] csky/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:23 +0100
+Subject: [PATCH 11/37] csky/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-Acked-by: Guo Ren <guoren@kernel.org>
Cc: linux-csky@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/csky/Kconfig | 1
- arch/csky/include/asm/highmem.h | 4 +-
+ arch/csky/include/asm/fixmap.h | 4 +-
+ arch/csky/include/asm/highmem.h | 6 ++-
arch/csky/mm/highmem.c | 75 ----------------------------------------
- 3 files changed, 5 insertions(+), 75 deletions(-)
+ 4 files changed, 8 insertions(+), 78 deletions(-)
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -23,8 +24,37 @@ Cc: linux-csky@vger.kernel.org
default y
config FORCE_MAX_ZONEORDER
+--- a/arch/csky/include/asm/fixmap.h
++++ b/arch/csky/include/asm/fixmap.h
+@@ -8,7 +8,7 @@
+ #include <asm/memory.h>
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ enum fixed_addresses {
+@@ -17,7 +17,7 @@ enum fixed_addresses {
+ #endif
+ #ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #endif
+ __end_of_fixed_addresses
+ };
--- a/arch/csky/include/asm/highmem.h
+++ b/arch/csky/include/asm/highmem.h
+@@ -9,7 +9,7 @@
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
+ #include <linux/uaccess.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #include <asm/cache.h>
+
+ /* undef for production */
@@ -32,10 +32,12 @@ extern pte_t *pkmap_page_table;
#define ARCH_HAS_KMAP_FLUSH_TLB
diff --git a/patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0012-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
index dd4c0486ae6b..2e90d273b177 100644
--- a/patches/0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0012-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,18 +1,20 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 13:06:09 +0200
-Subject: [PATCH 08/18] microblaze/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:24 +0100
+Subject: [PATCH 12/37] microblaze/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Michal Simek <monstr@monstr.eu>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/microblaze/Kconfig | 1
+ arch/microblaze/include/asm/fixmap.h | 4 -
arch/microblaze/include/asm/highmem.h | 6 ++
arch/microblaze/mm/Makefile | 1
arch/microblaze/mm/highmem.c | 78 ----------------------------------
arch/microblaze/mm/init.c | 6 --
- 5 files changed, 6 insertions(+), 86 deletions(-)
+ 6 files changed, 8 insertions(+), 88 deletions(-)
delete mode 100644 arch/microblaze/mm/highmem.c
--- a/arch/microblaze/Kconfig
@@ -25,6 +27,26 @@ Cc: Michal Simek <monstr@monstr.eu>
help
The address space of Microblaze processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
+--- a/arch/microblaze/include/asm/fixmap.h
++++ b/arch/microblaze/include/asm/fixmap.h
+@@ -20,7 +20,7 @@
+ #include <asm/page.h>
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ #define FIXADDR_TOP ((unsigned long)(-PAGE_SIZE))
+@@ -47,7 +47,7 @@ enum fixed_addresses {
+ FIX_HOLE,
+ #ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * num_possible_cpus()) - 1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * num_possible_cpus()) - 1,
+ #endif
+ __end_of_fixed_addresses
+ };
--- a/arch/microblaze/include/asm/highmem.h
+++ b/arch/microblaze/include/asm/highmem.h
@@ -25,7 +25,6 @@
diff --git a/patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0013-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
index 696751587c72..33197d32608b 100644
--- a/patches/0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0013-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,18 +1,22 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 13:30:40 +0200
-Subject: [PATCH 09/18] mips/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:25 +0100
+Subject: [PATCH 13/37] mips/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: linux-mips@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/mips/Kconfig | 1
- arch/mips/include/asm/highmem.h | 4 +-
- arch/mips/mm/highmem.c | 77 ----------------------------------------
- arch/mips/mm/init.c | 3 -
- 4 files changed, 3 insertions(+), 82 deletions(-)
+ arch/mips/Kconfig | 1
+ arch/mips/include/asm/fixmap.h | 4 -
+ arch/mips/include/asm/highmem.h | 6 +-
+ arch/mips/include/asm/kmap_types.h | 13 ------
+ arch/mips/mm/highmem.c | 77 -------------------------------------
+ arch/mips/mm/init.c | 4 -
+ 6 files changed, 6 insertions(+), 99 deletions(-)
+ delete mode 100644 arch/mips/include/asm/kmap_types.h
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -24,8 +28,37 @@ Cc: linux-mips@vger.kernel.org
config CPU_SUPPORTS_HIGHMEM
bool
+--- a/arch/mips/include/asm/fixmap.h
++++ b/arch/mips/include/asm/fixmap.h
+@@ -17,7 +17,7 @@
+ #include <spaces.h>
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ /*
+@@ -52,7 +52,7 @@ enum fixed_addresses {
+ #ifdef CONFIG_HIGHMEM
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
+- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #endif
+ __end_of_fixed_addresses
+ };
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
+@@ -24,7 +24,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/uaccess.h>
+ #include <asm/cpu-features.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+
+ /* declarations for highmem.c */
+ extern unsigned long highstart_pfn, highend_pfn;
@@ -48,11 +48,11 @@ extern pte_t *pkmap_page_table;
#define ARCH_HAS_KMAP_FLUSH_TLB
@@ -40,6 +73,22 @@ Cc: linux-mips@vger.kernel.org
#endif /* __KERNEL__ */
+--- a/arch/mips/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -8,8 +8,6 @@
@@ -132,7 +181,15 @@ Cc: linux-mips@vger.kernel.org
-}
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
-@@ -402,9 +402,6 @@ void __init paging_init(void)
+@@ -36,7 +36,6 @@
+ #include <asm/cachectl.h>
+ #include <asm/cpu.h>
+ #include <asm/dma.h>
+-#include <asm/kmap_types.h>
+ #include <asm/maar.h>
+ #include <asm/mmu_context.h>
+ #include <asm/sections.h>
+@@ -402,9 +401,6 @@ void __init paging_init(void)
pagetable_init();
diff --git a/patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0014-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
index b696d1fafdf4..1ed58f2f462d 100644
--- a/patches/0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0014-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,19 +1,23 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 13:33:51 +0200
-Subject: [PATCH 10/18] nds32/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:26 +0100
+Subject: [PATCH 14/37] nds32/mm/highmem: Switch to generic kmap atomic
The mapping code is odd and looks broken. See FIXME in the comment.
+Also fix the harmless off-by-one in the FIX_KMAP_END define.
+
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Nick Hu <nickhu@andestech.com>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Vincent Chen <deanbo422@gmail.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/nds32/Kconfig.cpu | 1
- arch/nds32/include/asm/highmem.h | 21 +++++++++++++----
+ arch/nds32/include/asm/fixmap.h | 4 +--
+ arch/nds32/include/asm/highmem.h | 22 +++++++++++++----
arch/nds32/mm/Makefile | 1
arch/nds32/mm/highmem.c | 48 ---------------------------------------
- 4 files changed, 17 insertions(+), 54 deletions(-)
+ 5 files changed, 19 insertions(+), 57 deletions(-)
delete mode 100644 arch/nds32/mm/highmem.c
--- a/arch/nds32/Kconfig.cpu
@@ -26,9 +30,37 @@ Cc: Vincent Chen <deanbo422@gmail.com>
help
The address space of Andes processors is only 4 Gigabytes large
and it has to accommodate user address space, kernel address
+--- a/arch/nds32/include/asm/fixmap.h
++++ b/arch/nds32/include/asm/fixmap.h
+@@ -6,7 +6,7 @@
+
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ enum fixed_addresses {
+@@ -14,7 +14,7 @@ enum fixed_addresses {
+ FIX_KMAP_RESERVED,
+ FIX_KMAP_BEGIN,
+ #ifdef CONFIG_HIGHMEM
+- FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS),
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #endif
+ FIX_EARLYCON_MEM_BASE,
+ __end_of_fixed_addresses
--- a/arch/nds32/include/asm/highmem.h
+++ b/arch/nds32/include/asm/highmem.h
-@@ -45,11 +45,22 @@ extern pte_t *pkmap_page_table;
+@@ -5,7 +5,6 @@
+ #define _ASM_HIGHMEM_H
+
+ #include <asm/proc-fns.h>
+-#include <asm/kmap_types.h>
+ #include <asm/fixmap.h>
+
+ /*
+@@ -45,11 +44,22 @@ extern pte_t *pkmap_page_table;
extern void kmap_init(void);
/*
@@ -49,7 +81,7 @@ Cc: Vincent Chen <deanbo422@gmail.com>
+ __nds32__isb(); \
+ } while (0)
+
-+#define arch_kmap_local_pre_unmap(vaddr, pte) \
++#define arch_kmap_local_pre_unmap(vaddr) \
+ do { \
+ __nds32__tlbop_inv(vaddr); \
+ __nds32__isb(); \
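The off-by-one mentioned in this patch's changelog is visible in the fixmap
hunk above; a short worked check (illustrative, not code from the patch):

    /*
     * N slots starting at FIX_KMAP_BEGIN occupy the indices
     * FIX_KMAP_BEGIN .. FIX_KMAP_BEGIN + N - 1, so the old
     *   FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS)
     * reserved one fixmap slot too many. The generic conversion
     * subtracts 1, like every other architecture in this series.
     */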
diff --git a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
index f61b2556937f..cd0d0dde59f7 100644
--- a/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/0015-locking-rtmutex-add-sleeping-lock-implementation.patch
@@ -370,7 +370,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1539,13 +1540,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1541,13 +1542,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -387,7 +387,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2841,7 +2842,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2843,7 +2844,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -396,7 +396,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3203,7 +3204,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3205,7 +3206,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
diff --git a/patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
index 2f072ccc8ed1..dc44856c7bd2 100644
--- a/patches/0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 13:45:08 +0200
-Subject: [PATCH 11/18] powerpc/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:27 +0100
+Subject: [PATCH 15/37] powerpc/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture
@@ -9,13 +9,17 @@ Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: linuxppc-dev@lists.ozlabs.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/powerpc/Kconfig | 1
- arch/powerpc/include/asm/highmem.h | 6 ++-
- arch/powerpc/mm/Makefile | 1
- arch/powerpc/mm/highmem.c | 67 -------------------------------------
- arch/powerpc/mm/mem.c | 7 ---
- 5 files changed, 6 insertions(+), 76 deletions(-)
+ arch/powerpc/Kconfig | 1
+ arch/powerpc/include/asm/fixmap.h | 4 +-
+ arch/powerpc/include/asm/highmem.h | 7 ++-
+ arch/powerpc/include/asm/kmap_types.h | 13 ------
+ arch/powerpc/mm/Makefile | 1
+ arch/powerpc/mm/highmem.c | 67 ----------------------------------
+ arch/powerpc/mm/mem.c | 7 ---
+ 7 files changed, 8 insertions(+), 92 deletions(-)
+ delete mode 100644 arch/powerpc/include/asm/kmap_types.h
delete mode 100644 arch/powerpc/mm/highmem.c
--- a/arch/powerpc/Kconfig
@@ -28,9 +32,34 @@ Cc: linuxppc-dev@lists.ozlabs.org
source "kernel/Kconfig.hz"
+--- a/arch/powerpc/include/asm/fixmap.h
++++ b/arch/powerpc/include/asm/fixmap.h
+@@ -20,7 +20,7 @@
+ #include <asm/page.h>
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ #ifdef CONFIG_KASAN
+@@ -55,7 +55,7 @@ enum fixed_addresses {
+ FIX_EARLY_DEBUG_BASE = FIX_EARLY_DEBUG_TOP+(ALIGN(SZ_128K, PAGE_SIZE)/PAGE_SIZE)-1,
+ #ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
+- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++ FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_MAX_IDX * NR_CPUS) - 1,
+ #endif
+ #ifdef CONFIG_PPC_8xx
+ /* For IMMR we need an aligned 512K area */
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
-@@ -29,7 +29,6 @@
+@@ -24,12 +24,10 @@
+ #ifdef __KERNEL__
+
+ #include <linux/interrupt.h>
+-#include <asm/kmap_types.h>
+ #include <asm/cacheflush.h>
#include <asm/page.h>
#include <asm/fixmap.h>
@@ -38,7 +67,7 @@ Cc: linuxppc-dev@lists.ozlabs.org
extern pte_t *pkmap_page_table;
/*
-@@ -60,6 +59,11 @@ extern pte_t *pkmap_page_table;
+@@ -60,6 +58,11 @@ extern pte_t *pkmap_page_table;
#define flush_cache_kmaps() flush_cache_all()
@@ -50,6 +79,22 @@ Cc: linuxppc-dev@lists.ozlabs.org
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
+--- a/arch/powerpc/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0-or-later */
+-#ifndef _ASM_POWERPC_KMAP_TYPES_H
+-#define _ASM_POWERPC_KMAP_TYPES_H
+-
+-#ifdef __KERNEL__
+-
+-/*
+- */
+-
+-#define KM_TYPE_NR 16
+-
+-#endif /* __KERNEL__ */
+-#endif /* _ASM_POWERPC_KMAP_TYPES_H */
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_NEED_MULTIPLE_NODES) += num
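The KM_TYPE_NR to KM_MAX_IDX switch above resizes the per-CPU fixmap window. A hedged sketch of the slot calculation this sizing assumes (the helper lives in the generic code outside this diff; the name is an assumption):

    /* Assumed per-CPU slot selection matching the FIX_KMAP_END sizing above */
    static inline int kmap_local_calc_idx(int idx)
    {
            return idx + KM_MAX_IDX * smp_processor_id();
    }
    /* vaddr = __fix_to_virt(FIX_KMAP_BEGIN + kmap_local_calc_idx(idx)); */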
diff --git a/patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0016-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
index 6dfc05892133..2e8ce37104c4 100644
--- a/patches/0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0016-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,19 +1,23 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 14:00:29 +0200
-Subject: [PATCH 12/18] sparc/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:28 +0100
+Subject: [PATCH 16/37] sparc/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: sparclinux@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/sparc/Kconfig | 1
- arch/sparc/include/asm/highmem.h | 7 +-
- arch/sparc/mm/Makefile | 3 -
- arch/sparc/mm/highmem.c | 115 ---------------------------------------
- arch/sparc/mm/srmmu.c | 2
- 5 files changed, 6 insertions(+), 122 deletions(-)
+ arch/sparc/Kconfig | 1
+ arch/sparc/include/asm/highmem.h | 8 +-
+ arch/sparc/include/asm/kmap_types.h | 11 ---
+ arch/sparc/include/asm/vaddrs.h | 4 -
+ arch/sparc/mm/Makefile | 3
+ arch/sparc/mm/highmem.c | 115 ------------------------------------
+ arch/sparc/mm/srmmu.c | 2
+ 7 files changed, 8 insertions(+), 136 deletions(-)
+ delete mode 100644 arch/sparc/include/asm/kmap_types.h
delete mode 100644 arch/sparc/mm/highmem.c
--- a/arch/sparc/Kconfig
@@ -28,7 +32,15 @@ Cc: sparclinux@vger.kernel.org
bool
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
-@@ -33,8 +33,6 @@ extern unsigned long highstart_pfn, high
+@@ -24,7 +24,6 @@
+ #include <linux/interrupt.h>
+ #include <linux/pgtable.h>
+ #include <asm/vaddrs.h>
+-#include <asm/kmap_types.h>
+ #include <asm/pgtsrmmu.h>
+
+ /* declarations for highmem.c */
+@@ -33,8 +32,6 @@ extern unsigned long highstart_pfn, high
#define kmap_prot __pgprot(SRMMU_ET_PTE | SRMMU_PRIV | SRMMU_CACHE)
extern pte_t *pkmap_page_table;
@@ -37,7 +49,7 @@ Cc: sparclinux@vger.kernel.org
/*
* Right now we initialize only a single pte table. It can be extended
* easily, subsequent pte tables have to be allocated in one physical
-@@ -53,6 +51,11 @@ void kmap_init(void) __init;
+@@ -53,6 +50,11 @@ void kmap_init(void) __init;
#define flush_cache_kmaps() flush_cache_all()
@@ -49,6 +61,38 @@ Cc: sparclinux@vger.kernel.org
#endif /* __KERNEL__ */
#endif /* _ASM_HIGHMEM_H */
+--- a/arch/sparc/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-/* Dummy header just to define km_type. None of this
+- * is actually used on sparc. -DaveM
+- */
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#endif
+--- a/arch/sparc/include/asm/vaddrs.h
++++ b/arch/sparc/include/asm/vaddrs.h
+@@ -32,13 +32,13 @@
+ #define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
+
+ #ifndef __ASSEMBLY__
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+
+ enum fixed_addresses {
+ FIX_HOLE,
+ #ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+- FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS),
++ FIX_KMAP_END = (KM_MAX_IDX * NR_CPUS),
+ #endif
+ __end_of_fixed_addresses
+ };
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -15,6 +15,3 @@ obj-$(CONFIG_SPARC32) += leon_mm.o
diff --git a/patches/0017-mm-highmem-Provide-kmap_local.patch b/patches/0017-mm-highmem-Provide-kmap_local.patch
deleted file mode 100644
index 405744c5dd58..000000000000
--- a/patches/0017-mm-highmem-Provide-kmap_local.patch
+++ /dev/null
@@ -1,177 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 21:29:41 +0200
-Subject: [PATCH 17/18] mm/highmem: Provide kmap_local*
-
-Now that the kmap atomic index is stored in task struct provide a
-preemptible variant. On context switch the maps of an outgoing task are
-removed and the map of the incoming task are restored. That's obviously
-slow, but highmem is slow anyway.
-
-The kmap_local.*() functions can be invoked from both preemptible and
-atomic context. kmap local sections disable migration to keep the resulting
-virtual mapping address correct, but disable neither pagefaults nor
-preemption.
-
-A wholesale conversion of kmap_atomic to be fully preemptible is not
-possible because some of the usage sites might rely on the preemption
-disable for serialization or on the implicit pagefault disable. Needs to be
-done on a case by case basis.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/highmem.h | 117 +++++++++++++++++++++++++++++++++++++++++-------
- 1 file changed, 101 insertions(+), 16 deletions(-)
-
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -86,17 +86,56 @@ static inline void kunmap(struct page *p
- }
-
- /*
-- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
-- * no global lock is needed and because the kmap code must perform a global TLB
-- * invalidation when the kmap pool wraps.
-- *
-- * However when holding an atomic kmap it is not legal to sleep, so atomic
-- * kmaps are appropriate for short, tight code paths only.
-- *
-- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
-- * gives a more generic (and caching) interface. But kmap_atomic can
-- * be used in IRQ contexts, so in some (very limited) cases we need
-- * it.
-+ * For highmem systems it is required to temporarily map pages
-+ * which reside in the portion of memory which is not covered
-+ * by the permanent kernel mapping.
-+ *
-+ * This comes in three flavors:
-+ *
-+ * 1) kmap/kunmap:
-+ *
-+ * An interface to acquire longer term mappings with no restrictions
-+ * on preemption and migration. This comes with an overhead as the
-+ * mapping space is restricted and protected by a global lock. It
-+ * also requires global TLB invalidation when the kmap pool wraps.
-+ *
-+ * kmap() might block when the mapping space is fully utilized until a
-+ * slot becomes available. Only callable from preemptible thread
-+ * context.
-+ *
-+ * 2) kmap_local.*()/kunmap_local.*()
-+ *
-+ * An interface to acquire short term mappings. Can be invoked from any
-+ * context including interrupts. The mapping is per thread, CPU local
-+ * and not globally visible. It can only be used in the context which
-+ * acquired the mapping. Nesting kmap_local.*() and kmap_atomic.*()
-+ * mappings is allowed to a certain extent (up to KMAP_TYPE_NR).
-+ *
-+ * Nested kmap_local.*() and kunmap_local.*() invocations have to be
-+ * strictly ordered because the map implementation is stack based.
-+ *
-+ * kmap_local.*() disables migration, but keeps preemption enabled. It's
-+ * valid to take pagefaults in a kmap_local region unless the context in
-+ * which the local kmap is acquired does not allow it for other reasons.
-+ *
-+ * If a task holding local kmaps is preempted, the maps are removed on
-+ * context switch and restored when the task comes back on the CPU. As
-+ * the maps are strictly CPU local it is guaranteed that the task stays
-+ * on the CPU and the CPU cannot be unplugged until the local kmaps are
-+ * released.
-+ *
-+ * 3) kmap_atomic.*()/kunmap_atomic.*()
-+ *
-+ * Based on the same mechanism as kmap local. Atomic kmap disables
-+ * preemption and pagefaults. Only use if absolutely required, use
-+ * the corresponding kmap_local variant if possible.
-+ *
-+ * Local and atomic kmaps are faster than kmap/kunmap, but impose
-+ * restrictions. Only use them when required.
-+ *
-+ * For !HIGHMEM enabled systems the kmap flavours are not doing any mapping
-+ * operation and kmap() won't sleep, but the kmap local and atomic variants
-+ * still disable migration resp. pagefaults and preemption.
- */
- static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-@@ -122,6 +161,28 @@ static inline void __kunmap_atomic(void
- kunmap_local_indexed(addr);
- }
-
-+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
-+{
-+ migrate_disable();
-+ return __kmap_local_page_prot(page, prot);
-+}
-+
-+static inline void *kmap_local_page(struct page *page)
-+{
-+ return kmap_local_page_prot(page, kmap_prot);
-+}
-+
-+static inline void *kmap_local_pfn(unsigned long pfn)
-+{
-+ migrate_disable();
-+ return __kmap_local_pfn_prot(pfn, kmap_prot);
-+}
-+
-+static inline void __kunmap_local(void *vaddr)
-+{
-+ kunmap_local_indexed(vaddr);
-+}
-+
- /* declarations for linux/mm/highmem.c */
- unsigned int nr_free_highpages(void);
- extern atomic_long_t _totalhigh_pages;
-@@ -199,17 +260,34 @@ static inline void *kmap_atomic_pfn(unsi
- return kmap_atomic(pfn_to_page(pfn));
- }
-
--static inline void __kunmap_atomic(void *addr)
-+static inline void __kunmap_local(void *addr)
- {
-- /*
-- * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
-- * handles re-enabling faults and preemption
-- */
- #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
- kunmap_flush_on_unmap(addr);
- #endif
- }
-
-+static inline void __kunmap_atomic(void *addr)
-+{
-+ __kunmap_local(addr);
-+}
-+
-+static inline void *kmap_local_page(struct page *page)
-+{
-+ migrate_disable();
-+ return page_address(page);
-+}
-+
-+static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
-+{
-+ return kmap_local_page(page);
-+}
-+
-+static inline void *kmap_local_pfn(unsigned long pfn)
-+{
-+ return kmap_local_page(pfn_to_page(pfn));
-+}
-+
- #define kmap_flush_unused() do {} while(0)
-
- #endif /* CONFIG_HIGHMEM */
-@@ -226,6 +304,13 @@ do { \
- preempt_enable(); \
- } while (0)
-
-+#define kunmap_local(__addr) \
-+do { \
-+ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
-+ __kunmap_local(__addr); \
-+ migrate_enable(); \
-+} while (0)
-+
- /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
- #ifndef clear_user_highpage
- static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
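The revision deleted above pinned migration in the inline wrappers; its replacement (patch 0025 further down) moves that into the core mapping code. Side by side, using only code visible in this diff:

    /* Old (deleted above): migration disabled in the header wrapper */
    static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
    {
            migrate_disable();
            return __kmap_local_page_prot(page, prot);
    }

    /* New (patch 0025 below): plain wrapper, the core disables migration */
    static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
    {
            return __kmap_local_page_prot(page, prot);
    }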
diff --git a/patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch b/patches/0017-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
index 8bab507b8dc8..3c4c7ec907f7 100644
--- a/patches/0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
+++ b/patches/0017-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 14:04:36 +0200
-Subject: [PATCH 13/18] xtensa/mm/highmem: Switch to generic kmap atomic
+Date: Tue, 3 Nov 2020 10:27:29 +0100
+Subject: [PATCH 17/37] xtensa/mm/highmem: Switch to generic kmap atomic
No reason having the same code in every architecture
@@ -8,11 +8,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: Max Filippov <jcmvbkbc@gmail.com>
Cc: linux-xtensa@linux-xtensa.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/xtensa/Kconfig | 1
- arch/xtensa/include/asm/highmem.h | 9 +++++++
- arch/xtensa/mm/highmem.c | 44 +++-----------------------------------
- 3 files changed, 14 insertions(+), 40 deletions(-)
+ arch/xtensa/include/asm/fixmap.h | 4 +--
+ arch/xtensa/include/asm/highmem.h | 12 ++++++++-
+ arch/xtensa/mm/highmem.c | 46 ++++----------------------------------
+ 4 files changed, 18 insertions(+), 45 deletions(-)
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -24,9 +26,40 @@ Cc: linux-xtensa@linux-xtensa.org
help
Linux can use the full amount of RAM in the system by
default. However, the default MMUv2 setup only maps the
+--- a/arch/xtensa/include/asm/fixmap.h
++++ b/arch/xtensa/include/asm/fixmap.h
+@@ -16,7 +16,7 @@
+ #ifdef CONFIG_HIGHMEM
+ #include <linux/threads.h>
+ #include <linux/pgtable.h>
+-#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
+ #endif
+
+ /*
+@@ -39,7 +39,7 @@ enum fixed_addresses {
+ /* reserved pte's for temporary kernel mappings */
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = FIX_KMAP_BEGIN +
+- (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1,
++ (KM_MAX_IDX * NR_CPUS * DCACHE_N_COLORS) - 1,
+ #endif
+ __end_of_fixed_addresses
+ };
--- a/arch/xtensa/include/asm/highmem.h
+++ b/arch/xtensa/include/asm/highmem.h
-@@ -68,6 +68,15 @@ static inline void flush_cache_kmaps(voi
+@@ -16,9 +16,8 @@
+ #include <linux/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/fixmap.h>
+-#include <asm/kmap_types.h>
+
+-#define PKMAP_BASE ((FIXADDR_START - \
++#define PKMAP_BASE ((FIXADDR_START - \
+ (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK)
+ #define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS)
+ #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
+@@ -68,6 +67,15 @@ static inline void flush_cache_kmaps(voi
flush_cache_all();
}
@@ -53,7 +86,12 @@ Cc: linux-xtensa@linux-xtensa.org
#if DCACHE_WAY_SIZE > PAGE_SIZE
unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS];
wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS];
-@@ -37,55 +35,21 @@ static inline enum fixed_addresses kmap_
+@@ -33,59 +31,25 @@ static inline void kmap_waitqueues_init(
+
+ static inline enum fixed_addresses kmap_idx(int type, unsigned long color)
+ {
+- return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS +
++ return (type + KM_MAX_IDX * smp_processor_id()) * DCACHE_N_COLORS +
color;
}
@@ -72,7 +110,7 @@ Cc: linux-xtensa@linux-xtensa.org
- set_pte(kmap_pte + idx, mk_pte(page, prot));
-
- return (void *)vaddr;
-+ return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT);
++ return kmap_idx(type, DCACHE_ALIAS(pfn << PAGE_SHIFT));
}
-EXPORT_SYMBOL(kmap_atomic_high_prot);
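The xtensa index math above folds the cache color into the slot number: idx = (type + KM_MAX_IDX * cpu) * DCACHE_N_COLORS + color. As a worked example with assumed values KM_MAX_IDX = 16 and DCACHE_N_COLORS = 4, map level 2 with color 3 on CPU 1 lands in slot (2 + 16 * 1) * 4 + 3 = 75, so all colors of one map level stay adjacent and the mapping remains cache-alias safe.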
diff --git a/patches/0018-highmem-Get-rid-of-kmap_types.h.patch b/patches/0018-highmem-Get-rid-of-kmap_types.h.patch
new file mode 100644
index 000000000000..33b211b74751
--- /dev/null
+++ b/patches/0018-highmem-Get-rid-of-kmap_types.h.patch
@@ -0,0 +1,159 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:30 +0100
+Subject: [PATCH 18/37] highmem: Get rid of kmap_types.h
+
+The header is no longer used and on alpha, ia64, openrisc, parisc and um
+it was completely unused anyway as these architectures have no highmem
+support.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/alpha/include/asm/kmap_types.h | 15 ---------------
+ arch/ia64/include/asm/kmap_types.h | 13 -------------
+ arch/openrisc/mm/init.c | 1 -
+ arch/openrisc/mm/ioremap.c | 1 -
+ arch/parisc/include/asm/kmap_types.h | 13 -------------
+ arch/um/include/asm/fixmap.h | 1 -
+ arch/um/include/asm/kmap_types.h | 13 -------------
+ include/asm-generic/Kbuild | 1 -
+ include/asm-generic/kmap_types.h | 11 -----------
+ include/linux/highmem.h | 2 --
+ 10 files changed, 71 deletions(-)
+ delete mode 100644 arch/alpha/include/asm/kmap_types.h
+ delete mode 100644 arch/ia64/include/asm/kmap_types.h
+ delete mode 100644 arch/parisc/include/asm/kmap_types.h
+ delete mode 100644 arch/um/include/asm/kmap_types.h
+ delete mode 100644 include/asm-generic/kmap_types.h
+
+--- a/arch/alpha/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-/* Dummy header just to define km_type. */
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif
+--- a/arch/ia64/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_IA64_KMAP_TYPES_H
+-#define _ASM_IA64_KMAP_TYPES_H
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif /* _ASM_IA64_KMAP_TYPES_H */
+--- a/arch/openrisc/mm/init.c
++++ b/arch/openrisc/mm/init.c
+@@ -33,7 +33,6 @@
+ #include <asm/io.h>
+ #include <asm/tlb.h>
+ #include <asm/mmu_context.h>
+-#include <asm/kmap_types.h>
+ #include <asm/fixmap.h>
+ #include <asm/tlbflush.h>
+ #include <asm/sections.h>
+--- a/arch/openrisc/mm/ioremap.c
++++ b/arch/openrisc/mm/ioremap.c
+@@ -15,7 +15,6 @@
+ #include <linux/io.h>
+ #include <linux/pgtable.h>
+ #include <asm/pgalloc.h>
+-#include <asm/kmap_types.h>
+ #include <asm/fixmap.h>
+ #include <asm/bug.h>
+ #include <linux/sched.h>
+--- a/arch/parisc/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_KMAP_TYPES_H
+-#define _ASM_KMAP_TYPES_H
+-
+-#ifdef CONFIG_DEBUG_HIGHMEM
+-#define __WITH_KM_FENCE
+-#endif
+-
+-#include <asm-generic/kmap_types.h>
+-
+-#undef __WITH_KM_FENCE
+-
+-#endif
+--- a/arch/um/include/asm/fixmap.h
++++ b/arch/um/include/asm/fixmap.h
+@@ -3,7 +3,6 @@
+ #define __UM_FIXMAP_H
+
+ #include <asm/processor.h>
+-#include <asm/kmap_types.h>
+ #include <asm/archparam.h>
+ #include <asm/page.h>
+ #include <linux/threads.h>
+--- a/arch/um/include/asm/kmap_types.h
++++ /dev/null
+@@ -1,13 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-/*
+- * Copyright (C) 2002 Jeff Dike (jdike@karaya.com)
+- */
+-
+-#ifndef __UM_KMAP_TYPES_H
+-#define __UM_KMAP_TYPES_H
+-
+-/* No more #include "asm/arch/kmap_types.h" ! */
+-
+-#define KM_TYPE_NR 14
+-
+-#endif
+--- a/include/asm-generic/Kbuild
++++ b/include/asm-generic/Kbuild
+@@ -30,7 +30,6 @@ mandatory-y += irq.h
+ mandatory-y += irq_regs.h
+ mandatory-y += irq_work.h
+ mandatory-y += kdebug.h
+-mandatory-y += kmap_types.h
+ mandatory-y += kmap_size.h
+ mandatory-y += kprobes.h
+ mandatory-y += linkage.h
+--- a/include/asm-generic/kmap_types.h
++++ /dev/null
+@@ -1,11 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _ASM_GENERIC_KMAP_TYPES_H
+-#define _ASM_GENERIC_KMAP_TYPES_H
+-
+-#ifdef __WITH_KM_FENCE
+-# define KM_TYPE_NR 41
+-#else
+-# define KM_TYPE_NR 20
+-#endif
+-
+-#endif
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -29,8 +29,6 @@ static inline void invalidate_kernel_vma
+ }
+ #endif
+
+-#include <asm/kmap_types.h>
+-
+ /*
+ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
+ */
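With kmap_types.h gone, the remaining users take KM_MAX_IDX from the kmap_size.h header added earlier in this queue (patch 0005). Its presumed shape, reconstructed here as a sketch rather than quoted from this diff:

    /* Sketch of asm-generic/kmap_size.h; the values are assumptions */
    #ifndef _ASM_GENERIC_KMAP_SIZE_H
    #define _ASM_GENERIC_KMAP_SIZE_H

    /* For debug builds every second slot acts as a guard page */
    #ifdef CONFIG_DEBUG_HIGHMEM
    # define KM_MAX_IDX     33
    #else
    # define KM_MAX_IDX     16
    #endif

    #endif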
diff --git a/patches/0018-io-mapping-Provide-iomap_local-variant.patch b/patches/0018-io-mapping-Provide-iomap_local-variant.patch
deleted file mode 100644
index 2f00fd9b99a3..000000000000
--- a/patches/0018-io-mapping-Provide-iomap_local-variant.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 29 Oct 2020 21:59:08 +0100
-Subject: [PATCH 18/18] io-mapping: Provide iomap_local variant
-
-Similar to kmap local provide an iomap local variant which only disables
-migration, but neither disables pagefaults nor preemption.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/io-mapping.h | 34 ++++++++++++++++++++++++++++++++--
- 1 file changed, 32 insertions(+), 2 deletions(-)
-
---- a/include/linux/io-mapping.h
-+++ b/include/linux/io-mapping.h
-@@ -83,6 +83,23 @@ io_mapping_unmap_atomic(void __iomem *va
- }
-
- static inline void __iomem *
-+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
-+{
-+ resource_size_t phys_addr;
-+
-+ BUG_ON(offset >= mapping->size);
-+ phys_addr = mapping->base + offset;
-+ migrate_disable();
-+ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
-+}
-+
-+static inline void io_mapping_unmap_local(void __iomem *vaddr)
-+{
-+ kunmap_local_indexed((void __force *)vaddr);
-+ migrate_enable();
-+}
-+
-+static inline void __iomem *
- io_mapping_map_wc(struct io_mapping *mapping,
- unsigned long offset,
- unsigned long size)
-@@ -101,7 +118,7 @@ io_mapping_unmap(void __iomem *vaddr)
- iounmap(vaddr);
- }
-
--#else
-+#else /* HAVE_ATOMIC_IOMAP */
-
- #include <linux/uaccess.h>
-
-@@ -166,7 +183,20 @@ io_mapping_unmap_atomic(void __iomem *va
- preempt_enable();
- }
-
--#endif /* HAVE_ATOMIC_IOMAP */
-+static inline void __iomem *
-+io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
-+{
-+ migrate_disable();
-+ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
-+}
-+
-+static inline void io_mapping_unmap_local(void __iomem *vaddr)
-+{
-+ io_mapping_unmap(vaddr);
-+ migrate_enable();
-+}
-+
-+#endif /* !HAVE_ATOMIC_IOMAP */
-
- static inline struct io_mapping *
- io_mapping_create_wc(resource_size_t base,
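The iomap_local variant deleted here returns as patch 0026 at the end of this queue. A minimal usage sketch of the interface (only the io_mapping_* calls come from the hunks; the surrounding driver code is hypothetical):

    void __iomem *p;

    p = io_mapping_map_local_wc(mapping, offset);   /* disables migration */
    writel(val, p);                                 /* preemptible access */
    io_mapping_unmap_local(p);                      /* re-enables migration */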
diff --git a/patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch b/patches/0019-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
index 4c5f89936221..091950cdeb60 100644
--- a/patches/0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
+++ b/patches/0019-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
@@ -1,18 +1,19 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 14:58:47 +0200
-Subject: [PATCH 14/18] mm/highmem: Remove the old kmap_atomic cruft
+Date: Tue, 3 Nov 2020 10:27:31 +0100
+Subject: [PATCH 19/37] mm/highmem: Remove the old kmap_atomic cruft
All users gone.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/highmem.h | 61 ++----------------------------------------------
- mm/highmem.c | 28 ++++++++++++++++++----
- 2 files changed, 27 insertions(+), 62 deletions(-)
+ include/linux/highmem.h | 63 +++---------------------------------------------
+ mm/highmem.c | 7 -----
+ 2 files changed, 5 insertions(+), 65 deletions(-)
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
-@@ -88,31 +88,16 @@ static inline void kunmap(struct page *p
+@@ -86,31 +86,16 @@ static inline void kunmap(struct page *p
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
@@ -47,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static inline void *kmap_atomic_pfn(unsigned long pfn)
-@@ -127,13 +112,6 @@ static inline void __kunmap_atomic(void
+@@ -125,13 +110,6 @@ static inline void __kunmap_atomic(void
kunmap_local_indexed(addr);
}
@@ -61,11 +62,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* declarations for linux/mm/highmem.c */
unsigned int nr_free_highpages(void);
extern atomic_long_t _totalhigh_pages;
-@@ -226,39 +204,6 @@ static inline void __kunmap_atomic(void
+@@ -212,41 +190,8 @@ static inline void __kunmap_atomic(void
- #endif /* CONFIG_HIGHMEM */
+ #define kmap_flush_unused() do {} while(0)
--#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+-#endif /* CONFIG_HIGHMEM */
+-
+-#if !defined(CONFIG_KMAP_LOCAL)
+-#if defined(CONFIG_HIGHMEM)
-
-DECLARE_PER_CPU(int, __kmap_atomic_idx);
-
@@ -84,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-{
- return __this_cpu_read(__kmap_atomic_idx) - 1;
-}
--
+
-static inline void kmap_atomic_idx_pop(void)
-{
-#ifdef CONFIG_DEBUG_HIGHMEM
@@ -95,53 +99,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- __this_cpu_dec(__kmap_atomic_idx);
-#endif
-}
--
-#endif
--
+-#endif
++#endif /* CONFIG_HIGHMEM */
+
/*
* Prevent people trying to call kunmap_atomic() as if it were kunmap()
- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
--- a/mm/highmem.c
+++ b/mm/highmem.c
-@@ -32,10 +32,6 @@
+@@ -31,12 +31,6 @@
+ #include <asm/tlbflush.h>
#include <linux/vmalloc.h>
- #include <asm/fixmap.h>
--#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
+-#ifndef CONFIG_KMAP_LOCAL
+-#ifdef CONFIG_HIGHMEM
-DEFINE_PER_CPU(int, __kmap_atomic_idx);
-#endif
+-#endif
-
/*
* Virtual_count is not a pure "count".
* 0 means that it is not mapped, and has not been mapped
-@@ -370,6 +366,30 @@ EXPORT_SYMBOL(kunmap_high);
- #endif /* CONFIG_HIGHMEM */
-
- #ifdef CONFIG_KMAP_LOCAL
-+
-+static DEFINE_PER_CPU(int, __kmap_atomic_idx);
-+
-+static inline int kmap_atomic_idx_push(void)
-+{
-+ int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-+
-+ WARN_ON_ONCE(in_irq() && !irqs_disabled());
-+ BUG_ON(idx >= KM_TYPE_NR);
-+ return idx;
-+}
-+
-+static inline int kmap_atomic_idx(void)
-+{
-+ return __this_cpu_read(__kmap_atomic_idx) - 1;
-+}
-+
-+static inline void kmap_atomic_idx_pop(void)
-+{
-+ int idx = __this_cpu_dec_return(__kmap_atomic_idx);
-+
-+ BUG_ON(idx < 0);
-+}
-+
+@@ -410,6 +404,7 @@ static inline void kmap_local_idx_pop(vo
#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval) do { } while (0)
#endif
++
+ #ifndef arch_kmap_local_pre_unmap
+ # define arch_kmap_local_pre_unmap(vaddr) do { } while (0)
+ #endif
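The per-CPU index push/pop removed above enforced a strict LIFO discipline, which the generic kmap_local implementation keeps. Nested maps therefore have to be released in reverse order, e.g.:

    void *a = kmap_atomic(page1);   /* push: slot 0 */
    void *b = kmap_atomic(page2);   /* push: slot 1 */
    /* ... use b, then a ... */
    kunmap_atomic(b);               /* pop: slot 1 */
    kunmap_atomic(a);               /* pop: slot 0 */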
diff --git a/patches/0015-io-mapping-Cleanup-atomic-iomap.patch b/patches/0020-io-mapping-Cleanup-atomic-iomap.patch
index 99f5130a9781..8628d633d616 100644
--- a/patches/0015-io-mapping-Cleanup-atomic-iomap.patch
+++ b/patches/0020-io-mapping-Cleanup-atomic-iomap.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 29 Oct 2020 14:38:06 +0100
-Subject: [PATCH 15/18] io-mapping: Cleanup atomic iomap
+Date: Tue, 3 Nov 2020 10:27:32 +0100
+Subject: [PATCH 20/37] io-mapping: Cleanup atomic iomap
Switch the atomic iomap implementation over to kmap_local and stick the
preempt/pagefault mechanics into the generic code similar to the
@@ -9,6 +9,7 @@ kmap_atomic variants.
Rename the x86 map function in preparation for a non-atomic variant.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/include/asm/iomap.h | 9 +--------
arch/x86/mm/iomap_32.c | 6 ++----
diff --git a/patches/0021-Documentation-io-mapping-Remove-outdated-blurb.patch b/patches/0021-Documentation-io-mapping-Remove-outdated-blurb.patch
new file mode 100644
index 000000000000..40e7fb1a2176
--- /dev/null
+++ b/patches/0021-Documentation-io-mapping-Remove-outdated-blurb.patch
@@ -0,0 +1,41 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:33 +0100
+Subject: [PATCH 21/37] Documentation/io-mapping: Remove outdated blurb
+
+The implementation details in the documentation are outdated and not really
+helpful. Remove them.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/driver-api/io-mapping.rst | 22 ----------------------
+ 1 file changed, 22 deletions(-)
+
+--- a/Documentation/driver-api/io-mapping.rst
++++ b/Documentation/driver-api/io-mapping.rst
+@@ -73,25 +73,3 @@ for pages mapped with io_mapping_map_wc.
+ At driver close time, the io_mapping object must be freed::
+
+ void io_mapping_free(struct io_mapping *mapping)
+-
+-Current Implementation
+-======================
+-
+-The initial implementation of these functions uses existing mapping
+-mechanisms and so provides only an abstraction layer and no new
+-functionality.
+-
+-On 64-bit processors, io_mapping_create_wc calls ioremap_wc for the whole
+-range, creating a permanent kernel-visible mapping to the resource. The
+-map_atomic and map functions add the requested offset to the base of the
+-virtual address returned by ioremap_wc.
+-
+-On 32-bit processors with HIGHMEM defined, io_mapping_map_atomic_wc uses
+-kmap_atomic_pfn to map the specified page in an atomic fashion;
+-kmap_atomic_pfn isn't really supposed to be used with device pages, but it
+-provides an efficient mapping for this usage.
+-
+-On 32-bit processors without HIGHMEM defined, io_mapping_map_atomic_wc and
+-io_mapping_map_wc both use ioremap_wc, a terribly inefficient function which
+-performs an IPI to inform all processors about the new mapping. This results
+-in a significant performance penalty.
diff --git a/patches/0022-highmem-High-implementation-details-and-document-API.patch b/patches/0022-highmem-High-implementation-details-and-document-API.patch
new file mode 100644
index 000000000000..6c3ad48dae90
--- /dev/null
+++ b/patches/0022-highmem-High-implementation-details-and-document-API.patch
@@ -0,0 +1,534 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:34 +0100
+Subject: [PATCH 22/37] highmem: High implementation details and document API
+
+Move the gory details of kmap & al into a private header and only document
+the interfaces which are usable by drivers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 174 +++++++++++++++++++++++++
+ include/linux/highmem.h | 270 ++++++++++++++-------------------------
+ mm/highmem.c | 11 -
+ 3 files changed, 276 insertions(+), 179 deletions(-)
+ create mode 100644 include/linux/highmem-internal.h
+
+--- /dev/null
++++ b/include/linux/highmem-internal.h
+@@ -0,0 +1,174 @@
++/* SPDX-License-Identifier: GPL-2.0 */
++#ifndef _LINUX_HIGHMEM_INTERNAL_H
++#define _LINUX_HIGHMEM_INTERNAL_H
++
++/*
++ * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
++ */
++#ifdef CONFIG_KMAP_LOCAL
++void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
++void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
++void kunmap_local_indexed(void *vaddr);
++#endif
++
++#ifdef CONFIG_HIGHMEM
++#include <asm/highmem.h>
++
++#ifndef ARCH_HAS_KMAP_FLUSH_TLB
++static inline void kmap_flush_tlb(unsigned long addr) { }
++#endif
++
++#ifndef kmap_prot
++#define kmap_prot PAGE_KERNEL
++#endif
++
++void *kmap_high(struct page *page);
++void kunmap_high(struct page *page);
++void __kmap_flush_unused(void);
++struct page *__kmap_to_page(void *addr);
++
++static inline void *kmap(struct page *page)
++{
++ void *addr;
++
++ might_sleep();
++ if (!PageHighMem(page))
++ addr = page_address(page);
++ else
++ addr = kmap_high(page);
++ kmap_flush_tlb((unsigned long)addr);
++ return addr;
++}
++
++static inline void kunmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++static inline struct page *kmap_to_page(void *addr)
++{
++ return __kmap_to_page(addr);
++}
++
++static inline void kmap_flush_unused(void)
++{
++ __kmap_flush_unused();
++}
++
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++ preempt_disable();
++ pagefault_disable();
++ return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_atomic(struct page *page)
++{
++ return kmap_atomic_prot(page, kmap_prot);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++ preempt_disable();
++ pagefault_disable();
++ return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_atomic(void *addr)
++{
++ kunmap_local_indexed(addr);
++ pagefault_enable();
++ preempt_enable();
++}
++
++unsigned int __nr_free_highpages(void);
++extern atomic_long_t _totalhigh_pages;
++
++static inline unsigned int nr_free_highpages(void)
++{
++ return __nr_free_highpages();
++}
++
++static inline unsigned long totalhigh_pages(void)
++{
++ return (unsigned long)atomic_long_read(&_totalhigh_pages);
++}
++
++static inline void totalhigh_pages_inc(void)
++{
++ atomic_long_inc(&_totalhigh_pages);
++}
++
++static inline void totalhigh_pages_add(long count)
++{
++ atomic_long_add(count, &_totalhigh_pages);
++}
++
++#else /* CONFIG_HIGHMEM */
++
++static inline struct page *kmap_to_page(void *addr)
++{
++ return virt_to_page(addr);
++}
++
++static inline void *kmap(struct page *page)
++{
++ might_sleep();
++ return page_address(page);
++}
++
++static inline void kunmap_high(struct page *page) { }
++static inline void kmap_flush_unused(void) { }
++
++static inline void kunmap(struct page *page)
++{
++#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
++ kunmap_flush_on_unmap(page_address(page));
++#endif
++}
++
++static inline void *kmap_atomic(struct page *page)
++{
++ preempt_disable();
++ pagefault_disable();
++ return page_address(page);
++}
++
++static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++{
++ return kmap_atomic(page);
++}
++
++static inline void *kmap_atomic_pfn(unsigned long pfn)
++{
++ return kmap_atomic(pfn_to_page(pfn));
++}
++
++static inline void __kunmap_atomic(void *addr)
++{
++#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
++ kunmap_flush_on_unmap(addr);
++#endif
++ pagefault_enable();
++ preempt_enable();
++}
++
++static inline unsigned int nr_free_highpages(void) { return 0; }
++static inline unsigned long totalhigh_pages(void) { return 0UL; }
++
++#endif /* CONFIG_HIGHMEM */
++
++/*
++ * Prevent people trying to call kunmap_atomic() as if it were kunmap()
++ * kunmap_atomic() should get the return value of kmap_atomic, not the page.
++ */
++#define kunmap_atomic(__addr) \
++do { \
++ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
++ __kunmap_atomic(__addr); \
++} while (0)
++
++#endif
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -11,199 +11,125 @@
+
+ #include <asm/cacheflush.h>
+
+-#ifndef ARCH_HAS_FLUSH_ANON_PAGE
+-static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+-{
+-}
+-#endif
++#include "highmem-internal.h"
+
+-#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+-static inline void flush_kernel_dcache_page(struct page *page)
+-{
+-}
+-static inline void flush_kernel_vmap_range(void *vaddr, int size)
+-{
+-}
+-static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+-{
+-}
+-#endif
+-
+-/*
+- * Outside of CONFIG_HIGHMEM to support X86 32bit iomap_atomic() cruft.
++/**
++ * kmap - Map a page for long term usage
++ * @page: Pointer to the page to be mapped
++ *
++ * Returns: The virtual address of the mapping
++ *
++ * Can only be invoked from preemptible task context because on 32bit
++ * systems with CONFIG_HIGHMEM enabled this function might sleep.
++ *
++ * For systems with CONFIG_HIGHMEM=n and for pages in the low memory area
++ * this returns the virtual address of the direct kernel mapping.
++ *
++ * The returned virtual address is globally visible and valid up to the
++ * point where it is unmapped via kunmap(). The pointer can be handed to
++ * other contexts.
++ *
++ * For highmem pages on 32bit systems this can be slow as the mapping space
++ * is limited and protected by a global lock. In case that there is no
++ * mapping slot available the function blocks until a slot is released via
++ * kunmap().
+ */
+-#ifdef CONFIG_KMAP_LOCAL
+-void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
+-void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
+-void kunmap_local_indexed(void *vaddr);
+-#endif
+-
+-#ifdef CONFIG_HIGHMEM
+-#include <asm/highmem.h>
++static inline void *kmap(struct page *page);
+
+-#ifndef ARCH_HAS_KMAP_FLUSH_TLB
+-static inline void kmap_flush_tlb(unsigned long addr) { }
+-#endif
+-
+-#ifndef kmap_prot
+-#define kmap_prot PAGE_KERNEL
+-#endif
+-
+-void *kmap_high(struct page *page);
+-static inline void *kmap(struct page *page)
+-{
+- void *addr;
+-
+- might_sleep();
+- if (!PageHighMem(page))
+- addr = page_address(page);
+- else
+- addr = kmap_high(page);
+- kmap_flush_tlb((unsigned long)addr);
+- return addr;
+-}
+-
+-void kunmap_high(struct page *page);
+-
+-static inline void kunmap(struct page *page)
+-{
+- might_sleep();
+- if (!PageHighMem(page))
+- return;
+- kunmap_high(page);
+-}
+-
+-/*
+- * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
+- * no global lock is needed and because the kmap code must perform a global TLB
+- * invalidation when the kmap pool wraps.
+- *
+- * However when holding an atomic kmap it is not legal to sleep, so atomic
+- * kmaps are appropriate for short, tight code paths only.
+- *
+- * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
+- * gives a more generic (and caching) interface. But kmap_atomic can
+- * be used in IRQ contexts, so in some (very limited) cases we need
+- * it.
++/**
++ * kunmap - Unmap the virtual address mapped by kmap()
++ * @addr: Virtual address to be unmapped
++ *
++ * Counterpart to kmap(). A NOOP for CONFIG_HIGHMEM=n and for mappings of
++ * pages in the low memory area.
+ */
+-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+-{
+- preempt_disable();
+- pagefault_disable();
+- return __kmap_local_page_prot(page, prot);
+-}
+-
+-static inline void *kmap_atomic(struct page *page)
+-{
+- return kmap_atomic_prot(page, kmap_prot);
+-}
+-
+-static inline void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- preempt_disable();
+- pagefault_disable();
+- return __kmap_local_pfn_prot(pfn, kmap_prot);
+-}
+-
+-static inline void __kunmap_atomic(void *addr)
+-{
+- kunmap_local_indexed(addr);
+-}
+-
+-/* declarations for linux/mm/highmem.c */
+-unsigned int nr_free_highpages(void);
+-extern atomic_long_t _totalhigh_pages;
+-static inline unsigned long totalhigh_pages(void)
+-{
+- return (unsigned long)atomic_long_read(&_totalhigh_pages);
+-}
++static inline void kunmap(struct page *page);
+
+-static inline void totalhigh_pages_inc(void)
+-{
+- atomic_long_inc(&_totalhigh_pages);
+-}
+-
+-static inline void totalhigh_pages_add(long count)
+-{
+- atomic_long_add(count, &_totalhigh_pages);
+-}
+-
+-void kmap_flush_unused(void);
+-
+-struct page *kmap_to_page(void *addr);
+-
+-#else /* CONFIG_HIGHMEM */
++/**
++ * kmap_to_page - Get the page for a kmap'ed address
++ * @addr: The address to look up
++ *
++ * Returns: The page which is mapped to @addr.
++ */
++static inline struct page *kmap_to_page(void *addr);
+
+-static inline unsigned int nr_free_highpages(void) { return 0; }
++/**
++ * kmap_flush_unused - Flush all unused kmap mappings in order to
++ * remove stray mappings
++ */
++static inline void kmap_flush_unused(void);
+
+-static inline struct page *kmap_to_page(void *addr)
+-{
+- return virt_to_page(addr);
+-}
++/**
++ * kmap_atomic - Atomically map a page for temporary usage
++ * @page: Pointer to the page to be mapped
++ *
++ * Returns: The virtual address of the mapping
++ *
++ * Side effect: On return pagefaults and preemption are disabled.
++ *
++ * Can be invoked from any context.
++ *
++ * Requires careful handling when nesting multiple mappings because the map
++ * management is stack based. The unmap has to be in the reverse order of
++ * the map operation:
++ *
++ * addr1 = kmap_atomic(page1);
++ * addr2 = kmap_atomic(page2);
++ * ...
++ * kunmap_atomic(addr2);
++ * kunmap_atomic(addr1);
++ *
++ * Unmapping addr1 before addr2 is invalid and causes malfunction.
++ *
++ * Contrary to kmap() mappings the mapping is only valid in the context of
++ * the caller and cannot be handed to other contexts.
++ *
++ * On CONFIG_HIGHMEM=n kernels and for low memory pages this returns the
++ * virtual address of the direct mapping. Only real highmem pages are
++ * temporarily mapped.
++ *
++ * While it is significantly faster than kmap() it comes with restrictions
++ * about the pointer validity and the side effects of disabling page faults
++ * and preemption. Use it only when absolutely necessary, e.g. from non
++ * preemptible contexts.
++ */
++static inline void *kmap_atomic(struct page *page);
+
+-static inline unsigned long totalhigh_pages(void) { return 0UL; }
++/**
++ * kunmap_atomic - Unmap the virtual address mapped by kmap_atomic()
++ * @addr: Virtual address to be unmapped
++ *
++ * Counterpart to kmap_atomic().
++ *
++ * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
++ * preemption.
++ *
++ * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
++ * in the low memory area. For real highmem pages the mapping which was
++ * established with kmap_atomic() is destroyed.
++ */
+
+-static inline void *kmap(struct page *page)
+-{
+- might_sleep();
+- return page_address(page);
+-}
++/* Highmem related interfaces for management code */
++static inline unsigned int nr_free_highpages(void);
++static inline unsigned long totalhigh_pages(void);
+
+-static inline void kunmap_high(struct page *page)
++#ifndef ARCH_HAS_FLUSH_ANON_PAGE
++static inline void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
+ {
+ }
+-
+-static inline void kunmap(struct page *page)
+-{
+-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+- kunmap_flush_on_unmap(page_address(page));
+ #endif
+-}
+
+-static inline void *kmap_atomic(struct page *page)
++#ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
++static inline void flush_kernel_dcache_page(struct page *page)
+ {
+- preempt_disable();
+- pagefault_disable();
+- return page_address(page);
+ }
+-
+-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++static inline void flush_kernel_vmap_range(void *vaddr, int size)
+ {
+- return kmap_atomic(page);
+ }
+-
+-static inline void *kmap_atomic_pfn(unsigned long pfn)
++static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+ {
+- return kmap_atomic(pfn_to_page(pfn));
+ }
+-
+-static inline void __kunmap_atomic(void *addr)
+-{
+- /*
+- * Mostly nothing to do in the CONFIG_HIGHMEM=n case as kunmap_atomic()
+- * handles re-enabling faults and preemption
+- */
+-#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
+- kunmap_flush_on_unmap(addr);
+ #endif
+-}
+-
+-#define kmap_flush_unused() do {} while(0)
+-
+-
+-#endif /* CONFIG_HIGHMEM */
+-
+-/*
+- * Prevent people trying to call kunmap_atomic() as if it were kunmap()
+- * kunmap_atomic() should get the return value of kmap_atomic, not the page.
+- */
+-#define kunmap_atomic(__addr) \
+-do { \
+- BUILD_BUG_ON(__same_type((__addr), struct page *)); \
+- __kunmap_atomic(__addr); \
+- pagefault_enable(); \
+- preempt_enable(); \
+-} while (0)
+
+ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+ #ifndef clear_user_highpage
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -104,7 +104,7 @@ static inline wait_queue_head_t *get_pkm
+ atomic_long_t _totalhigh_pages __read_mostly;
+ EXPORT_SYMBOL(_totalhigh_pages);
+
+-unsigned int nr_free_highpages (void)
++unsigned int __nr_free_highpages (void)
+ {
+ struct zone *zone;
+ unsigned int pages = 0;
+@@ -141,7 +141,7 @@ pte_t * pkmap_page_table;
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+ #endif
+
+-struct page *kmap_to_page(void *vaddr)
++struct page *__kmap_to_page(void *vaddr)
+ {
+ unsigned long addr = (unsigned long)vaddr;
+
+@@ -152,7 +152,7 @@ struct page *kmap_to_page(void *vaddr)
+
+ return virt_to_page(addr);
+ }
+-EXPORT_SYMBOL(kmap_to_page);
++EXPORT_SYMBOL(__kmap_to_page);
+
+ static void flush_all_zero_pkmaps(void)
+ {
+@@ -194,10 +194,7 @@ static void flush_all_zero_pkmaps(void)
+ flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+ }
+
+-/**
+- * kmap_flush_unused - flush all unused kmap mappings in order to remove stray mappings
+- */
+-void kmap_flush_unused(void)
++void __kmap_flush_unused(void)
+ {
+ lock_kmap();
+ flush_all_zero_pkmaps();
diff --git a/patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
index 34317954e82a..5a4d24d17693 100644
--- a/patches/0001-sched-Make-migrate_disable-enable-independent-of-RT.patch
+++ b/patches/0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 22 Sep 2020 15:52:06 +0200
-Subject: [PATCH 01/18] sched: Make migrate_disable/enable() independent of RT
+Date: Tue, 3 Nov 2020 10:27:35 +0100
+Subject: [PATCH 23/37] sched: Make migrate_disable/enable() independent of RT
Now that the scheduler can deal with migrate disable properly, there is no
real compelling reason to make it only available for RT.
@@ -24,6 +24,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Ben Segall <bsegall@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Daniel Bristot de Oliveira <bristot@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/kernel.h | 21 ++++++++++++++-------
include/linux/preempt.h | 38 +++-----------------------------------
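With migrate_disable()/migrate_enable() available independent of RT, the kmap_local patches later in this queue bracket local mappings with it: the task stays on its CPU, so the fixmap address remains valid, yet preemption and pagefaults stay enabled. Roughly:

    migrate_disable();      /* pin the task to this CPU, stay preemptible */
    addr = __kmap_local_page_prot(page, kmap_prot);
    /* ... access addr; may fault and may be preempted ... */
    kunmap_local_indexed(addr);
    migrate_enable();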
diff --git a/patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch b/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
index 4849c7ea2d4f..099b48159af5 100644
--- a/patches/0016-sched-highmem-Store-local-kmaps-in-task-struct.patch
+++ b/patches/0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Fri, 18 Sep 2020 09:27:11 +0200
-Subject: [PATCH 16/18] sched: highmem: Store local kmaps in task struct
+Date: Tue, 3 Nov 2020 10:27:36 +0100
+Subject: [PATCH 24/37] sched: highmem: Store local kmaps in task struct
Instead of storing the map per CPU provide and use per task storage. That
prepares for local kmaps which are preemptible.
@@ -18,18 +18,19 @@ Also add an assert into the return to user space code. Going back to user
space with an active kmap local is a nono.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/highmem.h | 10 +++++
- include/linux/sched.h | 9 ++++
- kernel/entry/common.c | 2 +
- kernel/fork.c | 1
- kernel/sched/core.c | 18 +++++++++
- mm/highmem.c | 96 +++++++++++++++++++++++++++++++++++++++++-------
- 6 files changed, 123 insertions(+), 13 deletions(-)
+ include/linux/highmem-internal.h | 10 +++
+ include/linux/sched.h | 9 +++
+ kernel/entry/common.c | 2
+ kernel/fork.c | 1
+ kernel/sched/core.c | 18 +++++++
+ mm/highmem.c | 99 +++++++++++++++++++++++++++++++++++----
+ 6 files changed, 129 insertions(+), 10 deletions(-)
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -38,6 +38,16 @@ static inline void invalidate_kernel_vma
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -9,6 +9,16 @@
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot);
void kunmap_local_indexed(void *vaddr);
@@ -52,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/rseq.h>
#include <linux/seqlock.h>
#include <linux/kcsan.h>
-+#include <asm/kmap_types.h>
++#include <asm/kmap_size.h>
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
@@ -63,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+struct kmap_ctrl {
+#ifdef CONFIG_KMAP_LOCAL
+ int idx;
-+ pte_t pteval[KM_TYPE_NR];
++ pte_t pteval[KM_MAX_IDX];
+#endif
+};
+
@@ -149,49 +150,46 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/mm/highmem.c
+++ b/mm/highmem.c
-@@ -367,27 +367,24 @@ EXPORT_SYMBOL(kunmap_high);
+@@ -365,8 +365,6 @@ EXPORT_SYMBOL(kunmap_high);
- #ifdef CONFIG_KMAP_LOCAL
+ #include <asm/kmap_size.h>
--static DEFINE_PER_CPU(int, __kmap_atomic_idx);
+-static DEFINE_PER_CPU(int, __kmap_local_idx);
-
--static inline int kmap_atomic_idx_push(void)
-+static inline int kmap_local_idx_push(void)
- {
-- int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
-+ int idx = current->kmap_ctrl.idx++;
+ /*
+ * With DEBUG_HIGHMEM the stack depth is doubled and every second
+ * slot is unused which acts as a guard page
+@@ -379,23 +377,21 @@ static DEFINE_PER_CPU(int, __kmap_local_
+ static inline int kmap_local_idx_push(void)
+ {
+- int idx = __this_cpu_add_return(__kmap_local_idx, KM_INCR) - 1;
+-
WARN_ON_ONCE(in_irq() && !irqs_disabled());
- BUG_ON(idx >= KM_TYPE_NR);
- return idx;
+- BUG_ON(idx >= KM_MAX_IDX);
+- return idx;
++ current->kmap_ctrl.idx += KM_INCR;
++ BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
++ return current->kmap_ctrl.idx - 1;
}
--static inline int kmap_atomic_idx(void)
-+static inline int kmap_local_idx(void)
+ static inline int kmap_local_idx(void)
{
-- return __this_cpu_read(__kmap_atomic_idx) - 1;
+- return __this_cpu_read(__kmap_local_idx) - 1;
+ return current->kmap_ctrl.idx - 1;
}
--static inline void kmap_atomic_idx_pop(void)
-+static inline void kmap_local_idx_pop(void)
+ static inline void kmap_local_idx_pop(void)
{
-- int idx = __this_cpu_dec_return(__kmap_atomic_idx);
+- int idx = __this_cpu_sub_return(__kmap_local_idx, KM_INCR);
-
- BUG_ON(idx < 0);
-+ current->kmap_ctrl.idx--;
++ current->kmap_ctrl.idx -= KM_INCR;
+ BUG_ON(current->kmap_ctrl.idx < 0);
}
#ifndef arch_kmap_local_post_map
-@@ -447,12 +444,13 @@ void *__kmap_local_pfn_prot(unsigned lon
- int idx;
-
- preempt_disable();
-- idx = arch_kmap_local_map_idx(kmap_atomic_idx_push(), pfn);
-+ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
- vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
- BUG_ON(!pte_none(*(kmap_pte - idx)));
+@@ -461,6 +457,7 @@ void *__kmap_local_pfn_prot(unsigned lon
pteval = pfn_pte(pfn, prot);
set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
arch_kmap_local_post_map(vaddr, pteval);
@@ -199,20 +197,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
return (void *)vaddr;
-@@ -491,16 +489,88 @@ void kunmap_local_indexed(void *vaddr)
- }
-
- preempt_disable();
-- idx = arch_kmap_local_unmap_idx(kmap_atomic_idx(), addr);
-+ idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
- WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
-
+@@ -505,10 +502,92 @@ void kunmap_local_indexed(void *vaddr)
arch_kmap_local_pre_unmap(addr);
pte_clear(&init_mm, addr, kmap_pte - idx);
arch_kmap_local_post_unmap(addr);
-- kmap_atomic_idx_pop();
+ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
-+ kmap_local_idx_pop();
+ kmap_local_idx_pop();
preempt_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);
@@ -239,6 +229,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned long addr;
+ int idx;
+
++ /* With debug all even slots are unmapped and act as guard */
++ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
++ WARN_ON_ONCE(!pte_none(pteval));
++ continue;
++ }
+ if (WARN_ON_ONCE(pte_none(pteval)))
+ continue;
+
@@ -270,6 +265,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned long addr;
+ int idx;
+
++ /* With debug all even slots are unmapped and act as guard */
++ if (IS_ENABLED(CONFIG_DEBUG_HIGHMEM) && !(i & 0x01)) {
++ WARN_ON_ONCE(!pte_none(pteval));
++ continue;
++ }
+ if (WARN_ON_ONCE(pte_none(pteval)))
+ continue;
+
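The guard checks added above skip even slots under CONFIG_DEBUG_HIGHMEM. Assuming KM_INCR is 2 in the debug case (1 otherwise; the constant is defined outside this diff), successive pushes advance kmap_ctrl.idx 0 -> 2 -> 4 and hand out the odd slots:

    kmap_local_idx_push();  /* idx: 0 -> 2, returns slot 1; slot 0 is a guard */
    kmap_local_idx_push();  /* idx: 2 -> 4, returns slot 3; slot 2 is a guard */

The unmapped even slots then trap stray accesses between neighbouring maps.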
diff --git a/patches/0025-mm-highmem-Provide-kmap_local.patch b/patches/0025-mm-highmem-Provide-kmap_local.patch
new file mode 100644
index 000000000000..948a536b83af
--- /dev/null
+++ b/patches/0025-mm-highmem-Provide-kmap_local.patch
@@ -0,0 +1,196 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:37 +0100
+Subject: [PATCH 25/37] mm/highmem: Provide kmap_local*
+
+Now that the kmap atomic index is stored in task struct provide a
+preemptible variant. On context switch the maps of an outgoing task are
+removed and the map of the incoming task are restored. That's obviously
+slow, but highmem is slow anyway.
+
+The kmap_local.*() functions can be invoked from both preemptible and
+atomic context. kmap local sections disable migration to keep the resulting
+virtual mapping address correct, but disable neither pagefaults nor
+preemption.
+
+A wholesale conversion of kmap_atomic to be fully preemptible is not
+possible because some of the usage sites might rely on the preemption
+disable for serialization or on the implicit pagefault disable. Needs to be
+done on a case by case basis.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 48 +++++++++++++++++++++++++++++++++++++++
+ include/linux/highmem.h | 43 +++++++++++++++++++++-------------
+ mm/highmem.c | 6 ++++
+ 3 files changed, 81 insertions(+), 16 deletions(-)
+
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -68,6 +68,26 @@ static inline void kmap_flush_unused(voi
+ __kmap_flush_unused();
+ }
+
++static inline void *kmap_local_page(struct page *page)
++{
++ return __kmap_local_page_prot(page, kmap_prot);
++}
++
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ return __kmap_local_page_prot(page, prot);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ return __kmap_local_pfn_prot(pfn, kmap_prot);
++}
++
++static inline void __kunmap_local(void *vaddr)
++{
++ kunmap_local_indexed(vaddr);
++}
++
+ static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+ {
+ preempt_disable();
+@@ -140,6 +160,28 @@ static inline void kunmap(struct page *p
+ #endif
+ }
+
++static inline void *kmap_local_page(struct page *page)
++{
++ return page_address(page);
++}
++
++static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
++{
++ return kmap_local_page(page);
++}
++
++static inline void *kmap_local_pfn(unsigned long pfn)
++{
++ return kmap_local_page(pfn_to_page(pfn));
++}
++
++static inline void __kunmap_local(void *addr)
++{
++#ifdef ARCH_HAS_FLUSH_ON_KUNMAP
++ kunmap_flush_on_unmap(addr);
++#endif
++}
++
+ static inline void *kmap_atomic(struct page *page)
+ {
+ preempt_disable();
+@@ -181,4 +223,10 @@ do { \
+ __kunmap_atomic(__addr); \
+ } while (0)
+
++#define kunmap_local(__addr) \
++do { \
++ BUILD_BUG_ON(__same_type((__addr), struct page *)); \
++ __kunmap_local(__addr); \
++} while (0)
++
+ #endif
+--- a/include/linux/highmem.h
++++ b/include/linux/highmem.h
+@@ -60,24 +60,22 @@ static inline struct page *kmap_to_page(
+ static inline void kmap_flush_unused(void);
+
+ /**
+- * kmap_atomic - Atomically map a page for temporary usage
++ * kmap_local_page - Map a page for temporary usage
+ * @page: Pointer to the page to be mapped
+ *
+ * Returns: The virtual address of the mapping
+ *
+- * Side effect: On return pagefaults and preemption are disabled.
+- *
+ * Can be invoked from any context.
+ *
+ * Requires careful handling when nesting multiple mappings because the map
+ * management is stack based. The unmap has to be in the reverse order of
+ * the map operation:
+ *
+- * addr1 = kmap_atomic(page1);
+- * addr2 = kmap_atomic(page2);
++ * addr1 = kmap_local_page(page1);
++ * addr2 = kmap_local_page(page2);
+ * ...
+- * kunmap_atomic(addr2);
+- * kunmap_atomic(addr1);
++ * kunmap_local(addr2);
++ * kunmap_local(addr1);
+ *
+ * Unmapping addr1 before addr2 is invalid and causes malfunction.
+ *
+@@ -88,10 +86,26 @@ static inline void kmap_flush_unused(voi
+ * virtual address of the direct mapping. Only real highmem pages are
+ * temporarily mapped.
+ *
+- * While it is significantly faster than kmap() it comes with restrictions
+- * about the pointer validity and the side effects of disabling page faults
+- * and preemption. Use it only when absolutely necessary, e.g. from non
+- * preemptible contexts.
++ * While it is significantly faster than kmap() for the highmem case it
++ * comes with restrictions about the pointer validity. Only use when really
++ * necessary.
++ *
++ * On HIGHMEM enabled systems mapping a highmem page has the side effect of
++ * disabling migration in order to keep the virtual address stable across
++ * preemption. No caller of kmap_local_page() can rely on this side effect.
++ */
++static inline void *kmap_local_page(struct page *page);
++
++/**
++ * kmap_atomic - Atomically map a page for temporary usage - Deprecated!
++ * @page: Pointer to the page to be mapped
++ *
++ * Returns: The virtual address of the mapping
++ *
++ * Effectively a wrapper around kmap_local_page() which disables pagefaults
++ * and preemption.
++ *
++ * Do not use in new code. Use kmap_local_page() instead.
+ */
+ static inline void *kmap_atomic(struct page *page);
+
+@@ -101,12 +115,9 @@ static inline void *kmap_atomic(struct p
+ *
+ * Counterpart to kmap_atomic().
+ *
+- * Undoes the side effects of kmap_atomic(), i.e. reenabling pagefaults and
++ * Effectively a wrapper around kunmap_local() which additionally undoes
++ * the side effects of kmap_atomic(), i.e. reenabling pagefaults and
+ * preemption.
+- *
+- * Other than that a NOOP for CONFIG_HIGHMEM=n and for mappings of pages
+- * in the low memory area. For real highmen pages the mapping which was
+- * established with kmap_atomic() is destroyed.
+ */
+
+ /* Highmem related interfaces for management code */
+--- a/mm/highmem.c
++++ b/mm/highmem.c
+@@ -450,6 +450,11 @@ void *__kmap_local_pfn_prot(unsigned lon
+ unsigned long vaddr;
+ int idx;
+
++ /*
++ * Disable migration so the resulting virtual address is stable
++ * across preemption.
++ */
++ migrate_disable();
+ preempt_disable();
+ idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
+ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
+@@ -505,6 +510,7 @@ void kunmap_local_indexed(void *vaddr)
+ current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
+ kmap_local_idx_pop();
+ preempt_enable();
++ migrate_enable();
+ }
+ EXPORT_SYMBOL(kunmap_local_indexed);
+
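As a concrete illustration of the stack-based nesting rule documented above,
here is a minimal sketch of a caller copying between two highmem pages. The
helper name is invented for this example and is not part of the patch::

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Hypothetical helper: copy one page to another via local mappings.
     * The maps nest on a per-task stack, so they must be undone in the
     * reverse order of the map operations. */
    static void copy_page_local_sketch(struct page *dst, struct page *src)
    {
            void *vdst = kmap_local_page(dst);
            void *vsrc = kmap_local_page(src);

            memcpy(vdst, vsrc, PAGE_SIZE);

            kunmap_local(vsrc);     /* mapped last, unmapped first */
            kunmap_local(vdst);
    }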
diff --git a/patches/0026-io-mapping-Provide-iomap_local-variant.patch b/patches/0026-io-mapping-Provide-iomap_local-variant.patch
new file mode 100644
index 000000000000..6a2f42181d57
--- /dev/null
+++ b/patches/0026-io-mapping-Provide-iomap_local-variant.patch
@@ -0,0 +1,171 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:38 +0100
+Subject: [PATCH 26/37] io-mapping: Provide iomap_local variant
+
+Similar to kmap_local, provide an iomap_local variant which only disables
+migration, but neither disables pagefaults nor preemption.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/driver-api/io-mapping.rst | 76 +++++++++++++++++++-------------
+ include/linux/io-mapping.h | 30 +++++++++++-
+ 2 files changed, 74 insertions(+), 32 deletions(-)
+
+--- a/Documentation/driver-api/io-mapping.rst
++++ b/Documentation/driver-api/io-mapping.rst
+@@ -20,55 +20,71 @@ as it would consume too much of the kern
+ mappable, while 'size' indicates how large a mapping region to
+ enable. Both are in bytes.
+
+-This _wc variant provides a mapping which may only be used
+-with the io_mapping_map_atomic_wc or io_mapping_map_wc.
++This _wc variant provides a mapping which may only be used with
++io_mapping_map_atomic_wc(), io_mapping_map_local_wc() or
++io_mapping_map_wc().
++
++With this mapping object, individual pages can be mapped either temporarily
++or long term, depending on the requirements. Of course, temporary maps are
++more efficient. They come in two flavours::
+
+-With this mapping object, individual pages can be mapped either atomically
+-or not, depending on the necessary scheduling environment. Of course, atomic
+-maps are more efficient::
++ void *io_mapping_map_local_wc(struct io_mapping *mapping,
++ unsigned long offset)
+
+ void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
+ unsigned long offset)
+
+-'offset' is the offset within the defined mapping region.
+-Accessing addresses beyond the region specified in the
+-creation function yields undefined results. Using an offset
+-which is not page aligned yields an undefined result. The
+-return value points to a single page in CPU address space.
+-
+-This _wc variant returns a write-combining map to the
+-page and may only be used with mappings created by
+-io_mapping_create_wc
++'offset' is the offset within the defined mapping region. Accessing
++addresses beyond the region specified in the creation function yields
++undefined results. Using an offset which is not page aligned yields an
++undefined result. The return value points to a single page in CPU address
++space.
+
+-Note that the task may not sleep while holding this page
+-mapped.
++This _wc variant returns a write-combining map to the page and may only be
++used with mappings created by io_mapping_create_wc()
+
+-::
++Temporary mappings are only valid in the context of the caller. The mapping
++is not guaranteed to be globally visible.
+
+- void io_mapping_unmap_atomic(void *vaddr)
++io_mapping_map_local_wc() has a side effect on X86 32bit as it disables
++migration to make the mapping code work. No caller can rely on this side
++effect.
++
++io_mapping_map_atomic_wc() has the side effect of disabling preemption and
++pagefaults. Don't use in new code. Use io_mapping_map_local_wc() instead.
+
+-'vaddr' must be the value returned by the last
+-io_mapping_map_atomic_wc call. This unmaps the specified
+-page and allows the task to sleep once again.
++Nested mappings need to be undone in reverse order because the mapping
++code uses a stack for keeping track of them::
+
+-If you need to sleep while holding the lock, you can use the non-atomic
+-variant, although they may be significantly slower.
++ addr1 = io_mapping_map_local_wc(map1, offset1);
++ addr2 = io_mapping_map_local_wc(map2, offset2);
++ ...
++ io_mapping_unmap_local(addr2);
++ io_mapping_unmap_local(addr1);
+
+-::
++The mappings are released with::
++
++ void io_mapping_unmap_local(void *vaddr)
++ void io_mapping_unmap_atomic(void *vaddr)
++
++'vaddr' must be the value returned by the last io_mapping_map_local_wc() or
++io_mapping_map_atomic_wc() call. This unmaps the specified mapping and
++undoes the side effects of the mapping functions.
++
++If you need to sleep while holding a mapping, you can use the regular
++variant, although this may be significantly slower::
+
+ void *io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset)
+
+-This works like io_mapping_map_atomic_wc except it allows
+-the task to sleep while holding the page mapped.
+-
++This works like io_mapping_map_atomic/local_wc() except it has no side
++effects and the pointer is globally visible.
+
+-::
++The mappings are released with::
+
+ void io_mapping_unmap(void *vaddr)
+
+-This works like io_mapping_unmap_atomic, except it is used
+-for pages mapped with io_mapping_map_wc.
++Use for pages mapped with io_mapping_map_wc().
+
+ At driver close time, the io_mapping object must be freed::
+
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -83,6 +83,21 @@ io_mapping_unmap_atomic(void __iomem *va
+ }
+
+ static inline void __iomem *
++io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
++{
++ resource_size_t phys_addr;
++
++ BUG_ON(offset >= mapping->size);
++ phys_addr = mapping->base + offset;
++ return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
++}
++
++static inline void io_mapping_unmap_local(void __iomem *vaddr)
++{
++ kunmap_local_indexed((void __force *)vaddr);
++}
++
++static inline void __iomem *
+ io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset,
+ unsigned long size)
+@@ -101,7 +116,7 @@ io_mapping_unmap(void __iomem *vaddr)
+ iounmap(vaddr);
+ }
+
+-#else
++#else /* HAVE_ATOMIC_IOMAP */
+
+ #include <linux/uaccess.h>
+
+@@ -166,7 +181,18 @@ io_mapping_unmap_atomic(void __iomem *va
+ preempt_enable();
+ }
+
+-#endif /* HAVE_ATOMIC_IOMAP */
++static inline void __iomem *
++io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
++{
++ return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
++}
++
++static inline void io_mapping_unmap_local(void __iomem *vaddr)
++{
++ io_mapping_unmap(vaddr);
++}
++
++#endif /* !HAVE_ATOMIC_IOMAP */
+
+ static inline struct io_mapping *
+ io_mapping_create_wc(resource_size_t base,
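To make the new interface concrete, below is a minimal sketch of a driver
helper which pokes one 32bit value through a write-combined BAR window. The
function name is hypothetical and it is assumed that 'map' was created with
io_mapping_create_wc()::

    #include <linux/io.h>
    #include <linux/io-mapping.h>

    /* Hypothetical helper: temporarily map one page of the WC region and
     * write a single 32bit value. Unlike the _atomic_wc variant this may
     * run preemptible and the mapping is only valid in this context. */
    static void fb_poke_local_sketch(struct io_mapping *map,
                                     unsigned long off, u32 val)
    {
            void __iomem *p = io_mapping_map_local_wc(map, off & PAGE_MASK);

            iowrite32(val, p + (off & ~PAGE_MASK));
            io_mapping_unmap_local(p);
    }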
diff --git a/patches/0027-x86-crashdump-32-Simplify-copy_oldmem_page.patch b/patches/0027-x86-crashdump-32-Simplify-copy_oldmem_page.patch
new file mode 100644
index 000000000000..121c8c40e31d
--- /dev/null
+++ b/patches/0027-x86-crashdump-32-Simplify-copy_oldmem_page.patch
@@ -0,0 +1,92 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:39 +0100
+Subject: [PATCH 27/37] x86/crashdump/32: Simplify copy_oldmem_page()
+
+Replace kmap_atomic_pfn() with kmap_local_pfn() which is preemptible and
+can take page faults.
+
+Remove the indirection of the dump page and the related cruft which is no
+longer required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/crash_dump_32.c | 48 ++++++++--------------------------------
+ 1 file changed, 10 insertions(+), 38 deletions(-)
+
+--- a/arch/x86/kernel/crash_dump_32.c
++++ b/arch/x86/kernel/crash_dump_32.c
+@@ -13,8 +13,6 @@
+
+ #include <linux/uaccess.h>
+
+-static void *kdump_buf_page;
+-
+ static inline bool is_crashed_pfn_valid(unsigned long pfn)
+ {
+ #ifndef CONFIG_X86_PAE
+@@ -41,15 +39,11 @@ static inline bool is_crashed_pfn_valid(
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- *
+- * Calling copy_to_user() in atomic context is not desirable. Hence first
+- * copying the data to a pre-allocated kernel page and then copying to user
+- * space in non-atomic context.
++ * Copy a page from "oldmem". For this page, there might be no pte mapped
++ * in the current kernel.
+ */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
++ unsigned long offset, int userbuf)
+ {
+ void *vaddr;
+
+@@ -59,38 +53,16 @@ ssize_t copy_oldmem_page(unsigned long p
+ if (!is_crashed_pfn_valid(pfn))
+ return -EFAULT;
+
+- vaddr = kmap_atomic_pfn(pfn);
++ vaddr = kmap_local_pfn(pfn);
+
+ if (!userbuf) {
+- memcpy(buf, (vaddr + offset), csize);
+- kunmap_atomic(vaddr);
++ memcpy(buf, vaddr + offset, csize);
+ } else {
+- if (!kdump_buf_page) {
+- printk(KERN_WARNING "Kdump: Kdump buffer page not"
+- " allocated\n");
+- kunmap_atomic(vaddr);
+- return -EFAULT;
+- }
+- copy_page(kdump_buf_page, vaddr);
+- kunmap_atomic(vaddr);
+- if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+- return -EFAULT;
++ if (copy_to_user(buf, vaddr + offset, csize))
++ csize = -EFAULT;
+ }
+
+- return csize;
+-}
++ kunmap_local(vaddr);
+
+-static int __init kdump_buf_page_init(void)
+-{
+- int ret = 0;
+-
+- kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+- if (!kdump_buf_page) {
+- printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+- " page\n");
+- ret = -ENOMEM;
+- }
+-
+- return ret;
++ return csize;
+ }
+-arch_initcall(kdump_buf_page_init);
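The simplification rests on one property worth spelling out: kmap_local_pfn()
leaves pagefaults enabled, so the user copy may fault and sleep and no bounce
page is needed. A self-contained sketch of the resulting shape, with the
function name invented for illustration::

    #include <linux/highmem.h>
    #include <linux/uaccess.h>

    /* Sketch of the post-patch pattern: map, copy straight to user space
     * (the copy may fault), unmap. */
    static ssize_t copy_old_pfn_sketch(unsigned long pfn, char __user *ubuf,
                                       size_t csize, unsigned long offset)
    {
            void *vaddr = kmap_local_pfn(pfn);

            if (copy_to_user(ubuf, vaddr + offset, csize))
                    csize = -EFAULT;

            kunmap_local(vaddr);
            return csize;
    }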
diff --git a/patches/0028-mips-crashdump-Simplify-copy_oldmem_page.patch b/patches/0028-mips-crashdump-Simplify-copy_oldmem_page.patch
new file mode 100644
index 000000000000..6d36f421d39f
--- /dev/null
+++ b/patches/0028-mips-crashdump-Simplify-copy_oldmem_page.patch
@@ -0,0 +1,88 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:40 +0100
+Subject: [PATCH 28/37] mips/crashdump: Simplify copy_oldmem_page()
+
+Replace kmap_atomic_pfn() with kmap_local_pfn() which is preemptible and
+can take page faults.
+
+Remove the indirection of the dump page and the related cruft which is no
+longer required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Cc: linux-mips@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/mips/kernel/crash_dump.c | 42 +++++++-----------------------------------
+ 1 file changed, 7 insertions(+), 35 deletions(-)
+
+--- a/arch/mips/kernel/crash_dump.c
++++ b/arch/mips/kernel/crash_dump.c
+@@ -5,8 +5,6 @@
+ #include <linux/uaccess.h>
+ #include <linux/slab.h>
+
+-static void *kdump_buf_page;
+-
+ /**
+ * copy_oldmem_page - copy one page from "oldmem"
+ * @pfn: page frame number to be copied
+@@ -17,51 +15,25 @@ static void *kdump_buf_page;
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
++ * Copy a page from "oldmem". For this page, there might be no pte mapped
+ * in the current kernel.
+- *
+- * Calling copy_to_user() in atomic context is not desirable. Hence first
+- * copying the data to a pre-allocated kernel page and then copying to user
+- * space in non-atomic context.
+ */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
++ unsigned long offset, int userbuf)
+ {
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+- vaddr = kmap_atomic_pfn(pfn);
++ vaddr = kmap_local_pfn(pfn);
+
+ if (!userbuf) {
+- memcpy(buf, (vaddr + offset), csize);
+- kunmap_atomic(vaddr);
++ memcpy(buf, vaddr + offset, csize);
+ } else {
+- if (!kdump_buf_page) {
+- pr_warn("Kdump: Kdump buffer page not allocated\n");
+-
+- return -EFAULT;
+- }
+- copy_page(kdump_buf_page, vaddr);
+- kunmap_atomic(vaddr);
+- if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+- return -EFAULT;
++ if (copy_to_user(buf, vaddr + offset, csize))
++ csize = -EFAULT;
+ }
+
+ return csize;
+ }
+-
+-static int __init kdump_buf_page_init(void)
+-{
+- int ret = 0;
+-
+- kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+- if (!kdump_buf_page) {
+- pr_warn("Kdump: Failed to allocate kdump buffer page\n");
+- ret = -ENOMEM;
+- }
+-
+- return ret;
+-}
+-arch_initcall(kdump_buf_page_init);
diff --git a/patches/0029-ARM-mm-Replace-kmap_atomic_pfn.patch b/patches/0029-ARM-mm-Replace-kmap_atomic_pfn.patch
new file mode 100644
index 000000000000..2980e5184cbd
--- /dev/null
+++ b/patches/0029-ARM-mm-Replace-kmap_atomic_pfn.patch
@@ -0,0 +1,62 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:41 +0100
+Subject: [PATCH 29/37] ARM: mm: Replace kmap_atomic_pfn()
+
+There is no requirement to disable pagefaults and preemption for these
+cache management mappings.
+
+Replace kmap_atomic_pfn() with kmap_local_pfn(). This allows removing
+kmap_atomic_pfn() in the next step.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: linux-arm-kernel@lists.infradead.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm/mm/cache-feroceon-l2.c | 6 +++---
+ arch/arm/mm/cache-xsc3l2.c | 4 ++--
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/mm/cache-feroceon-l2.c
++++ b/arch/arm/mm/cache-feroceon-l2.c
+@@ -49,9 +49,9 @@ static inline unsigned long l2_get_va(un
+ * we simply install a virtual mapping for it only for the
+ * TLB lookup to occur, hence no need to flush the untouched
+ * memory mapping afterwards (note: a cache flush may happen
+- * in some circumstances depending on the path taken in kunmap_atomic).
++ * in some circumstances depending on the path taken in kunmap_local).
+ */
+- void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
++ void *vaddr = kmap_local_pfn(paddr >> PAGE_SHIFT);
+ return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
+ #else
+ return __phys_to_virt(paddr);
+@@ -61,7 +61,7 @@ static inline unsigned long l2_get_va(un
+ static inline void l2_put_va(unsigned long vaddr)
+ {
+ #ifdef CONFIG_HIGHMEM
+- kunmap_atomic((void *)vaddr);
++ kunmap_local((void *)vaddr);
+ #endif
+ }
+
+--- a/arch/arm/mm/cache-xsc3l2.c
++++ b/arch/arm/mm/cache-xsc3l2.c
+@@ -59,7 +59,7 @@ static inline void l2_unmap_va(unsigned
+ {
+ #ifdef CONFIG_HIGHMEM
+ if (va != -1)
+- kunmap_atomic((void *)va);
++ kunmap_local((void *)va);
+ #endif
+ }
+
+@@ -75,7 +75,7 @@ static inline unsigned long l2_map_va(un
+ * in place for it.
+ */
+ l2_unmap_va(prev_va);
+- va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
++ va = (unsigned long)kmap_local_pfn(pa >> PAGE_SHIFT);
+ }
+ return va + (pa_offset >> (32 - PAGE_SHIFT));
+ #else
diff --git a/patches/0030-highmem-Remove-kmap_atomic_pfn.patch b/patches/0030-highmem-Remove-kmap_atomic_pfn.patch
new file mode 100644
index 000000000000..cecae9d5f61b
--- /dev/null
+++ b/patches/0030-highmem-Remove-kmap_atomic_pfn.patch
@@ -0,0 +1,40 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:42 +0100
+Subject: [PATCH 30/37] highmem: Remove kmap_atomic_pfn()
+
+No more users.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 12 ------------
+ 1 file changed, 12 deletions(-)
+
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -100,13 +100,6 @@ static inline void *kmap_atomic(struct p
+ return kmap_atomic_prot(page, kmap_prot);
+ }
+
+-static inline void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- preempt_disable();
+- pagefault_disable();
+- return __kmap_local_pfn_prot(pfn, kmap_prot);
+-}
+-
+ static inline void __kunmap_atomic(void *addr)
+ {
+ kunmap_local_indexed(addr);
+@@ -194,11 +187,6 @@ static inline void *kmap_atomic_prot(str
+ return kmap_atomic(page);
+ }
+
+-static inline void *kmap_atomic_pfn(unsigned long pfn)
+-{
+- return kmap_atomic(pfn_to_page(pfn));
+-}
+-
+ static inline void __kunmap_atomic(void *addr)
+ {
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
diff --git a/patches/0031-drm-ttm-Replace-kmap_atomic-usage.patch b/patches/0031-drm-ttm-Replace-kmap_atomic-usage.patch
new file mode 100644
index 000000000000..1e951311f15c
--- /dev/null
+++ b/patches/0031-drm-ttm-Replace-kmap_atomic-usage.patch
@@ -0,0 +1,67 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:43 +0100
+Subject: [PATCH 31/37] drm/ttm: Replace kmap_atomic() usage
+
+There is no reason to disable pagefaults and preemption as a side effect of
+kmap_atomic_prot().
+
+Use kmap_local_page_prot() instead and document the reasoning for the
+mapping usage with the given pgprot.
+
+Remove the NULL pointer check for the map. These functions return a valid
+address for valid pages and the return was bogus anyway as it would have
+left preemption and pagefaults disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Christian Koenig <christian.koenig@amd.com>
+Cc: Huang Rui <ray.huang@amd.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/ttm/ttm_bo_util.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
+@@ -181,13 +181,15 @@ static int ttm_copy_io_ttm_page(struct t
+ return -ENOMEM;
+
+ src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
+- dst = kmap_atomic_prot(d, prot);
+- if (!dst)
+- return -ENOMEM;
++ /*
++ * Ensure that a highmem page is mapped with the correct
++ * pgprot. For non highmem the mapping is already there.
++ */
++ dst = kmap_local_page_prot(d, prot);
+
+ memcpy_fromio(dst, src, PAGE_SIZE);
+
+- kunmap_atomic(dst);
++ kunmap_local(dst);
+
+ return 0;
+ }
+@@ -203,13 +205,15 @@ static int ttm_copy_ttm_io_page(struct t
+ return -ENOMEM;
+
+ dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
+- src = kmap_atomic_prot(s, prot);
+- if (!src)
+- return -ENOMEM;
++ /*
++ * Ensure that a highmem page is mapped with the correct
++ * pgprot. For non highmem the mapping is already there.
++ */
++ src = kmap_local_page_prot(s, prot);
+
+ memcpy_toio(dst, src, PAGE_SIZE);
+
+- kunmap_atomic(src);
++ kunmap_local(src);
+
+ return 0;
+ }
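The pgprot aware variant deserves a standalone illustration: for a highmem
page it installs a temporary mapping with the requested caching attributes,
for a lowmem page the existing kernel mapping is reused. A minimal sketch
with a hypothetical helper name::

    #include <linux/highmem.h>
    #include <linux/io.h>

    /* Hypothetical helper: copy one page of MMIO data into a page which
     * must be accessed with a specific pgprot. */
    static void io_to_page_sketch(struct page *d, const void __iomem *src,
                                  pgprot_t prot)
    {
            void *dst = kmap_local_page_prot(d, prot);

            memcpy_fromio(dst, src, PAGE_SIZE);
            kunmap_local(dst);
    }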
diff --git a/patches/0032-drm-vmgfx-Replace-kmap_atomic.patch b/patches/0032-drm-vmgfx-Replace-kmap_atomic.patch
new file mode 100644
index 000000000000..589807cdda40
--- /dev/null
+++ b/patches/0032-drm-vmgfx-Replace-kmap_atomic.patch
@@ -0,0 +1,97 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:44 +0100
+Subject: [PATCH 32/37] drm/vmgfx: Replace kmap_atomic()
+
+There is no reason to disable pagefaults and preemption as a side effect of
+kmap_atomic_prot().
+
+Use kmap_local_page_prot() instead and document the reasoning for the
+mapping usage with the given pgprot.
+
+Remove the NULL pointer check for the map. These functions return a valid
+address for valid pages and the return was bogus anyway as it would have
+left preemption and pagefaults disabled.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
+Cc: Roland Scheidegger <sroland@vmware.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 30 ++++++++++++------------------
+ 1 file changed, 12 insertions(+), 18 deletions(-)
+
+--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+@@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct v
+ copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
+
+ if (unmap_src) {
+- kunmap_atomic(d->src_addr);
++ kunmap_local(d->src_addr);
+ d->src_addr = NULL;
+ }
+
+ if (unmap_dst) {
+- kunmap_atomic(d->dst_addr);
++ kunmap_local(d->dst_addr);
+ d->dst_addr = NULL;
+ }
+
+@@ -388,12 +388,8 @@ static int vmw_bo_cpu_blit_line(struct v
+ if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
+ return -EINVAL;
+
+- d->dst_addr =
+- kmap_atomic_prot(d->dst_pages[dst_page],
+- d->dst_prot);
+- if (!d->dst_addr)
+- return -ENOMEM;
+-
++ d->dst_addr = kmap_local_page_prot(d->dst_pages[dst_page],
++ d->dst_prot);
+ d->mapped_dst = dst_page;
+ }
+
+@@ -401,12 +397,8 @@ static int vmw_bo_cpu_blit_line(struct v
+ if (WARN_ON_ONCE(src_page >= d->src_num_pages))
+ return -EINVAL;
+
+- d->src_addr =
+- kmap_atomic_prot(d->src_pages[src_page],
+- d->src_prot);
+- if (!d->src_addr)
+- return -ENOMEM;
+-
++ d->src_addr = kmap_local_page_prot(d->src_pages[src_page],
++ d->src_prot);
+ d->mapped_src = src_page;
+ }
+ diff->do_cpy(diff, d->dst_addr + dst_page_offset,
+@@ -436,8 +428,10 @@ static int vmw_bo_cpu_blit_line(struct v
+ *
+ * Performs a CPU blit from one buffer object to another avoiding a full
+ * bo vmap which may exhaust- or fragment vmalloc space.
+- * On supported architectures (x86), we're using kmap_atomic which avoids
+- * cross-processor TLB- and cache flushes and may, on non-HIGHMEM systems
++ *
++ * On supported architectures (x86), we're using kmap_local_page_prot() which
++ * avoids cross-processor TLB- and cache flushes. kmap_local_page_prot() will
++ * either map a highmem page with the proper pgprot on HIGHMEM=y systems or
+ * reference already set-up mappings.
+ *
+ * Neither of the buffer objects may be placed in PCI memory
+@@ -500,9 +494,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_ob
+ }
+ out:
+ if (d.src_addr)
+- kunmap_atomic(d.src_addr);
++ kunmap_local(d.src_addr);
+ if (d.dst_addr)
+- kunmap_atomic(d.dst_addr);
++ kunmap_local(d.dst_addr);
+
+ return ret;
+ }
diff --git a/patches/0033-highmem-Remove-kmap_atomic_prot.patch b/patches/0033-highmem-Remove-kmap_atomic_prot.patch
new file mode 100644
index 000000000000..203927184719
--- /dev/null
+++ b/patches/0033-highmem-Remove-kmap_atomic_prot.patch
@@ -0,0 +1,45 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:45 +0100
+Subject: [PATCH 33/37] highmem: Remove kmap_atomic_prot()
+
+No more users.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/highmem-internal.h | 14 ++------------
+ 1 file changed, 2 insertions(+), 12 deletions(-)
+
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -88,16 +88,11 @@ static inline void __kunmap_local(void *
+ kunmap_local_indexed(vaddr);
+ }
+
+-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
++static inline void *kmap_atomic(struct page *page)
+ {
+ preempt_disable();
+ pagefault_disable();
+- return __kmap_local_page_prot(page, prot);
+-}
+-
+-static inline void *kmap_atomic(struct page *page)
+-{
+- return kmap_atomic_prot(page, kmap_prot);
++ return __kmap_local_page_prot(page, kmap_prot);
+ }
+
+ static inline void __kunmap_atomic(void *addr)
+@@ -182,11 +177,6 @@ static inline void *kmap_atomic(struct p
+ return page_address(page);
+ }
+
+-static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+-{
+- return kmap_atomic(page);
+-}
+-
+ static inline void __kunmap_atomic(void *addr)
+ {
+ #ifdef ARCH_HAS_FLUSH_ON_KUNMAP
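With kmap_atomic() reduced to kmap_local plus the preempt/pagefault disable,
converting the remaining callers is mechanical. A hedged sketch with an
invented caller::

    #include <linux/highmem.h>

    /* Hypothetical caller: read the first word of a page.
     * Old:  addr = kmap_atomic(page); ... kunmap_atomic(addr);
     * New, preemptible and able to take pagefaults: */
    static u32 read_first_word_sketch(struct page *page)
    {
            void *addr = kmap_local_page(page);
            u32 val = *(u32 *)addr;

            kunmap_local(addr);
            return val;
    }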
diff --git a/patches/0034-drm-qxl-Replace-io_mapping_map_atomic_wc.patch b/patches/0034-drm-qxl-Replace-io_mapping_map_atomic_wc.patch
new file mode 100644
index 000000000000..216e2fe1922c
--- /dev/null
+++ b/patches/0034-drm-qxl-Replace-io_mapping_map_atomic_wc.patch
@@ -0,0 +1,242 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:46 +0100
+Subject: [PATCH 34/37] drm/qxl: Replace io_mapping_map_atomic_wc()
+
+None of these mappings requires the side effect of disabling pagefaults and
+preemption.
+
+Use io_mapping_map_local_wc() instead, rename the related functions
+accordingly and clean up qxl_process_single_command() to use a plain
+copy_from_user() as the local maps do not disable pagefaults.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Dave Airlie <airlied@redhat.com>
+Cc: Gerd Hoffmann <kraxel@redhat.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: virtualization@lists.linux-foundation.org
+Cc: spice-devel@lists.freedesktop.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/qxl/qxl_image.c | 18 +++++++++---------
+ drivers/gpu/drm/qxl/qxl_ioctl.c | 27 +++++++++++++--------------
+ drivers/gpu/drm/qxl/qxl_object.c | 12 ++++++------
+ drivers/gpu/drm/qxl/qxl_object.h | 4 ++--
+ drivers/gpu/drm/qxl/qxl_release.c | 4 ++--
+ 5 files changed, 32 insertions(+), 33 deletions(-)
+
+--- a/drivers/gpu/drm/qxl/qxl_image.c
++++ b/drivers/gpu/drm/qxl/qxl_image.c
+@@ -124,12 +124,12 @@ qxl_image_init_helper(struct qxl_device
+ wrong (check the bitmaps are sent correctly
+ first) */
+
+- ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
++ ptr = qxl_bo_kmap_local_page(qdev, chunk_bo, 0);
+ chunk = ptr;
+ chunk->data_size = height * chunk_stride;
+ chunk->prev_chunk = 0;
+ chunk->next_chunk = 0;
+- qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, chunk_bo, ptr);
+
+ {
+ void *k_data, *i_data;
+@@ -143,7 +143,7 @@ qxl_image_init_helper(struct qxl_device
+ i_data = (void *)data;
+
+ while (remain > 0) {
+- ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
++ ptr = qxl_bo_kmap_local_page(qdev, chunk_bo, page << PAGE_SHIFT);
+
+ if (page == 0) {
+ chunk = ptr;
+@@ -157,7 +157,7 @@ qxl_image_init_helper(struct qxl_device
+
+ memcpy(k_data, i_data, size);
+
+- qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, chunk_bo, ptr);
+ i_data += size;
+ remain -= size;
+ page++;
+@@ -175,10 +175,10 @@ qxl_image_init_helper(struct qxl_device
+ page_offset = offset_in_page(out_offset);
+ size = min((int)(PAGE_SIZE - page_offset), remain);
+
+- ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
++ ptr = qxl_bo_kmap_local_page(qdev, chunk_bo, page_base);
+ k_data = ptr + page_offset;
+ memcpy(k_data, i_data, size);
+- qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, chunk_bo, ptr);
+ remain -= size;
+ i_data += size;
+ out_offset += size;
+@@ -189,7 +189,7 @@ qxl_image_init_helper(struct qxl_device
+ qxl_bo_kunmap(chunk_bo);
+
+ image_bo = dimage->bo;
+- ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
++ ptr = qxl_bo_kmap_local_page(qdev, image_bo, 0);
+ image = ptr;
+
+ image->descriptor.id = 0;
+@@ -212,7 +212,7 @@ qxl_image_init_helper(struct qxl_device
+ break;
+ default:
+ DRM_ERROR("unsupported image bit depth\n");
+- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, image_bo, ptr);
+ return -EINVAL;
+ }
+ image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+@@ -222,7 +222,7 @@ qxl_image_init_helper(struct qxl_device
+ image->u.bitmap.palette = 0;
+ image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
+
+- qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, image_bo, ptr);
+
+ return 0;
+ }
+--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
++++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
+@@ -89,11 +89,11 @@ apply_reloc(struct qxl_device *qdev, str
+ {
+ void *reloc_page;
+
+- reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
++ reloc_page = qxl_bo_kmap_local_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
+ info->src_bo,
+ info->src_offset);
+- qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
++ qxl_bo_kunmap_local_page(qdev, info->dst_bo, reloc_page);
+ }
+
+ static void
+@@ -105,9 +105,9 @@ apply_surf_reloc(struct qxl_device *qdev
+ if (info->src_bo && !info->src_bo->is_primary)
+ id = info->src_bo->surface_id;
+
+- reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
++ reloc_page = qxl_bo_kmap_local_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK);
+ *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id;
+- qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page);
++ qxl_bo_kunmap_local_page(qdev, info->dst_bo, reloc_page);
+ }
+
+ /* return holding the reference to this object */
+@@ -149,7 +149,6 @@ static int qxl_process_single_command(st
+ struct qxl_bo *cmd_bo;
+ void *fb_cmd;
+ int i, ret, num_relocs;
+- int unwritten;
+
+ switch (cmd->type) {
+ case QXL_CMD_DRAW:
+@@ -185,21 +184,21 @@ static int qxl_process_single_command(st
+ goto out_free_reloc;
+
+ /* TODO copy slow path code from i915 */
+- fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
+- unwritten = __copy_from_user_inatomic_nocache
+- (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK),
+- u64_to_user_ptr(cmd->command), cmd->command_size);
++ fb_cmd = qxl_bo_kmap_local_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK));
+
+- {
++ if (copy_from_user(fb_cmd + sizeof(union qxl_release_info) +
++ (release->release_offset & ~PAGE_MASK),
++ u64_to_user_ptr(cmd->command), cmd->command_size)) {
++ ret = -EFAULT;
++ } else {
+ struct qxl_drawable *draw = fb_cmd;
+
+ draw->mm_time = qdev->rom->mm_clock;
+ }
+
+- qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
+- if (unwritten) {
+- DRM_ERROR("got unwritten %d\n", unwritten);
+- ret = -EFAULT;
++ qxl_bo_kunmap_local_page(qdev, cmd_bo, fb_cmd);
++ if (ret) {
++ DRM_ERROR("copy from user failed %d\n", ret);
+ goto out_free_release;
+ }
+
+--- a/drivers/gpu/drm/qxl/qxl_object.c
++++ b/drivers/gpu/drm/qxl/qxl_object.c
+@@ -172,8 +172,8 @@ int qxl_bo_kmap(struct qxl_bo *bo, void
+ return 0;
+ }
+
+-void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
+- struct qxl_bo *bo, int page_offset)
++void *qxl_bo_kmap_local_page(struct qxl_device *qdev,
++ struct qxl_bo *bo, int page_offset)
+ {
+ unsigned long offset;
+ void *rptr;
+@@ -188,7 +188,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl
+ goto fallback;
+
+ offset = bo->tbo.mem.start << PAGE_SHIFT;
+- return io_mapping_map_atomic_wc(map, offset + page_offset);
++ return io_mapping_map_local_wc(map, offset + page_offset);
+ fallback:
+ if (bo->kptr) {
+ rptr = bo->kptr + (page_offset * PAGE_SIZE);
+@@ -214,14 +214,14 @@ void qxl_bo_kunmap(struct qxl_bo *bo)
+ ttm_bo_kunmap(&bo->kmap);
+ }
+
+-void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
+- struct qxl_bo *bo, void *pmap)
++void qxl_bo_kunmap_local_page(struct qxl_device *qdev,
++ struct qxl_bo *bo, void *pmap)
+ {
+ if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
+ (bo->tbo.mem.mem_type != TTM_PL_PRIV))
+ goto fallback;
+
+- io_mapping_unmap_atomic(pmap);
++ io_mapping_unmap_local(pmap);
+ return;
+ fallback:
+ qxl_bo_kunmap(bo);
+--- a/drivers/gpu/drm/qxl/qxl_object.h
++++ b/drivers/gpu/drm/qxl/qxl_object.h
+@@ -88,8 +88,8 @@ extern int qxl_bo_create(struct qxl_devi
+ struct qxl_bo **bo_ptr);
+ extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr);
+ extern void qxl_bo_kunmap(struct qxl_bo *bo);
+-void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
+-void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
++void *qxl_bo_kmap_local_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset);
++void qxl_bo_kunmap_local_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map);
+ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo);
+ extern void qxl_bo_unref(struct qxl_bo **bo);
+ extern int qxl_bo_pin(struct qxl_bo *bo);
+--- a/drivers/gpu/drm/qxl/qxl_release.c
++++ b/drivers/gpu/drm/qxl/qxl_release.c
+@@ -408,7 +408,7 @@ union qxl_release_info *qxl_release_map(
+ union qxl_release_info *info;
+ struct qxl_bo *bo = release->release_bo;
+
+- ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK);
++ ptr = qxl_bo_kmap_local_page(qdev, bo, release->release_offset & PAGE_MASK);
+ if (!ptr)
+ return NULL;
+ info = ptr + (release->release_offset & ~PAGE_MASK);
+@@ -423,7 +423,7 @@ void qxl_release_unmap(struct qxl_device
+ void *ptr;
+
+ ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK);
+- qxl_bo_kunmap_atomic_page(qdev, bo, ptr);
++ qxl_bo_kunmap_local_page(qdev, bo, ptr);
+ }
+
+ void qxl_release_fence_buffer_objects(struct qxl_release *release)
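The ioctl cleanup relies on the same property as the crashdump changes: a
local mapping keeps pagefaults enabled, so a plain copy_from_user() replaces
__copy_from_user_inatomic_nocache() and its slow-path handling. A generic
sketch of the pattern, with hypothetical names::

    #include <linux/highmem.h>
    #include <linux/uaccess.h>

    /* Hypothetical helper: fill part of a page from user space while it
     * is temporarily mapped. The faulting copy is fine here. */
    static int fill_from_user_sketch(struct page *page, unsigned int offset,
                                     const void __user *ubuf, size_t len)
    {
            void *dst = kmap_local_page(page);
            int ret = 0;

            if (copy_from_user(dst + offset, ubuf, len))
                    ret = -EFAULT;

            kunmap_local(dst);
            return ret;
    }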
diff --git a/patches/0035-drm-nouveau-device-Replace-io_mapping_map_atomic_wc.patch b/patches/0035-drm-nouveau-device-Replace-io_mapping_map_atomic_wc.patch
new file mode 100644
index 000000000000..a4f628844588
--- /dev/null
+++ b/patches/0035-drm-nouveau-device-Replace-io_mapping_map_atomic_wc.patch
@@ -0,0 +1,46 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:47 +0100
+Subject: [PATCH 35/37] drm/nouveau/device: Replace io_mapping_map_atomic_wc()
+
+Neither fbmem_peek() nor fbmem_poke() requires disabling pagefaults and
+preemption as a side effect of io_mapping_map_atomic_wc().
+
+Use io_mapping_map_local_wc() instead.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ben Skeggs <bskeggs@redhat.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: dri-devel@lists.freedesktop.org
+Cc: nouveau@lists.freedesktop.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/fbmem.h
+@@ -60,19 +60,19 @@ fbmem_fini(struct io_mapping *fb)
+ static inline u32
+ fbmem_peek(struct io_mapping *fb, u32 off)
+ {
+- u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
++ u8 __iomem *p = io_mapping_map_local_wc(fb, off & PAGE_MASK);
+ u32 val = ioread32(p + (off & ~PAGE_MASK));
+- io_mapping_unmap_atomic(p);
++ io_mapping_unmap_local(p);
+ return val;
+ }
+
+ static inline void
+ fbmem_poke(struct io_mapping *fb, u32 off, u32 val)
+ {
+- u8 __iomem *p = io_mapping_map_atomic_wc(fb, off & PAGE_MASK);
++ u8 __iomem *p = io_mapping_map_local_wc(fb, off & PAGE_MASK);
+ iowrite32(val, p + (off & ~PAGE_MASK));
+ wmb();
+- io_mapping_unmap_atomic(p);
++ io_mapping_unmap_local(p);
+ }
+
+ static inline bool
diff --git a/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch b/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
new file mode 100644
index 000000000000..4cdc4274aaba
--- /dev/null
+++ b/patches/0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
@@ -0,0 +1,160 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:48 +0100
+Subject: [PATCH 36/37] drm/i915: Replace io_mapping_map_atomic_wc()
+
+None of these mappings requires the side effect of disabling pagefaults and
+preemption.
+
+Use io_mapping_map_local_wc() instead, and clean up gtt_user_read() and
+gtt_user_write() to use a plain copy_from_user() as the local maps are not
+disabling pagefaults.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Jani Nikula <jani.nikula@linux.intel.com>
+Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: intel-gfx@lists.freedesktop.org
+Cc: dri-devel@lists.freedesktop.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 7 +---
+ drivers/gpu/drm/i915/i915_gem.c | 40 ++++++++-----------------
+ drivers/gpu/drm/i915/selftests/i915_gem.c | 4 +-
+ drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 8 ++---
+ 4 files changed, 22 insertions(+), 37 deletions(-)
+
+--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+@@ -1081,7 +1081,7 @@ static void reloc_cache_reset(struct rel
+ struct i915_ggtt *ggtt = cache_to_ggtt(cache);
+
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+- io_mapping_unmap_atomic((void __iomem *)vaddr);
++ io_mapping_unmap_local((void __iomem *)vaddr);
+
+ if (drm_mm_node_allocated(&cache->node)) {
+ ggtt->vm.clear_range(&ggtt->vm,
+@@ -1147,7 +1147,7 @@ static void *reloc_iomap(struct drm_i915
+
+ if (cache->vaddr) {
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+- io_mapping_unmap_atomic((void __force __iomem *) unmask_page(cache->vaddr));
++ io_mapping_unmap_local((void __force __iomem *) unmask_page(cache->vaddr));
+ } else {
+ struct i915_vma *vma;
+ int err;
+@@ -1195,8 +1195,7 @@ static void *reloc_iomap(struct drm_i915
+ offset += page << PAGE_SHIFT;
+ }
+
+- vaddr = (void __force *)io_mapping_map_atomic_wc(&ggtt->iomap,
+- offset);
++ vaddr = (void __force *)io_mapping_map_local_wc(&ggtt->iomap, offset);
+ cache->page = page;
+ cache->vaddr = (unsigned long)vaddr;
+
+--- a/drivers/gpu/drm/i915/i915_gem.c
++++ b/drivers/gpu/drm/i915/i915_gem.c
+@@ -379,22 +379,15 @@ gtt_user_read(struct io_mapping *mapping
+ char __user *user_data, int length)
+ {
+ void __iomem *vaddr;
+- unsigned long unwritten;
++ bool fail = false;
+
+ /* We can use the cpu mem copy function because this is X86. */
+- vaddr = io_mapping_map_atomic_wc(mapping, base);
+- unwritten = __copy_to_user_inatomic(user_data,
+- (void __force *)vaddr + offset,
+- length);
+- io_mapping_unmap_atomic(vaddr);
+- if (unwritten) {
+- vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+- unwritten = copy_to_user(user_data,
+- (void __force *)vaddr + offset,
+- length);
+- io_mapping_unmap(vaddr);
+- }
+- return unwritten;
++ vaddr = io_mapping_map_local_wc(mapping, base);
++ if (copy_to_user(user_data, (void __force *)vaddr + offset, length))
++ fail = true;
++ io_mapping_unmap_local(vaddr);
++
++ return fail;
+ }
+
+ static int
+@@ -557,21 +550,14 @@ ggtt_write(struct io_mapping *mapping,
+ char __user *user_data, int length)
+ {
+ void __iomem *vaddr;
+- unsigned long unwritten;
++ bool fail = false;
+
+ /* We can use the cpu mem copy function because this is X86. */
+- vaddr = io_mapping_map_atomic_wc(mapping, base);
+- unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
+- user_data, length);
+- io_mapping_unmap_atomic(vaddr);
+- if (unwritten) {
+- vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+- unwritten = copy_from_user((void __force *)vaddr + offset,
+- user_data, length);
+- io_mapping_unmap(vaddr);
+- }
+-
+- return unwritten;
++ vaddr = io_mapping_map_local_wc(mapping, base);
++ if (copy_from_user((void __force *)vaddr + offset, user_data, length))
++ fail = true;
++ io_mapping_unmap_local(vaddr);
++ return fail;
+ }
+
+ /**
+--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
+@@ -57,12 +57,12 @@ static void trash_stolen(struct drm_i915
+
+ ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
+
+- s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
++ s = io_mapping_map_local_wc(&ggtt->iomap, slot);
+ for (x = 0; x < PAGE_SIZE / sizeof(u32); x++) {
+ prng = next_pseudo_random32(prng);
+ iowrite32(prng, &s[x]);
+ }
+- io_mapping_unmap_atomic(s);
++ io_mapping_unmap_local(s);
+ }
+
+ ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
+--- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
++++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
+@@ -1200,9 +1200,9 @@ static int igt_ggtt_page(void *arg)
+ u64 offset = tmp.start + order[n] * PAGE_SIZE;
+ u32 __iomem *vaddr;
+
+- vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
++ vaddr = io_mapping_map_local_wc(&ggtt->iomap, offset);
+ iowrite32(n, vaddr + n);
+- io_mapping_unmap_atomic(vaddr);
++ io_mapping_unmap_local(vaddr);
+ }
+ intel_gt_flush_ggtt_writes(ggtt->vm.gt);
+
+@@ -1212,9 +1212,9 @@ static int igt_ggtt_page(void *arg)
+ u32 __iomem *vaddr;
+ u32 val;
+
+- vaddr = io_mapping_map_atomic_wc(&ggtt->iomap, offset);
++ vaddr = io_mapping_map_local_wc(&ggtt->iomap, offset);
+ val = ioread32(vaddr + n);
+- io_mapping_unmap_atomic(vaddr);
++ io_mapping_unmap_local(vaddr);
+
+ if (val != n) {
+ pr_err("insert page failed: found %d, expected %d\n",
diff --git a/patches/0037-io-mapping-Remove-io_mapping_map_atomic_wc.patch b/patches/0037-io-mapping-Remove-io_mapping_map_atomic_wc.patch
new file mode 100644
index 000000000000..947f4e8ad33e
--- /dev/null
+++ b/patches/0037-io-mapping-Remove-io_mapping_map_atomic_wc.patch
@@ -0,0 +1,131 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:49 +0100
+Subject: [PATCH 37/37] io-mapping: Remove io_mapping_map_atomic_wc()
+
+No more users. Get rid of it and remove the traces in documentation.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ Documentation/driver-api/io-mapping.rst | 22 +++++-----------
+ include/linux/io-mapping.h | 42 +-------------------------------
+ 2 files changed, 9 insertions(+), 55 deletions(-)
+
+--- a/Documentation/driver-api/io-mapping.rst
++++ b/Documentation/driver-api/io-mapping.rst
+@@ -21,19 +21,15 @@ mappable, while 'size' indicates how lar
+ enable. Both are in bytes.
+
+ This _wc variant provides a mapping which may only be used with
+-io_mapping_map_atomic_wc(), io_mapping_map_local_wc() or
+-io_mapping_map_wc().
++io_mapping_map_local_wc() or io_mapping_map_wc().
+
+ With this mapping object, individual pages can be mapped either temporarily
+ or long term, depending on the requirements. Of course, temporary maps are
+-more efficient. They come in two flavours::
++more efficient::
+
+ void *io_mapping_map_local_wc(struct io_mapping *mapping,
+ unsigned long offset)
+
+- void *io_mapping_map_atomic_wc(struct io_mapping *mapping,
+- unsigned long offset)
+-
+ 'offset' is the offset within the defined mapping region. Accessing
+ addresses beyond the region specified in the creation function yields
+ undefined results. Using an offset which is not page aligned yields an
+@@ -50,9 +46,6 @@ io_mapping_map_local_wc() has a side eff
+ migration to make the mapping code work. No caller can rely on this side
+ effect.
+
+-io_mapping_map_atomic_wc() has the side effect of disabling preemption and
+-pagefaults. Don't use in new code. Use io_mapping_map_local_wc() instead.
+-
+ Nested mappings need to be undone in reverse order because the mapping
+ code uses a stack for keeping track of them::
+
+@@ -65,11 +58,10 @@ Nested mappings need to be undone in rev
+ The mappings are released with::
+
+ void io_mapping_unmap_local(void *vaddr)
+- void io_mapping_unmap_atomic(void *vaddr)
+
+-'vaddr' must be the value returned by the last io_mapping_map_local_wc() or
+-io_mapping_map_atomic_wc() call. This unmaps the specified mapping and
+-undoes the side effects of the mapping functions.
++'vaddr' must be the value returned by the last io_mapping_map_local_wc()
++call. This unmaps the specified mapping and undoes any side effects of
++the mapping function.
+
+ If you need to sleep while holding a mapping, you can use the regular
+ variant, although this may be significantly slower::
+@@ -77,8 +69,8 @@ If you need to sleep while holding a map
+ void *io_mapping_map_wc(struct io_mapping *mapping,
+ unsigned long offset)
+
+-This works like io_mapping_map_atomic/local_wc() except it has no side
+-effects and the pointer is globally visible.
++This works like io_mapping_map_local_wc() except it has no side effects and
++the pointer is globally visible.
+
+ The mappings are released with::
+
+--- a/include/linux/io-mapping.h
++++ b/include/linux/io-mapping.h
+@@ -60,28 +60,7 @@ io_mapping_fini(struct io_mapping *mappi
+ iomap_free(mapping->base, mapping->size);
+ }
+
+-/* Atomic map/unmap */
+-static inline void __iomem *
+-io_mapping_map_atomic_wc(struct io_mapping *mapping,
+- unsigned long offset)
+-{
+- resource_size_t phys_addr;
+-
+- BUG_ON(offset >= mapping->size);
+- phys_addr = mapping->base + offset;
+- preempt_disable();
+- pagefault_disable();
+- return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
+-}
+-
+-static inline void
+-io_mapping_unmap_atomic(void __iomem *vaddr)
+-{
+- kunmap_local_indexed((void __force *)vaddr);
+- pagefault_enable();
+- preempt_enable();
+-}
+-
++/* Temporary mappings which are only valid in the current context */
+ static inline void __iomem *
+ io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+ {
+@@ -163,24 +142,7 @@ io_mapping_unmap(void __iomem *vaddr)
+ {
+ }
+
+-/* Atomic map/unmap */
+-static inline void __iomem *
+-io_mapping_map_atomic_wc(struct io_mapping *mapping,
+- unsigned long offset)
+-{
+- preempt_disable();
+- pagefault_disable();
+- return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+-}
+-
+-static inline void
+-io_mapping_unmap_atomic(void __iomem *vaddr)
+-{
+- io_mapping_unmap(vaddr);
+- pagefault_enable();
+- preempt_enable();
+-}
+-
++/* Temporary mappings which are only valid in the current context */
+ static inline void __iomem *
+ io_mapping_map_local_wc(struct io_mapping *mapping, unsigned long offset)
+ {
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 5378d4302555..0e9822a293d3 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -115,7 +115,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -657,9 +657,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
+@@ -660,9 +660,18 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKIN
mrs x0, daif
orr x24, x24, x0
alternative_else_nop_endif
diff --git a/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch b/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
index 72c60a32e97b..dac7d1087abc 100644
--- a/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
+++ b/patches/highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -2,43 +2,18 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 30 Oct 2020 13:59:06 +0100
Subject: [PATCH] highmem: Don't disable preemption on RT in kmap_atomic()
-Disabling preemption make it impossible to acquire sleeping locks within
+Disabling preemption makes it impossible to acquire sleeping locks within
kmap_atomic() section.
For PREEMPT_RT it is sufficient to disable migration.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/highmem.h | 20 ++++++++++++++++----
- include/linux/io-mapping.h | 20 ++++++++++++++++----
- 2 files changed, 32 insertions(+), 8 deletions(-)
+ include/linux/highmem-internal.h | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
---- a/include/linux/highmem.h
-+++ b/include/linux/highmem.h
-@@ -139,7 +139,10 @@ static inline void kunmap(struct page *p
- */
- static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
- {
-- preempt_disable();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ migrate_disable();
-+ else
-+ preempt_disable();
- pagefault_disable();
- return __kmap_local_page_prot(page, prot);
- }
-@@ -151,7 +154,10 @@ static inline void *kmap_atomic(struct p
-
- static inline void *kmap_atomic_pfn(unsigned long pfn)
- {
-- preempt_disable();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ migrate_disable();
-+ else
-+ preempt_disable();
- pagefault_disable();
- return __kmap_local_pfn_prot(pfn, kmap_prot);
- }
-@@ -245,7 +251,10 @@ static inline void kunmap(struct page *p
+--- a/include/linux/highmem-internal.h
++++ b/include/linux/highmem-internal.h
+@@ -90,7 +90,10 @@ static inline void __kunmap_local(void *
static inline void *kmap_atomic(struct page *page)
{
@@ -48,37 +23,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ else
+ preempt_disable();
pagefault_disable();
- return page_address(page);
+ return __kmap_local_page_prot(page, kmap_prot);
}
-@@ -301,7 +310,10 @@ do { \
- BUILD_BUG_ON(__same_type((__addr), struct page *)); \
- __kunmap_atomic(__addr); \
- pagefault_enable(); \
-- preempt_enable(); \
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) \
-+ migrate_enable(); \
-+ else \
-+ preempt_enable(); \
- } while (0)
-
- #define kunmap_local(__addr) \
---- a/include/linux/io-mapping.h
-+++ b/include/linux/io-mapping.h
-@@ -69,7 +69,10 @@ io_mapping_map_atomic_wc(struct io_mappi
-
- BUG_ON(offset >= mapping->size);
- phys_addr = mapping->base + offset;
-- preempt_disable();
-+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
-+ migrate_disable();
-+ else
-+ preempt_disable();
- pagefault_disable();
- return __iomap_local_pfn_prot(PHYS_PFN(phys_addr), mapping->prot);
- }
-@@ -79,7 +82,10 @@ io_mapping_unmap_atomic(void __iomem *va
+@@ -99,7 +102,10 @@ static inline void __kunmap_atomic(void
{
- kunmap_local_indexed((void __force *)vaddr);
+ kunmap_local_indexed(addr);
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
@@ -87,10 +36,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ preempt_enable();
}
- static inline void __iomem *
-@@ -170,7 +176,10 @@ static inline void __iomem *
- io_mapping_map_atomic_wc(struct io_mapping *mapping,
- unsigned long offset)
+ unsigned int __nr_free_highpages(void);
+@@ -172,7 +178,10 @@ static inline void __kunmap_local(void *
+
+ static inline void *kmap_atomic(struct page *page)
{
- preempt_disable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
@@ -98,11 +47,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ else
+ preempt_disable();
pagefault_disable();
- return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
+ return page_address(page);
}
-@@ -180,7 +189,10 @@ io_mapping_unmap_atomic(void __iomem *va
- {
- io_mapping_unmap(vaddr);
+@@ -183,7 +192,10 @@ static inline void __kunmap_atomic(void
+ kunmap_flush_on_unmap(addr);
+ #endif
pagefault_enable();
- preempt_enable();
+ if (IS_ENABLED(CONFIG_PREEMPT_RT))
@@ -111,4 +60,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ preempt_enable();
}
- static inline void __iomem *
+ static inline unsigned int nr_free_highpages(void) { return 0; }
diff --git a/patches/hrtimer-Allow-raw-wakeups-during-boot.patch b/patches/hrtimer-Allow-raw-wakeups-during-boot.patch
index 8c3d93f3a56e..1fb9979c3637 100644
--- a/patches/hrtimer-Allow-raw-wakeups-during-boot.patch
+++ b/patches/hrtimer-Allow-raw-wakeups-during-boot.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1828,7 +1828,7 @@ static void __hrtimer_init_sleeper(struc
+@@ -1823,7 +1823,7 @@ static void __hrtimer_init_sleeper(struc
* expiry.
*/
if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index c0b323717970..fc3b90a168ad 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -165,7 +165,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
init_dl_bw(&rd->dl_bw);
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1762,6 +1762,8 @@ static __latent_entropy void run_timer_s
+@@ -1764,6 +1764,8 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e36eb4b6666a..03a80b8b0e80 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt3
++-rt4
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 2c432069ec70..325db985579c 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void stop_critical_timings(void);
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -5292,6 +5292,7 @@ static void check_flags(unsigned long fl
+@@ -5286,6 +5286,7 @@ static void check_flags(unsigned long fl
}
}
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -5306,6 +5307,7 @@ static void check_flags(unsigned long fl
+@@ -5300,6 +5301,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index cec724bd7030..7627b102dee9 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -52,7 +52,7 @@ performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/preempt.h | 51 +++++++++++++++++++++++++-
+ include/linux/preempt.h | 54 ++++++++++++++++++++++++++--
include/linux/sched.h | 38 +++++++++++++++++++
include/linux/thread_info.h | 12 +++++-
include/linux/trace_events.h | 1
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/trace.h | 2 +
kernel/trace/trace_events.c | 1
kernel/trace/trace_output.c | 14 ++++++-
- 13 files changed, 243 insertions(+), 34 deletions(-)
+ 13 files changed, 246 insertions(+), 34 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -135,7 +135,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preempt_enable_notrace() \
do { \
barrier(); \
-@@ -282,7 +320,7 @@ do { \
+@@ -264,6 +302,9 @@ do { \
+ #define preempt_check_resched_rt() barrier()
+ #define preemptible() 0
+
++#define preempt_lazy_disable() barrier()
++#define preempt_lazy_enable() barrier()
++
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+ #ifdef MODULE
+@@ -282,7 +323,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -144,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
set_preempt_need_resched(); \
} while (0)
-@@ -410,8 +448,15 @@ extern void migrate_enable(void);
+@@ -410,8 +451,15 @@ extern void migrate_enable(void);
#else
diff --git a/patches/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch b/patches/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
new file mode 100644
index 000000000000..eab4267192af
--- /dev/null
+++ b/patches/ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
@@ -0,0 +1,57 @@
+From: Oleg Nesterov <oleg@redhat.com>
+Date: Tue, 3 Nov 2020 12:39:01 +0100
+Subject: [PATCH] ptrace: fix ptrace_unfreeze_traced() race with rt-lock
+
+The patch "ptrace: fix ptrace vs tasklist_lock race" changed
+ptrace_freeze_traced() to take task->saved_state into account, but
+ptrace_unfreeze_traced() has the same problem and needs a similar fix:
+it should check/update both ->state and ->saved_state.
+
+Reported-by: Luis Claudio R. Goncalves <lgoncalv@redhat.com>
+Fixes: "ptrace: fix ptrace vs tasklist_lock race"
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/ptrace.c | 23 +++++++++++++++--------
+ 1 file changed, 15 insertions(+), 8 deletions(-)
+
+--- a/kernel/ptrace.c
++++ b/kernel/ptrace.c
+@@ -197,8 +197,8 @@ static bool ptrace_freeze_traced(struct
+
+ static void ptrace_unfreeze_traced(struct task_struct *task)
+ {
+- if (task->state != __TASK_TRACED)
+- return;
++ unsigned long flags;
++ bool frozen = true;
+
+ WARN_ON(!task->ptrace || task->parent != current);
+
+@@ -207,12 +207,19 @@ static void ptrace_unfreeze_traced(struc
+ * Recheck state under the lock to close this race.
+ */
+ spin_lock_irq(&task->sighand->siglock);
+- if (task->state == __TASK_TRACED) {
+- if (__fatal_signal_pending(task))
+- wake_up_state(task, __TASK_TRACED);
+- else
+- task->state = TASK_TRACED;
+- }
++
++ raw_spin_lock_irqsave(&task->pi_lock, flags);
++ if (task->state == __TASK_TRACED)
++ task->state = TASK_TRACED;
++ else if (task->saved_state == __TASK_TRACED)
++ task->saved_state = TASK_TRACED;
++ else
++ frozen = false;
++ raw_spin_unlock_irqrestore(&task->pi_lock, flags);
++
++ if (frozen && __fatal_signal_pending(task))
++ wake_up_state(task, __TASK_TRACED);
++
+ spin_unlock_irq(&task->sighand->siglock);
+ }
+
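
The fix above works because, on PREEMPT_RT, a task blocking on a sleeping
spinlock parks its real state in task->saved_state, so the traced state can
sit in either ->state or ->saved_state and both must be checked under
pi_lock. A user-space C model of that two-field check follows; the pthread
mutex, the enum values and unfreeze() are illustrative stand-ins for the
kernel's pi_lock and task states, not kernel APIs:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins mirroring the kernel names used in the patch. */
    enum { TASK_RUNNING, TASK_TRACED, __TASK_TRACED };

    struct task {
        pthread_mutex_t pi_lock;  /* models task->pi_lock */
        int state;                /* what the scheduler sees right now */
        int saved_state;          /* real state, parked while on an rt-lock */
    };

    /* Returns true if the task was still frozen in either field. */
    static bool unfreeze(struct task *t)
    {
        bool frozen = true;

        pthread_mutex_lock(&t->pi_lock);
        if (t->state == __TASK_TRACED)
            t->state = TASK_TRACED;
        else if (t->saved_state == __TASK_TRACED)
            t->saved_state = TASK_TRACED;
        else
            frozen = false;
        pthread_mutex_unlock(&t->pi_lock);

        return frozen;
    }

    int main(void)
    {
        struct task t = {
            .pi_lock = PTHREAD_MUTEX_INITIALIZER,
            .state = TASK_RUNNING,        /* blocked on an rt-lock ... */
            .saved_state = __TASK_TRACED, /* ... real state parked here */
        };

        /* Checking only ->state, as the old code did, would miss this. */
        printf("frozen: %d\n", unfreeze(&t));
        return 0;
    }
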
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 6f061b770919..5d9e0068b237 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1993,6 +1993,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct
+@@ -1988,6 +1988,36 @@ SYSCALL_DEFINE2(nanosleep_time32, struct
}
#endif
diff --git a/patches/series b/patches/series
index fca447ae6c24..8fd6a558976c 100644
--- a/patches/series
+++ b/patches/series
@@ -32,27 +32,45 @@
0018-sched-Deny-self-issued-__set_cpus_allowed_ptr-when-m.patch
0019-sched-Comment-affine_move_task.patch
-# 2020-10-29 23:18 Thomas Gleixner ( 113) [patch V2 00/18] mm/highmem: Preemptible variant of kmap_atomic & friend
-# 20201029221806.189523375@linutronix.de
-# + fixes
-0001-sched-Make-migrate_disable-enable-independent-of-RT.patch
-0002-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
-0003-highmem-Provide-generic-variant-of-kmap_atomic.patch
-0004-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
-0005-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
-0006-ARM-highmem-Switch-to-generic-kmap-atomic.patch
-0007-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0008-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0009-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0010-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0011-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0012-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0013-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
-0014-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
-0015-io-mapping-Cleanup-atomic-iomap.patch
-0016-sched-highmem-Store-local-kmaps-in-task-struct.patch
-0017-mm-highmem-Provide-kmap_local.patch
-0018-io-mapping-Provide-iomap_local-variant.patch
+# 2020-11-03 10:27 Thomas Gleixner [patch V3 00/37] mm/highmem: Preemptible variant of kmap_atomic & friends
+# 20201103092712.714480842@linutronix.de
+0001-mm-highmem-Un-EXPORT-__kmap_atomic_idx.patch
+0002-highmem-Remove-unused-functions.patch
+0003-fs-Remove-asm-kmap_types.h-includes.patch
+0004-sh-highmem-Remove-all-traces-of-unused-cruft.patch
+0005-asm-generic-Provide-kmap_size.h.patch
+0006-highmem-Provide-generic-variant-of-kmap_atomic.patch
+0007-highmem-Make-DEBUG_HIGHMEM-functional.patch
+0008-x86-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+0009-arc-mm-highmem-Use-generic-kmap-atomic-implementatio.patch
+0010-ARM-highmem-Switch-to-generic-kmap-atomic.patch
+0011-csky-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0012-microblaze-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0013-mips-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0014-nds32-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0015-powerpc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0016-sparc-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0017-xtensa-mm-highmem-Switch-to-generic-kmap-atomic.patch
+0018-highmem-Get-rid-of-kmap_types.h.patch
+0019-mm-highmem-Remove-the-old-kmap_atomic-cruft.patch
+0020-io-mapping-Cleanup-atomic-iomap.patch
+0021-Documentation-io-mapping-Remove-outdated-blurb.patch
+0022-highmem-High-implementation-details-and-document-API.patch
+0023-sched-Make-migrate_disable-enable-independent-of-RT.patch
+0024-sched-highmem-Store-local-kmaps-in-task-struct.patch
+0025-mm-highmem-Provide-kmap_local.patch
+0026-io-mapping-Provide-iomap_local-variant.patch
+0027-x86-crashdump-32-Simplify-copy_oldmem_page.patch
+0028-mips-crashdump-Simplify-copy_oldmem_page.patch
+0029-ARM-mm-Replace-kmap_atomic_pfn.patch
+0030-highmem-Remove-kmap_atomic_pfn.patch
+0031-drm-ttm-Replace-kmap_atomic-usage.patch
+0032-drm-vmgfx-Replace-kmap_atomic.patch
+0033-highmem-Remove-kmap_atomic_prot.patch
+0034-drm-qxl-Replace-io_mapping_map_atomic_wc.patch
+0035-drm-nouveau-device-Replace-io_mapping_map_atomic_wc.patch
+0036-drm-i915-Replace-io_mapping_map_atomic_wc.patch
+0037-io-mapping-Remove-io_mapping_map_atomic_wc.patch
#
highmem-Don-t-disable-preemption-on-RT-in-kmap_atomi.patch
@@ -74,6 +92,9 @@ block-mq-Disable-preemption-in-blk_mq_complete_reque.patch
# 20201028181041.xyeothhkouc3p4md@linutronix.de
lib-test_lockup-Minimum-fix-to-get-it-compiled-on-PR.patch
+# 20201103190937.hga67rqhvknki3tp@linutronix.de
+timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch
+
############################################################
# Ready for posting
############################################################
@@ -275,6 +296,7 @@ net-core-use-local_bh_disable-in-netif_rx_ni.patch
# RTMUTEX
pid.h-include-atomic.h.patch
ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+ptrace-fix-ptrace_unfreeze_traced-race-with-rt-lock.patch
# MIGRATE DISABLE AND PER CPU
# Revisit
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 67b9b3560baf..99b3ce936317 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Restored if set_restore_sigmask() was used: */
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
-@@ -263,6 +263,7 @@ static inline void init_sigpending(struc
+@@ -265,6 +265,7 @@ static inline void init_sigpending(struc
}
extern void flush_sigqueue(struct sigpending *queue);
diff --git a/patches/timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch b/patches/timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch
new file mode 100644
index 000000000000..81c0cfe97629
--- /dev/null
+++ b/patches/timers-Don-t-block-on-expiry_lock-for-TIMER_IRQSAFE.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 2 Nov 2020 14:14:24 +0100
+Subject: [PATCH] timers: Don't block on ->expiry_lock for TIMER_IRQSAFE
+
+PREEMPT_RT does not spin and wait until a running timer completes its
+callback; instead it blocks on a sleeping lock to prevent a deadlock.
+
+This blocking cannot be done for the workqueue's TIMER_IRQSAFE timer,
+which is canceled in an IRQ-off region. The cancellation has to happen
+in an IRQ-off region because changing the PENDING bit and clearing the
+timer must not be interrupted, to avoid a busy-loop.
+
+The callback invocation of an IRQSAFE timer is not preempted on
+PREEMPT_RT, so there is no need to synchronize on timer_base::expiry_lock.
+
+Don't acquire timer_base::expiry_lock for a TIMER_IRQSAFE flagged
+timer.
+Add a lockdep annotation to ensure that this function is always invoked
+in preemptible context on PREEMPT_RT.
+
+Reported-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Cc: stable-rt@vger.kernel.org
+---
+ kernel/time/timer.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -1283,7 +1283,7 @@ static void del_timer_wait_running(struc
+ u32 tf;
+
+ tf = READ_ONCE(timer->flags);
+- if (!(tf & TIMER_MIGRATING)) {
++ if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) {
+ struct timer_base *base = get_timer_base(tf);
+
+ /*
+@@ -1367,6 +1367,13 @@ int del_timer_sync(struct timer_list *ti
+ */
+ WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
+
++ /*
++ * Must be able to sleep on PREEMPT_RT because of the slowpath in
++ * del_timer_wait_running().
++ */
++ if (IS_ENABLED(CONFIG_PREEMPT_RT) && !(timer->flags & TIMER_IRQSAFE))
++ lockdep_assert_preemption_enabled();
++
+ do {
+ ret = try_to_del_timer_sync(timer);
+
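
The first hunk above widens the skip condition in del_timer_wait_running()
from TIMER_MIGRATING alone to TIMER_MIGRATING | TIMER_IRQSAFE, so an
IRQSAFE timer never blocks on expiry_lock. A tiny user-space C sketch of
just that flag test, with illustrative flag values rather than the kernel's
real bit layout:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the kernel defines these differently. */
    #define TIMER_MIGRATING (1u << 0)
    #define TIMER_IRQSAFE   (1u << 1)

    /* Mirrors: if (!(tf & (TIMER_MIGRATING | TIMER_IRQSAFE))) ... */
    static bool blocks_on_expiry_lock(uint32_t tf)
    {
        return !(tf & (TIMER_MIGRATING | TIMER_IRQSAFE));
    }

    int main(void)
    {
        printf("plain timer blocks:   %d\n", blocks_on_expiry_lock(0));
        printf("irqsafe timer blocks: %d\n",
               blocks_on_expiry_lock(TIMER_IRQSAFE));
        return 0;
    }
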