author     Linus Torvalds <torvalds@linux-foundation.org>  2021-07-06 11:52:58 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-07-06 11:52:58 -0700
commit     77d34a4683b053108ecd466cc7c4193b45805528 (patch)
tree       cde2636039cc9290421baad0705c22a86b66ce8e /arch/arm/mm
parent     4c55e2aeb8082cb118cd63596bfe0dc5247b78e1 (diff)
parent     6fa630bf473827aee48cbf0efbbdf6f03134e890 (diff)
download   linux-next-77d34a4683b053108ecd466cc7c4193b45805528.tar.gz
Merge tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm

Pull ARM development updates from Russell King:

 - Make it clear __swp_entry_to_pte() uses PTE_TYPE_FAULT

 - Updates for setting the vmalloc size via the command line, resolving
   an issue with the 8MiB hole not being properly accounted for, and
   cleaning up the code

 - ftrace support for module PLTs

 - Spelling fixes

 - kbuild updates for removing generated files and pattern rules for
   generating files

 - Clang/llvm updates

 - Change the way the kernel is mapped, placing it in vmalloc space
   instead

 - Remove arm_pm_restart from arm and aarch64

* tag 'for-linus' of git://git.armlinux.org.uk/~rmk/linux-arm: (29 commits)
  ARM: 9098/1: ftrace: MODULE_PLT: Fix build problem without DYNAMIC_FTRACE
  ARM: 9097/1: mmu: Declare section start/end correctly
  ARM: 9096/1: Remove arm_pm_restart()
  ARM: 9095/1: ARM64: Remove arm_pm_restart()
  ARM: 9094/1: Register with kernel restart handler
  ARM: 9093/1: drivers: firmware: psci: Register with kernel restart handler
  ARM: 9092/1: xen: Register with kernel restart handler
  ARM: 9091/1: Revert "mm: qsd8x50: Fix incorrect permission faults"
  ARM: 9090/1: Map the lowmem and kernel separately
  ARM: 9089/1: Define kernel physical section start and end
  ARM: 9088/1: Split KERNEL_OFFSET from PAGE_OFFSET
  ARM: 9087/1: kprobes: test-thumb: fix for LLVM_IAS=1
  ARM: 9086/1: syscalls: use pattern rules to generate syscall headers
  ARM: 9085/1: remove unneeded abi parameter to syscallnr.sh
  ARM: 9084/1: simplify the build rule of mach-types.h
  ARM: 9083/1: uncompress: atags_to_fdt: Spelling s/REturn/Return/
  ARM: 9082/1: [v2] mark prepare_page_table as __init
  ARM: 9079/1: ftrace: Add MODULE_PLTS support
  ARM: 9078/1: Add warn suppress parameter to arm_gen_branch_link()
  ARM: 9077/1: PLT: Move struct plt_entries definition to header
  ...
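For context, "vmalloc=<size>" is parsed with memparse(), so the usual K/M/G
suffixes work on the kernel command line, e.g. booting with "vmalloc=512M".
A minimal user-space sketch of that suffix handling (parse_size() is a
hypothetical stand-in for the kernel's memparse() in lib/cmdline.c,
illustration only):

	#include <stdio.h>
	#include <stdlib.h>

	/* Hypothetical stand-in for memparse(): parse a size string such
	 * as "240M" or "1G" into bytes. Sketch, not the kernel code. */
	static unsigned long long parse_size(const char *s)
	{
		char *end;
		unsigned long long v = strtoull(s, &end, 0);

		switch (*end) {
		case 'G': case 'g': v <<= 30; break;
		case 'M': case 'm': v <<= 20; break;
		case 'K': case 'k': v <<= 10; break;
		}
		return v;
	}

	int main(void)
	{
		/* e.g. vmalloc=512M on the command line */
		printf("%llu\n", parse_size("512M")); /* 536870912 */
		return 0;
	}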
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig      |   2
-rw-r--r--  arch/arm/mm/abort-ev7.S  |  26
-rw-r--r--  arch/arm/mm/mmu.c        | 166
3 files changed, 123 insertions, 71 deletions
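Before the diff itself, a quick worked example of the new clamp in
early_vmalloc() below. The constants assume the common ARM 3G/1G split
(PAGE_OFFSET = 0xC0000000, VMALLOC_END = 0xFF800000, VMALLOC_OFFSET = 8 MiB);
actual values depend on the configuration:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed constants for a common ARM 3G/1G configuration. */
		unsigned long VMALLOC_END    = 0xff800000UL;
		unsigned long PAGE_OFFSET    = 0xc0000000UL;
		unsigned long VMALLOC_OFFSET = 8UL << 20;
		unsigned long SZ_32M         = 32UL << 20;

		/*
		 * The new upper bound from early_vmalloc(): the old code
		 * left out the VMALLOC_OFFSET guard hole, overstating the
		 * maximum by 8 MiB.
		 */
		unsigned long vmalloc_max =
			VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);

		printf("vmalloc_max = %lu MiB\n", vmalloc_max >> 20); /* 976 */
		return 0;
	}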
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35f43d0aa056..8355c3895894 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -601,8 +601,6 @@ config CPU_TLB_V6
config CPU_TLB_V7
bool
-config VERIFY_PERMISSION_FAULT
- bool
endif
config CPU_HAS_ASID
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index f7cc5d68444b..f81bceacc660 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -17,31 +17,5 @@ ENTRY(v7_early_abort)
mrc p15, 0, r1, c5, c0, 0 @ get FSR
mrc p15, 0, r0, c6, c0, 0 @ get FAR
uaccess_disable ip @ disable userspace access
-
- /*
- * V6 code adjusts the returned DFSR.
- * New designs should not need to patch up faults.
- */
-
-#if defined(CONFIG_VERIFY_PERMISSION_FAULT)
- /*
- * Detect erroneous permission failures and fix
- */
- ldr r3, =0x40d @ On permission fault
- and r3, r1, r3
- cmp r3, #0x0d
- bne do_DataAbort
-
- mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
- isb
- mrc p15, 0, ip, c7, c4, 0 @ Read the PAR
- and r3, ip, #0x7b @ On translation fault
- cmp r3, #0x0b
- bne do_DataAbort
- bic r1, r1, #0xf @ Fix up FSR FS[5:0]
- and ip, ip, #0x7e
- orr r1, r1, ip, LSR #1
-#endif
-
b do_DataAbort
ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c1e12aab67b8..7583bda5ea7d 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1121,31 +1121,32 @@ void __init debug_ll_io_init(void)
}
#endif
-static void * __initdata vmalloc_min =
- (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
+static unsigned long __initdata vmalloc_size = 240 * SZ_1M;
/*
* vmalloc=size forces the vmalloc area to be exactly 'size'
* bytes. This can be used to increase (or decrease) the vmalloc
- * area - the default is 240m.
+ * area - the default is 240MiB.
*/
static int __init early_vmalloc(char *arg)
{
unsigned long vmalloc_reserve = memparse(arg, NULL);
+ unsigned long vmalloc_max;
if (vmalloc_reserve < SZ_16M) {
vmalloc_reserve = SZ_16M;
- pr_warn("vmalloc area too small, limiting to %luMB\n",
+ pr_warn("vmalloc area is too small, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
- if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
- vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
- pr_warn("vmalloc area is too big, limiting to %luMB\n",
+ vmalloc_max = VMALLOC_END - (PAGE_OFFSET + SZ_32M + VMALLOC_OFFSET);
+ if (vmalloc_reserve > vmalloc_max) {
+ vmalloc_reserve = vmalloc_max;
+ pr_warn("vmalloc area is too big, limiting to %luMiB\n",
vmalloc_reserve >> 20);
}
- vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
+ vmalloc_size = vmalloc_reserve;
return 0;
}
early_param("vmalloc", early_vmalloc);
@@ -1165,7 +1166,8 @@ void __init adjust_lowmem_bounds(void)
* and may itself be outside the valid range for which phys_addr_t
* and therefore __pa() is defined.
*/
- vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
+ vmalloc_limit = (u64)VMALLOC_END - vmalloc_size - VMALLOC_OFFSET -
+ PAGE_OFFSET + PHYS_OFFSET;
/*
* The first usable region must be PMD aligned. Mark its start
@@ -1246,7 +1248,7 @@ void __init adjust_lowmem_bounds(void)
memblock_set_current_limit(memblock_limit);
}
-static inline void prepare_page_table(void)
+static __init void prepare_page_table(void)
{
unsigned long addr;
phys_addr_t end;
@@ -1457,8 +1459,6 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
- phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
- phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
phys_addr_t start, end;
u64 i;
@@ -1466,55 +1466,126 @@ static void __init map_lowmem(void)
for_each_mem_range(i, &start, &end) {
struct map_desc map;
+ pr_debug("map lowmem start: 0x%08llx, end: 0x%08llx\n",
+ (long long)start, (long long)end);
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
break;
- if (end < kernel_x_start) {
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY_RWX;
+ /*
+ * If our kernel image is in the VMALLOC area we need to remove
+ * the kernel physical memory from lowmem since the kernel will
+ * be mapped separately.
+ *
+ * The kernel will typically be at the very start of lowmem,
+ * but any placement relative to memory ranges is possible.
+ *
+ * If the memblock contains the kernel, we have to chisel out
+ * the kernel memory from it and map each part separately. We
+ * get 6 different theoretical cases:
+ *
+ * +--------+ +--------+
+ * +-- start --+ +--------+ | Kernel | | Kernel |
+ * | | | Kernel | | case 2 | | case 5 |
+ * | | | case 1 | +--------+ | | +--------+
+ * | Memory | +--------+ | | | Kernel |
+ * | range | +--------+ | | | case 6 |
+ * | | | Kernel | +--------+ | | +--------+
+ * | | | case 3 | | Kernel | | |
+ * +-- end ----+ +--------+ | case 4 | | |
+ * +--------+ +--------+
+ */
- create_mapping(&map);
- } else if (start >= kernel_x_end) {
- map.pfn = __phys_to_pfn(start);
- map.virtual = __phys_to_virt(start);
- map.length = end - start;
- map.type = MT_MEMORY_RW;
+ /* Case 5: kernel covers range, don't map anything, should be rare */
+ if ((start > kernel_sec_start) && (end < kernel_sec_end))
+ break;
- create_mapping(&map);
- } else {
- /* This better cover the entire kernel */
- if (start < kernel_x_start) {
+ /* Cases where the kernel is starting inside the range */
+ if ((kernel_sec_start >= start) && (kernel_sec_start <= end)) {
+ /* Case 6: kernel is embedded in the range, we need two mappings */
+ if ((start < kernel_sec_start) && (end > kernel_sec_end)) {
+ /* Map memory below the kernel */
map.pfn = __phys_to_pfn(start);
map.virtual = __phys_to_virt(start);
- map.length = kernel_x_start - start;
+ map.length = kernel_sec_start - start;
map.type = MT_MEMORY_RW;
-
create_mapping(&map);
- }
-
- map.pfn = __phys_to_pfn(kernel_x_start);
- map.virtual = __phys_to_virt(kernel_x_start);
- map.length = kernel_x_end - kernel_x_start;
- map.type = MT_MEMORY_RWX;
-
- create_mapping(&map);
-
- if (kernel_x_end < end) {
- map.pfn = __phys_to_pfn(kernel_x_end);
- map.virtual = __phys_to_virt(kernel_x_end);
- map.length = end - kernel_x_end;
+ /* Map memory above the kernel */
+ map.pfn = __phys_to_pfn(kernel_sec_end);
+ map.virtual = __phys_to_virt(kernel_sec_end);
+ map.length = end - kernel_sec_end;
map.type = MT_MEMORY_RW;
-
create_mapping(&map);
+ break;
}
+ /* Case 1: kernel and range start at the same address, should be common */
+ if (kernel_sec_start == start)
+ start = kernel_sec_end;
+ /* Case 3: kernel and range end at the same address, should be rare */
+ if (kernel_sec_end == end)
+ end = kernel_sec_start;
+ } else if ((kernel_sec_start < start) && (kernel_sec_end > start) && (kernel_sec_end < end)) {
+ /* Case 2: kernel ends inside range, starts below it */
+ start = kernel_sec_end;
+ } else if ((kernel_sec_start > start) && (kernel_sec_start < end) && (kernel_sec_end > end)) {
+ /* Case 4: kernel starts inside range, ends above it */
+ end = kernel_sec_start;
}
+ map.pfn = __phys_to_pfn(start);
+ map.virtual = __phys_to_virt(start);
+ map.length = end - start;
+ map.type = MT_MEMORY_RW;
+ create_mapping(&map);
}
}
+static void __init map_kernel(void)
+{
+ /*
+ * We use the well known kernel section start and end and split the area in the
+ * middle like this:
+ * . .
+ * | RW memory |
+ * +----------------+ kernel_x_start
+ * | Executable |
+ * | kernel memory |
+ * +----------------+ kernel_x_end / kernel_nx_start
+ * | Non-executable |
+ * | kernel memory |
+ * +----------------+ kernel_nx_end
+ * | RW memory |
+ * . .
+ *
+ * Notice that we are dealing with section sized mappings here so all of this
+ * will be bumped to the closest section boundary. This means that some of the
+ * non-executable part of the kernel memory is actually mapped as executable.
+ * This will only persist until we turn on proper memory management later on
+ * and we remap the whole kernel with page granularity.
+ */
+ phys_addr_t kernel_x_start = kernel_sec_start;
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_nx_start = kernel_x_end;
+ phys_addr_t kernel_nx_end = kernel_sec_end;
+ struct map_desc map;
+
+ map.pfn = __phys_to_pfn(kernel_x_start);
+ map.virtual = __phys_to_virt(kernel_x_start);
+ map.length = kernel_x_end - kernel_x_start;
+ map.type = MT_MEMORY_RWX;
+ create_mapping(&map);
+
+ /* If the nx part is small it may end up covered by the tail of the RWX section */
+ if (kernel_x_end == kernel_nx_end)
+ return;
+
+ map.pfn = __phys_to_pfn(kernel_nx_start);
+ map.virtual = __phys_to_virt(kernel_nx_start);
+ map.length = kernel_nx_end - kernel_nx_start;
+ map.type = MT_MEMORY_RW;
+ create_mapping(&map);
+}
+
#ifdef CONFIG_ARM_PV_FIXUP
typedef void pgtables_remap(long long offset, unsigned long pgd);
pgtables_remap lpae_pgtables_remap_asm;
@@ -1645,9 +1716,18 @@ void __init paging_init(const struct machine_desc *mdesc)
{
void *zero_page;
+ pr_debug("physical kernel sections: 0x%08x-0x%08x\n",
+ kernel_sec_start, kernel_sec_end);
+
prepare_page_table();
map_lowmem();
memblock_set_current_limit(arm_lowmem_limit);
+ pr_debug("lowmem limit is %08llx\n", (long long)arm_lowmem_limit);
+ /*
+ * After this point early_alloc(), i.e. the memblock allocator, can
+ * be used
+ */
+ map_kernel();
dma_contiguous_remap();
early_fixmap_shutdown();
devicemaps_init(mdesc);
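
For readers following the map_lowmem() rewrite above, the six-case range
splitting can be exercised in isolation. A minimal user-space sketch of the
same chiseling logic (the bounds, chisel() and the harness are illustrative,
not kernel API; the real code builds map_desc structures and calls
create_mapping()):

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel section bounds. */
	static unsigned long long kernel_sec_start = 0x40100000ULL;
	static unsigned long long kernel_sec_end   = 0x40800000ULL;

	/* Mirror of the case analysis in map_lowmem(): given a memory
	 * range, print the sub-ranges that would be mapped MT_MEMORY_RW
	 * around the kernel. */
	static void chisel(unsigned long long start, unsigned long long end)
	{
		/* Case 5: kernel covers the whole range, map nothing. */
		if (start > kernel_sec_start && end < kernel_sec_end)
			return;

		if (kernel_sec_start >= start && kernel_sec_start <= end) {
			/* Case 6: kernel embedded in range, two mappings. */
			if (start < kernel_sec_start && end > kernel_sec_end) {
				printf("map %#llx-%#llx\n", start, kernel_sec_start);
				printf("map %#llx-%#llx\n", kernel_sec_end, end);
				return;
			}
			/* Case 1: kernel and range start together. */
			if (kernel_sec_start == start)
				start = kernel_sec_end;
			/* Case 3: kernel and range end together. */
			if (kernel_sec_end == end)
				end = kernel_sec_start;
		} else if (kernel_sec_start < start && kernel_sec_end > start &&
			   kernel_sec_end < end) {
			/* Case 2: kernel ends inside the range. */
			start = kernel_sec_end;
		} else if (kernel_sec_start > start && kernel_sec_start < end &&
			   kernel_sec_end > end) {
			/* Case 4: kernel starts inside the range. */
			end = kernel_sec_start;
		}
		if (start < end) /* guard added for the sketch */
			printf("map %#llx-%#llx\n", start, end);
	}

	int main(void)
	{
		chisel(0x40000000ULL, 0x60000000ULL); /* case 6: two maps */
		chisel(0x40100000ULL, 0x60000000ULL); /* case 1: one map */
		return 0;
	}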