Diffstat (limited to 'arch')
106 files changed, 437 insertions, 747 deletions
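A pattern worth noting before the per-file hunks: nearly every architecture's mem_init() below loses its trailing mem_init_print_info(NULL) call. The generic half of that change sits outside this 'arch'-limited diffstat, so the sketch below is an assumption inferred from the deletion pattern (the single replacement call site and the now argument-less prototype are not shown in this diff):

	/* Before: every arch ended mem_init() the same way. */
	void __init mem_init(void)
	{
		memblock_free_all();
		mem_init_print_info(NULL);	/* duplicated across ~20 arch files */
	}

	/* After (assumed): generic init code prints the memory summary once,
	 * so each arch hunk below simply deletes its local call. */
	static void __init mm_init(void)
	{
		mem_init();			/* arch hook, no printing */
		mem_init_print_info();		/* single generic call site */
	}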
diff --git a/arch/Kconfig b/arch/Kconfig
index 2300e896fd16..3a25f4b6ee24 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -14,6 +14,26 @@ menu "General architecture-dependent options"
 config CRASH_CORE
 	bool
 
+config CRASH_AUTO_STR
+	string "Memory reserved for crash kernel"
+	depends on CRASH_CORE
+	default "1G-64G:128M,64G-1T:256M,1T-:512M"
+	help
+	  This configures the reserved memory dependent
+	  on the value of System RAM. The syntax is:
+	  crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
+	  range=start-[end]
+
+	  For example:
+	      crashkernel=512M-2G:64M,2G-:128M
+
+	  This would mean:
+
+	  1) if the RAM is smaller than 512M, then don't reserve anything
+	     (this is the "rescue" case)
+	  2) if the RAM size is between 512M and 2G (exclusive), then reserve 64M
+	  3) if the RAM size is larger than 2G, then reserve 128M
+
 config KEXEC_CORE
 	select CRASH_CORE
 	bool
@@ -829,6 +849,17 @@ config HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
 config HAVE_ARCH_HUGE_VMAP
 	bool
 
+#
+#  Archs that select this would be capable of PMD-sized vmaps (i.e.,
+#  arch_vmap_pmd_supported() returns true), and they must make no assumptions
+#  that vmalloc memory is mapped with PAGE_SIZE ptes. The VM_NO_HUGE_VMAP flag
+#  can be used to prohibit arch-specific allocations from using hugepages to
+#  help with this (e.g., modules may require it).
+#
+config HAVE_ARCH_HUGE_VMALLOC
+	depends on HAVE_ARCH_HUGE_VMAP
+	bool
+
 config ARCH_WANT_HUGE_PMD_SHARE
 	bool
 
diff --git a/arch/alpha/mm/init.c b/arch/alpha/mm/init.c
index 3c42b3147fd6..a97650a618f1 100644
--- a/arch/alpha/mm/init.c
+++ b/arch/alpha/mm/init.c
@@ -282,5 +282,4 @@ mem_init(void)
 	set_max_mapnr(max_low_pfn);
 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
 	memblock_free_all();
-	mem_init_print_info(NULL);
 }
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index bc8d6aecfbbd..2d98501c0897 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -6,6 +6,7 @@ config ARC
 	def_bool y
 	select ARC_TIMERS
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_PTE_SPECIAL
@@ -28,6 +29,7 @@ config ARC
 	select GENERIC_SMP_IDLE_THREAD
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_FUTEX_CMPXCHG if FUTEX
@@ -48,9 +50,6 @@ config ARC
 	select HAVE_ARCH_JUMP_LABEL if ISA_ARCV2 && !CPU_ENDIAN_BE32
 	select SET_FS
 
-config ARCH_HAS_CACHE_LINE_SIZE
-	def_bool y
-
 config TRACE_IRQFLAGS_SUPPORT
 	def_bool y
 
@@ -86,10 +85,6 @@ config STACKTRACE_SUPPORT
 	def_bool y
 	select STACKTRACE
 
-config HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	def_bool y
-	depends on ARC_MMU_V4
-
 menu "ARC Architecture Configuration"
 
 menu "ARC Platform/SoC/Board"
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index ce07e697916c..33832e36bdb7 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -194,7 +194,6 @@ void __init mem_init(void)
 {
 	memblock_free_all();
 	highmem_init();
-	mem_init_print_info(NULL);
 }
 
 #ifdef CONFIG_HIGHMEM
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e6e08d8a45fc..24804f11302d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -31,8 +31,10 @@ config ARM
 	select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT if CPU_V7
 	select ARCH_SUPPORTS_ATOMIC_RMW
+	select ARCH_SUPPORTS_HUGETLBFS if ARM_LPAE
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_MEMTEST
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_WANT_LD_ORPHAN_WARN
@@ -76,6 +78,7 @@ config ARM
 	select HAVE_ARCH_SECCOMP_FILTER if AEABI && !OABI_COMPAT
 	select HAVE_ARCH_THREAD_STRUCT_WHITELIST
 	select HAVE_ARCH_TRACEHOOK
+	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARM_LPAE
 	select HAVE_ARM_SMCCC if CPU_V7
 	select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
 	select HAVE_CONTEXT_TRACKING
@@ -1510,14 +1513,6 @@ config HW_PERF_EVENTS
 	def_bool y
 	depends on ARM_PMU
 
-config SYS_SUPPORTS_HUGETLBFS
-	def_bool y
-	depends on ARM_LPAE
-
-config HAVE_ARCH_TRANSPARENT_HUGEPAGE
-	def_bool y
-	depends on ARM_LPAE
-
 config ARCH_WANT_GENERAL_HUGETLB
 	def_bool y
 
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 2b85d175e999..d4edab51a77c 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -186,8 +186,6 @@ static inline pte_t pte_mkspecial(pte_t pte)
 #define pmd_write(pmd)		(pmd_isclear((pmd), L_PMD_SECT_RDONLY))
 #define pmd_dirty(pmd)		(pmd_isset((pmd), L_PMD_SECT_DIRTY))
 
-#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
-#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))
 
 #define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
 #define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index c02f24400369..d63a5bb6bd0c 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -166,6 +166,9 @@ extern struct page *empty_zero_page;
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
+#define pud_page(pud)		pmd_page(__pmd(pud_val(pud)))
+#define pud_write(pud)		pmd_write(__pmd(pud_val(pud)))
+
 #define pmd_none(pmd)		(!pmd_val(pmd))
 
 static inline pte_t *pmd_page_vaddr(pmd_t pmd)
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 44f7292ec27b..f1da3b439b96 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/pagemap.h>
 
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 6a769a6c314e..d8a115de5507 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -8,6 +8,7 @@
 #include <linux/spinlock.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/pagemap.h>
 
 #include <asm/shmparam.h>
 #include <asm/tlbflush.h>
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index eb5d338657d1..bcb485620a05 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/pagemap.h>
 
 #include <asm/tlbflush.h>
 #include <asm/cacheflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8356bf1daa28..1ba9f9f9dbd8 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -320,8 +320,6 @@ void __init mem_init(void)
 
 	free_highpages();
 
-	mem_init_print_info(NULL);
-
 	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
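The CRASH_AUTO_STR help text above defines a small grammar for sizing the crash-kernel reservation by total RAM. As a minimal, standalone illustration of those semantics (the in-kernel parser, parse_crashkernel_mem(), works on the raw command-line string rather than a fixed table):

	/* crashkernel range selection, modelled on the default
	 * "1G-64G:128M,64G-1T:256M,1T-:512M" string above. */
	#include <stdio.h>

	#define SZ_1M (1024ULL * 1024)
	#define SZ_1G (1024ULL * 1024 * 1024)

	struct ck_range { unsigned long long start, end, size; };  /* end == 0: open range */

	static const struct ck_range table[] = {
		{    1 * SZ_1G,   64 * SZ_1G, 128 * SZ_1M },
		{   64 * SZ_1G, 1024 * SZ_1G, 256 * SZ_1M },
		{ 1024 * SZ_1G,            0, 512 * SZ_1M },
	};

	static unsigned long long crash_size(unsigned long long ram)
	{
		for (unsigned int i = 0; i < sizeof(table) / sizeof(table[0]); i++)
			if (ram >= table[i].start &&
			    (!table[i].end || ram < table[i].end))
				return table[i].size;
		return 0;	/* below the first range: reserve nothing */
	}

	int main(void)
	{
		/* 16G of RAM falls in the 1G-64G range, so 128M is reserved. */
		printf("%lluM\n", crash_size(16 * SZ_1G) / SZ_1M);
		return 0;
	}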
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 570fa52eb6f0..f0b17d758912 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -11,6 +11,12 @@ config ARM64 select ACPI_PPTT if ACPI select ARCH_HAS_DEBUG_WX select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2 + select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DMA_PREP_COHERENT @@ -67,10 +73,12 @@ config ARM64 select ARCH_KEEP_MEMBLOCK select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_GNU_PROPERTY + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS select ARCH_SUPPORTS_DEBUG_PAGEALLOC + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_MEMORY_FAILURE select ARCH_SUPPORTS_SHADOW_CALL_STACK if CC_HAVE_SHADOW_CALL_STACK select ARCH_SUPPORTS_LTO_CLANG if CPU_LITTLE_ENDIAN @@ -212,6 +220,7 @@ config ARM64 select SWIOTLB select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select HAVE_ARCH_USERFAULTFD_MINOR if USERFAULTFD help ARM 64-bit (AArch64) Linux support. @@ -307,10 +316,7 @@ config ZONE_DMA32 bool "Support DMA32 zone" if EXPERT default y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y config SMP @@ -1069,18 +1075,9 @@ config HW_PERF_EVENTS def_bool y depends on ARM_PMU -config SYS_SUPPORTS_HUGETLBFS - def_bool y - -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y if PGTABLE_LEVELS > 2 - # Supported by clang >= 7.0 config CC_HAVE_SHADOW_CALL_STACK def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18) @@ -1922,14 +1919,6 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on TRANSPARENT_HUGEPAGE - menu "Power management options" source "kernel/power/Kconfig" diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e6c7417bfb92..6d9915d066fa 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -250,8 +250,8 @@ static inline const void *__tag_set(const void *addr, u8 tag) #define arch_init_tags(max_tag) mte_init_tags(max_tag) #define arch_get_random_tag() mte_get_random_tag() #define arch_get_mem_tag(addr) mte_get_mem_tag(addr) -#define arch_set_mem_tag_range(addr, size, tag) \ - mte_set_mem_tag_range((addr), (size), (tag)) +#define arch_set_mem_tag_range(addr, size, tag, init) \ + mte_set_mem_tag_range((addr), (size), (tag), (init)) #endif /* CONFIG_KASAN_HW_TAGS */ /* diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h index 4acf8bf41cad..ddd4d17cf9a0 100644 --- a/arch/arm64/include/asm/mte-kasan.h +++ b/arch/arm64/include/asm/mte-kasan.h @@ -53,7 +53,8 @@ static inline u8 mte_get_random_tag(void) * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and * size must be non-zero and MTE_GRANULE_SIZE aligned. 
*/ -static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) +static inline void mte_set_mem_tag_range(void *addr, size_t size, + u8 tag, bool init) { u64 curr, end; @@ -63,18 +64,27 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) curr = (u64)__tag_set(addr, tag); end = curr + size; - do { - /* - * 'asm volatile' is required to prevent the compiler to move - * the statement outside of the loop. - */ - asm volatile(__MTE_PREAMBLE "stg %0, [%0]" - : - : "r" (curr) - : "memory"); - - curr += MTE_GRANULE_SIZE; - } while (curr != end); + /* + * 'asm volatile' is required to prevent the compiler to move + * the statement outside of the loop. + */ + if (init) { + do { + asm volatile(__MTE_PREAMBLE "stzg %0, [%0]" + : + : "r" (curr) + : "memory"); + curr += MTE_GRANULE_SIZE; + } while (curr != end); + } else { + do { + asm volatile(__MTE_PREAMBLE "stg %0, [%0]" + : + : "r" (curr) + : "memory"); + curr += MTE_GRANULE_SIZE; + } while (curr != end); + } } void mte_enable_kernel_sync(void); @@ -101,7 +111,8 @@ static inline u8 mte_get_random_tag(void) return 0xFF; } -static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag) +static inline void mte_set_mem_tag_range(void *addr, size_t size, + u8 tag, bool init) { } diff --git a/arch/arm64/include/asm/vmalloc.h b/arch/arm64/include/asm/vmalloc.h index 2ca708ab9b20..7a22aeea9bb5 100644 --- a/arch/arm64/include/asm/vmalloc.h +++ b/arch/arm64/include/asm/vmalloc.h @@ -1,4 +1,28 @@ #ifndef _ASM_ARM64_VMALLOC_H #define _ASM_ARM64_VMALLOC_H +#include <asm/page.h> + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + /* + * Only 4k granule supports level 1 block mappings. + * SW table walks can't handle removal of intermediate entries. 
+ */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + /* See arch_vmap_pud_supported() */ + return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +#endif + #endif /* _ASM_ARM64_VMALLOC_H */ diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S index 5bfd9b87f85d..4ea9392f86e0 100644 --- a/arch/arm64/kernel/sleep.S +++ b/arch/arm64/kernel/sleep.S @@ -134,7 +134,7 @@ SYM_FUNC_START(_cpu_resume) */ bl cpu_do_resume -#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) mov x0, sp bl kasan_unpoison_task_stack_below #endif diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c index 55ecf6de9ff7..58987a98e179 100644 --- a/arch/arm64/mm/hugetlbpage.c +++ b/arch/arm64/mm/hugetlbpage.c @@ -252,7 +252,7 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, set_pte(ptep, pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; @@ -284,9 +284,8 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, */ ptep = pte_alloc_map(mm, pmdp, addr); } else if (sz == PMD_SIZE) { - if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && - pud_none(READ_ONCE(*pudp))) - ptep = huge_pmd_share(mm, addr, pudp); + if (want_pmd_share(vma, addr) && pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, vma, addr, pudp); else ptep = (pte_t *)pmd_alloc(mm, pudp, addr); } else if (sz == (CONT_PMD_SIZE)) { diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 6cb22da2e226..0696a459ea95 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -494,8 +494,6 @@ void __init mem_init(void) /* this will put all unused low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can be * detected at build time already. @@ -524,7 +522,7 @@ void free_initmem(void) * prevents the region from being reused for kernel modules, which * is not supported by kallsyms. */ - unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); + vunmap_range((u64)__init_begin, (u64)__init_end); } void dump_mem_limit(void) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index d563335ad43f..70fa3cdbe841 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -1339,27 +1339,6 @@ void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} - -int __init arch_ioremap_pud_supported(void) -{ - /* - * Only 4k granule supports level 1 block mappings. - * SW table walks can't handle removal of intermediate entries. 
- */ - return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && - !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); -} - -int __init arch_ioremap_pmd_supported(void) -{ - /* See arch_ioremap_pud_supported() */ - return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); -} - int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) { pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); @@ -1451,11 +1430,6 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr) return 1; } -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; /* Don't attempt a block mapping */ -} - #ifdef CONFIG_MEMORY_HOTPLUG static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) { diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig index 34e91224adc3..8de5b987edb9 100644 --- a/arch/csky/Kconfig +++ b/arch/csky/Kconfig @@ -314,7 +314,7 @@ config FORCE_MAX_ZONEORDER int "Maximum zone order" default "11" -config RAM_BASE +config DRAM_BASE hex "DRAM start addr (the same with memory-section in dts)" default 0x0 diff --git a/arch/csky/abiv1/cacheflush.c b/arch/csky/abiv1/cacheflush.c index 9f1fe80cc847..07ff17ea33de 100644 --- a/arch/csky/abiv1/cacheflush.c +++ b/arch/csky/abiv1/cacheflush.c @@ -4,6 +4,7 @@ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <linux/syscalls.h> #include <linux/spinlock.h> #include <asm/page.h> diff --git a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 3b91fc3cf36f..ed7451478b1b 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -28,7 +28,7 @@ #define SSEG_SIZE 0x20000000 #define LOWMEM_LIMIT (SSEG_SIZE * 2) -#define PHYS_OFFSET_OFFSET (CONFIG_RAM_BASE & (SSEG_SIZE - 1)) +#define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) #ifndef __ASSEMBLY__ diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c index 894050a8ce09..bf2004aa811a 100644 --- a/arch/csky/mm/init.c +++ b/arch/csky/mm/init.c @@ -107,7 +107,6 @@ void __init mem_init(void) free_highmem_page(page); } #endif - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/h8300/include/asm/bitops.h b/arch/h8300/include/asm/bitops.h index 7aa16c732aa9..c867a80cab5b 100644 --- a/arch/h8300/include/asm/bitops.h +++ b/arch/h8300/include/asm/bitops.h @@ -9,6 +9,10 @@ #include <linux/compiler.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> + #ifdef __KERNEL__ #ifndef _LINUX_BITOPS_H @@ -173,8 +177,4 @@ static inline unsigned long __ffs(unsigned long word) #endif /* __KERNEL__ */ -#include <asm-generic/bitops/fls.h> -#include <asm-generic/bitops/__fls.h> -#include <asm-generic/bitops/fls64.h> - #endif /* _H8300_BITOPS_H */ diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c index 1f3b345d68b9..f7bf4693e3b2 100644 --- a/arch/h8300/mm/init.c +++ b/arch/h8300/mm/init.c @@ -98,6 +98,4 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - - mem_init_print_info(NULL); } diff --git a/arch/hexagon/mm/init.c b/arch/hexagon/mm/init.c index f2e6c868e477..f01e91e10d95 100644 --- a/arch/hexagon/mm/init.c +++ b/arch/hexagon/mm/init.c @@ -55,7 +55,6 @@ void __init mem_init(void) { /* No idea where this is actually declared. Seems to evade LXR. 
*/ memblock_free_all(); - mem_init_print_info(NULL); /* * To-Do: someone somewhere should wipe out the bootmem map diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 2ad7a8d29fcc..279252e3e0f7 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -13,6 +13,8 @@ config IA64 select ARCH_MIGHT_HAVE_PC_SERIO select ACPI select ACPI_NUMA if NUMA + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_SUPPORTS_ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI @@ -32,6 +34,7 @@ config IA64 select TTY select HAVE_ARCH_TRACEHOOK select HAVE_VIRT_CPU_ACCOUNTING + select HUGETLB_PAGE_SIZE_VARIABLE if HUGETLB_PAGE select VIRT_TO_BUS select GENERIC_IRQ_PROBE select GENERIC_PENDING_IRQ if SMP @@ -82,11 +85,6 @@ config STACKTRACE_SUPPORT config GENERIC_LOCKBREAK def_bool n -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE - default y - config GENERIC_CALIBRATE_DELAY bool default y @@ -250,12 +248,6 @@ config HOTPLUG_CPU can be controlled through /sys/devices/system/cpu/cpu#. Say N if you want to disable CPU hotplug. -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config SCHED_SMT bool "SMT scheduler support" depends on SMP @@ -286,15 +278,6 @@ config FORCE_CPEI_RETARGET config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_DISCONTIGMEM_ENABLE - def_bool y - depends on BROKEN - help - Say Y to support efficient handling of discontiguous physical memory, - for architectures which are either NUMA (Non-Uniform Memory Access) - or have huge holes in the physical address space for other reasons. - See <file:Documentation/vm/numa.rst> for more. - config ARCH_FLATMEM_ENABLE def_bool y @@ -325,22 +308,8 @@ config NODES_SHIFT MAX_NUMNODES will be 2^(This value). If in doubt, use the default. -# VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent. -# VIRTUAL_MEM_MAP has been retained for historical reasons. -config VIRTUAL_MEM_MAP - bool "Virtual mem map" - depends on !SPARSEMEM && !FLATMEM - default y - help - Say Y to compile the kernel with support for a virtual mem map. - This code also only takes effect if a memory hole of greater than - 1 Gb is found during boot. You must turn this option on if you - require the DISCONTIGMEM option for your machine. If you are - unsure, say Y. 
- config HOLES_IN_ZONE bool - default y if VIRTUAL_MEM_MAP config HAVE_ARCH_NODEDATA_EXTENSION def_bool y diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig index c409756b5396..0341a67cc1bf 100644 --- a/arch/ia64/configs/bigsur_defconfig +++ b/arch/ia64/configs/bigsur_defconfig @@ -9,7 +9,6 @@ CONFIG_SGI_PARTITION=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_PREEMPT=y -# CONFIG_VIRTUAL_MEM_MAP is not set CONFIG_IA64_PALINFO=y CONFIG_EFI_VARS=y CONFIG_BINFMT_MISC=m diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig index ca0d596c800d..8916a2850c48 100644 --- a/arch/ia64/configs/generic_defconfig +++ b/arch/ia64/configs/generic_defconfig @@ -55,8 +55,6 @@ CONFIG_CHR_DEV_SG=m CONFIG_SCSI_FC_ATTRS=y CONFIG_SCSI_SYM53C8XX_2=y CONFIG_SCSI_QLOGIC_1280=y -CONFIG_ATA=y -CONFIG_ATA_PIIX=y CONFIG_SATA_VITESSE=y CONFIG_MD=y CONFIG_BLK_DEV_MD=m diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h index e789c0818edb..6c47a239fc26 100644 --- a/arch/ia64/include/asm/meminit.h +++ b/arch/ia64/include/asm/meminit.h @@ -58,15 +58,4 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end); extern int register_active_ranges(u64 start, u64 len, int nid); -#ifdef CONFIG_VIRTUAL_MEM_MAP - extern unsigned long VMALLOC_END; - extern struct page *vmem_map; - extern int create_mem_map_page_table(u64 start, u64 end, void *arg); - extern int vmemmap_find_next_valid_pfn(int, int); -#else -static inline int vmemmap_find_next_valid_pfn(int node, int i) -{ - return i + 1; -} -#endif #endif /* meminit_h */ diff --git a/arch/ia64/include/asm/module.h b/arch/ia64/include/asm/module.h index 5a29652e6def..7271b9c5fc76 100644 --- a/arch/ia64/include/asm/module.h +++ b/arch/ia64/include/asm/module.h @@ -14,16 +14,20 @@ struct elf64_shdr; /* forward declration */ struct mod_arch_specific { + /* Used only at module load time. */ struct elf64_shdr *core_plt; /* core PLT section */ struct elf64_shdr *init_plt; /* init PLT section */ struct elf64_shdr *got; /* global offset table */ struct elf64_shdr *opd; /* official procedure descriptors */ struct elf64_shdr *unwind; /* unwind-table section */ unsigned long gp; /* global-pointer for module */ + unsigned int next_got_entry; /* index of next available got entry */ + /* Used at module run and cleanup time. 
*/ void *core_unw_table; /* core unwind-table cookie returned by unwinder */ void *init_unw_table; /* init unwind-table cookie returned by unwinder */ - unsigned int next_got_entry; /* index of next available got entry */ + void *opd_addr; /* symbolize uses .opd to get to actual function */ + unsigned long opd_size; }; #define ARCH_SHF_SMALL SHF_IA_64_SHORT diff --git a/arch/ia64/include/asm/page.h b/arch/ia64/include/asm/page.h index b69a5499d75b..f4dc81fa7146 100644 --- a/arch/ia64/include/asm/page.h +++ b/arch/ia64/include/asm/page.h @@ -95,31 +95,10 @@ do { \ #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern int ia64_pfn_valid (unsigned long pfn); -#else -# define ia64_pfn_valid(pfn) 1 -#endif - -#ifdef CONFIG_VIRTUAL_MEM_MAP -extern struct page *vmem_map; -#ifdef CONFIG_DISCONTIGMEM -# define page_to_pfn(page) ((unsigned long) (page - vmem_map)) -# define pfn_to_page(pfn) (vmem_map + (pfn)) -# define __pfn_to_phys(pfn) PFN_PHYS(pfn) -#else -# include <asm-generic/memory_model.h> -#endif -#else -# include <asm-generic/memory_model.h> -#endif +#include <asm-generic/memory_model.h> #ifdef CONFIG_FLATMEM -# define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn)) -#elif defined(CONFIG_DISCONTIGMEM) -extern unsigned long min_low_pfn; -extern unsigned long max_low_pfn; -# define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn)) +# define pfn_valid(pfn) ((pfn) < max_mapnr) #endif #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 9b4efe89e62d..d765fd948fae 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -223,10 +223,6 @@ ia64_phys_addr_valid (unsigned long addr) #define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL) -#ifdef CONFIG_VIRTUAL_MEM_MAP -# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) -extern unsigned long VMALLOC_END; -#else #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP) /* SPARSEMEM_VMEMMAP uses half of vmalloc... */ # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 10))) @@ -234,7 +230,6 @@ extern unsigned long VMALLOC_END; #else # define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9))) #endif -#endif /* fs/proc/kcore.c */ #define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE)) @@ -328,7 +323,7 @@ extern void __ia64_sync_icache_dcache(pte_t pteval); static inline void set_pte(pte_t *ptep, pte_t pteval) { /* page is present && page is user && page is executable - * && (page swapin or new page or page migraton + * && (page swapin or new page or page migration * || copy_on_write with page copying.) 
*/ if (pte_present_exec_user(pteval) && diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 78717819131c..08d4a2ba0652 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -9,7 +9,7 @@ endif extra-y := head.o vmlinux.lds -obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ +obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o pal.o patch.o process.o ptrace.o sal.o \ salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ unwind.o mca.o mca_asm.o topology.o dma-mapping.o iosapic.o acpi.o \ diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a5636524af76..e2af6b172200 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -446,7 +446,8 @@ void __init acpi_numa_fixup(void) if (srat_num_cpus == 0) { node_set_online(0); node_cpuid[0].phys_id = hard_smp_processor_id(); - return; + slit_distance(0, 0) = LOCAL_DISTANCE; + goto out; } /* @@ -489,7 +490,7 @@ void __init acpi_numa_fixup(void) for (j = 0; j < MAX_NUMNODES; j++) slit_distance(i, j) = i == j ? LOCAL_DISTANCE : REMOTE_DISTANCE; - return; + goto out; } memset(numa_slit, -1, sizeof(numa_slit)); @@ -514,6 +515,8 @@ void __init acpi_numa_fixup(void) printk("\n"); } #endif +out: + node_possible_map = node_online_map; } #endif /* CONFIG_ACPI_NUMA */ diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index c5fe21de46a8..31149e41f9be 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -415,10 +415,10 @@ efi_get_pal_addr (void) mask = ~((1 << IA64_GRANULE_SHIFT) - 1); printk(KERN_INFO "CPU %d: mapping PAL code " - "[0x%lx-0x%lx) into [0x%lx-0x%lx)\n", - smp_processor_id(), md->phys_addr, - md->phys_addr + efi_md_size(md), - vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); + "[0x%llx-0x%llx) into [0x%llx-0x%llx)\n", + smp_processor_id(), md->phys_addr, + md->phys_addr + efi_md_size(md), + vaddr & mask, (vaddr & mask) + IA64_GRANULE_SIZE); #endif return __va(md->phys_addr); } @@ -560,6 +560,7 @@ efi_init (void) { efi_memory_desc_t *md; void *p; + unsigned int i; for (i = 0, p = efi_map_start; p < efi_map_end; ++i, p += efi_desc_size) @@ -586,7 +587,7 @@ efi_init (void) } printk("mem%02d: %s " - "range=[0x%016lx-0x%016lx) (%4lu%s)\n", + "range=[0x%016llx-0x%016llx) (%4lu%s)\n", i, efi_md_typeattr_format(buf, sizeof(buf), md), md->phys_addr, md->phys_addr + efi_md_size(md), size, unit); diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index 0750a716adc7..2094f3249019 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -172,7 +172,7 @@ ENTRY(fsys_gettimeofday) // r25 = itc_lastcycle value // r26 = address clocksource cycle_last // r27 = (not used) - // r28 = sequence number at the beginning of critcal section + // r28 = sequence number at the beginning of critical section // r29 = address of itc_jitter // r30 = time processing flags / memory address // r31 = pointer to result @@ -432,7 +432,7 @@ GLOBAL_ENTRY(fsys_bubble_down) * - r29: psr * * We used to clear some PSR bits here but that requires slow - * serialization. Fortuntely, that isn't really necessary. + * serialization. Fortunately, that isn't really necessary. * The rationale is as follows: we used to clear bits * ~PSR_PRESERVED_BITS in PSR.L. 
Since * PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 30f1ef760136..f22469f1c1fc 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -33,7 +33,6 @@ #include <asm/mca_asm.h> #include <linux/init.h> #include <linux/linkage.h> -#include <linux/pgtable.h> #include <asm/export.h> #ifdef CONFIG_HOTPLUG_CPU @@ -405,11 +404,6 @@ start_ap: // This is executed by the bootstrap processor (bsp) only: -#ifdef CONFIG_IA64_FW_EMU - // initialize PAL & SAL emulator: - br.call.sptk.many rp=sys_fw_init -.ret1: -#endif br.call.sptk.many rp=start_kernel .ret2: addl r3=@ltoff(halt_msg),gp ;; diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c deleted file mode 100644 index f8150ee74f29..000000000000 --- a/arch/ia64/kernel/ia64_ksyms.c +++ /dev/null @@ -1,12 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Architecture-specific kernel symbols - */ - -#if defined(CONFIG_VIRTUAL_MEM_MAP) || defined(CONFIG_DISCONTIGMEM) -#include <linux/compiler.h> -#include <linux/export.h> -#include <linux/memblock.h> -EXPORT_SYMBOL(min_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic code */ -#endif diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index af310dc8a356..4db9ca144fa5 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -143,7 +143,7 @@ void machine_kexec(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { -#if defined(CONFIG_DISCONTIGMEM) || defined(CONFIG_SPARSEMEM) +#if defined(CONFIG_SPARSEMEM) VMCOREINFO_SYMBOL(pgdat_list); VMCOREINFO_LENGTH(pgdat_list, MAX_NUMNODES); #endif diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index adf6521525f4..cdbac4b52f30 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -109,9 +109,9 @@ #include "irq.h" #if defined(IA64_MCA_DEBUG_INFO) -# define IA64_MCA_DEBUG(fmt...) printk(fmt) +# define IA64_MCA_DEBUG(fmt...) printk(fmt) #else -# define IA64_MCA_DEBUG(fmt...) +# define IA64_MCA_DEBUG(fmt...) do {} while (0) #endif #define NOTIFY_INIT(event, regs, arg, spin) \ diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c index 00a496cb346f..2cba53c1da82 100644 --- a/arch/ia64/kernel/module.c +++ b/arch/ia64/kernel/module.c @@ -905,9 +905,31 @@ register_unwind_table (struct module *mod) int module_finalize (const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *mod) { + struct mod_arch_specific *mas = &mod->arch; + DEBUGP("%s: init: entry=%p\n", __func__, mod->init); - if (mod->arch.unwind) + if (mas->unwind) register_unwind_table(mod); + + /* + * ".opd" was already relocated to the final destination. Store + * it's address for use in symbolizer. + */ + mas->opd_addr = (void *)mas->opd->sh_addr; + mas->opd_size = mas->opd->sh_size; + + /* + * Module relocation was already done at this point. Section + * headers are about to be deleted. Wipe out load-time context. 
+ */ + mas->core_plt = NULL; + mas->init_plt = NULL; + mas->got = NULL; + mas->opd = NULL; + mas->unwind = NULL; + mas->gp = 0; + mas->next_got_entry = 0; + return 0; } @@ -926,10 +948,9 @@ module_arch_cleanup (struct module *mod) void *dereference_module_function_descriptor(struct module *mod, void *ptr) { - Elf64_Shdr *opd = mod->arch.opd; + struct mod_arch_specific *mas = &mod->arch; - if (ptr < (void *)opd->sh_addr || - ptr >= (void *)(opd->sh_addr + opd->sh_size)) + if (ptr < mas->opd_addr || ptr >= mas->opd_addr + mas->opd_size) return ptr; return dereference_function_descriptor(ptr); diff --git a/arch/ia64/kernel/pal.S b/arch/ia64/kernel/pal.S index d3e22c018b68..06d01a070aae 100644 --- a/arch/ia64/kernel/pal.S +++ b/arch/ia64/kernel/pal.S @@ -86,7 +86,7 @@ GLOBAL_ENTRY(ia64_pal_call_static) mov ar.pfs = loc1 mov rp = loc0 ;; - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_static) EXPORT_SYMBOL(ia64_pal_call_static) @@ -194,7 +194,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_static) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_static) EXPORT_SYMBOL(ia64_pal_call_phys_static) @@ -252,7 +252,7 @@ GLOBAL_ENTRY(ia64_pal_call_phys_stacked) mov rp = loc0 ;; mov ar.rsc=loc4 // restore RSE configuration - srlz.d // seralize restoration of psr.l + srlz.d // serialize restoration of psr.l br.ret.sptk.many b0 END(ia64_pal_call_phys_stacked) EXPORT_SYMBOL(ia64_pal_call_phys_stacked) diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index 99a35039b548..c03f63c62ac4 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile @@ -7,6 +7,5 @@ obj-y := init.o fault.o tlb.o extable.o ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o -obj-$(CONFIG_DISCONTIGMEM) += discontig.o obj-$(CONFIG_SPARSEMEM) += discontig.o obj-$(CONFIG_FLATMEM) += contig.o diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 62fe80a16f42..42e025cfbd08 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -153,11 +153,7 @@ find_memory (void) efi_memmap_walk(find_max_min_low_pfn, NULL); max_pfn = max_low_pfn; -#ifdef CONFIG_VIRTUAL_MEM_MAP - efi_memmap_walk(filter_memory, register_active_ranges); -#else memblock_add_node(0, PFN_PHYS(max_low_pfn), 0); -#endif find_initrd(); diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 03b3a02375ff..791d4176e4a6 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -95,7 +95,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len, * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been * called yet. Note that node 0 will also count all non-existent cpus. */ -static int __meminit early_nr_cpus_node(int node) +static int early_nr_cpus_node(int node) { int cpu, n = 0; @@ -110,7 +110,7 @@ static int __meminit early_nr_cpus_node(int node) * compute_pernodesize - compute size of pernode data * @node: the node id. 
*/ -static unsigned long __meminit compute_pernodesize(int node) +static unsigned long compute_pernodesize(int node) { unsigned long pernodesize = 0, cpus; @@ -367,7 +367,7 @@ static void __init reserve_pernode_space(void) } } -static void __meminit scatter_node_data(void) +static void scatter_node_data(void) { pg_data_t **dst; int node; @@ -585,25 +585,6 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg) } } -static void __init virtual_map_init(void) -{ -#ifdef CONFIG_VIRTUAL_MEM_MAP - int node; - - VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * - sizeof(struct page)); - vmem_map = (struct page *) VMALLOC_END; - efi_memmap_walk(create_mem_map_page_table, NULL); - printk("Virtual mem_map starts at 0x%p\n", vmem_map); - - for_each_online_node(node) { - unsigned long pfn_offset = mem_data[node].min_pfn; - - NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset; - } -#endif -} - /** * paging_init - setup page tables * @@ -619,8 +600,6 @@ void __init paging_init(void) sparse_init(); - virtual_map_init(); - memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); max_zone_pfns[ZONE_DMA32] = max_dma; max_zone_pfns[ZONE_NORMAL] = max_low_pfn; diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c index cd9766d2b6e0..02de2e70c587 100644 --- a/arch/ia64/mm/fault.c +++ b/arch/ia64/mm/fault.c @@ -84,18 +84,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re if (faulthandler_disabled() || !mm) goto no_context; -#ifdef CONFIG_VIRTUAL_MEM_MAP - /* - * If fault is in region 5 and we are in the kernel, we may already - * have the mmap_lock (pfn_valid macro is called during mmap). There - * is no vma for region 5 addr's anyway, so skip getting the semaphore - * and go directly to the exception handling code. 
- */ - - if ((REGION_NUMBER(address) == 5) && !user_mode(regs)) - goto bad_area_no_up; -#endif - /* * This is to handle the kprobes on user space access instructions */ @@ -213,9 +201,6 @@ retry: bad_area: mmap_read_unlock(mm); -#ifdef CONFIG_VIRTUAL_MEM_MAP - bad_area_no_up: -#endif if ((isr & IA64_ISR_SP) || ((isr & IA64_ISR_NA) && (isr & IA64_ISR_CODE_MASK) == IA64_ISR_CODE_LFETCH)) { diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index b331f94d20ac..f993cb36c062 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -25,7 +25,8 @@ unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT; EXPORT_SYMBOL(hpage_shift); pte_t * -huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { unsigned long taddr = htlbpage_to_page(addr); pgd_t *pgd; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 16d0d7d22657..064a967a7b6e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -43,13 +43,6 @@ extern void ia64_tlb_init (void); unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL; -#ifdef CONFIG_VIRTUAL_MEM_MAP -unsigned long VMALLOC_END = VMALLOC_END_INIT; -EXPORT_SYMBOL(VMALLOC_END); -struct page *vmem_map; -EXPORT_SYMBOL(vmem_map); -#endif - struct page *zero_page_memmap_ptr; /* map entry for zero page */ EXPORT_SYMBOL(zero_page_memmap_ptr); @@ -373,212 +366,6 @@ void ia64_mmu_init(void *my_cpu_data) #endif } -#ifdef CONFIG_VIRTUAL_MEM_MAP -int vmemmap_find_next_valid_pfn(int node, int i) -{ - unsigned long end_address, hole_next_pfn; - unsigned long stop_address; - pg_data_t *pgdat = NODE_DATA(node); - - end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i]; - end_address = PAGE_ALIGN(end_address); - stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)]; - - do { - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - pgd = pgd_offset_k(end_address); - if (pgd_none(*pgd)) { - end_address += PGDIR_SIZE; - continue; - } - - p4d = p4d_offset(pgd, end_address); - if (p4d_none(*p4d)) { - end_address += P4D_SIZE; - continue; - } - - pud = pud_offset(p4d, end_address); - if (pud_none(*pud)) { - end_address += PUD_SIZE; - continue; - } - - pmd = pmd_offset(pud, end_address); - if (pmd_none(*pmd)) { - end_address += PMD_SIZE; - continue; - } - - pte = pte_offset_kernel(pmd, end_address); -retry_pte: - if (pte_none(*pte)) { - end_address += PAGE_SIZE; - pte++; - if ((end_address < stop_address) && - (end_address != ALIGN(end_address, 1UL << PMD_SHIFT))) - goto retry_pte; - continue; - } - /* Found next valid vmem_map page */ - break; - } while (end_address < stop_address); - - end_address = min(end_address, stop_address); - end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1; - hole_next_pfn = end_address / sizeof(struct page); - return hole_next_pfn - pgdat->node_start_pfn; -} - -int __init create_mem_map_page_table(u64 start, u64 end, void *arg) -{ - unsigned long address, start_page, end_page; - struct page *map_start, *map_end; - int node; - pgd_t *pgd; - p4d_t *p4d; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - start_page = (unsigned long) map_start & PAGE_MASK; - end_page = PAGE_ALIGN((unsigned long) map_end); - node = paddr_to_nid(__pa(start)); - - for (address = start_page; address < end_page; address += PAGE_SIZE) { - pgd = 
pgd_offset_k(address); - if (pgd_none(*pgd)) { - p4d = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!p4d) - goto err_alloc; - pgd_populate(&init_mm, pgd, p4d); - } - p4d = p4d_offset(pgd, address); - - if (p4d_none(*p4d)) { - pud = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pud) - goto err_alloc; - p4d_populate(&init_mm, p4d, pud); - } - pud = pud_offset(p4d, address); - - if (pud_none(*pud)) { - pmd = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pmd) - goto err_alloc; - pud_populate(&init_mm, pud, pmd); - } - pmd = pmd_offset(pud, address); - - if (pmd_none(*pmd)) { - pte = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, node); - if (!pte) - goto err_alloc; - pmd_populate_kernel(&init_mm, pmd, pte); - } - pte = pte_offset_kernel(pmd, address); - - if (pte_none(*pte)) { - void *page = memblock_alloc_node(PAGE_SIZE, PAGE_SIZE, - node); - if (!page) - goto err_alloc; - set_pte(pte, pfn_pte(__pa(page) >> PAGE_SHIFT, - PAGE_KERNEL)); - } - } - return 0; - -err_alloc: - panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d\n", - __func__, PAGE_SIZE, PAGE_SIZE, node); - return -ENOMEM; -} - -struct memmap_init_callback_data { - struct page *start; - struct page *end; - int nid; - unsigned long zone; -}; - -static int __meminit -virtual_memmap_init(u64 start, u64 end, void *arg) -{ - struct memmap_init_callback_data *args; - struct page *map_start, *map_end; - - args = (struct memmap_init_callback_data *) arg; - map_start = vmem_map + (__pa(start) >> PAGE_SHIFT); - map_end = vmem_map + (__pa(end) >> PAGE_SHIFT); - - if (map_start < args->start) - map_start = args->start; - if (map_end > args->end) - map_end = args->end; - - /* - * We have to initialize "out of bounds" struct page elements that fit completely - * on the same pages that were allocated for the "in bounds" elements because they - * may be referenced later (and found to be "reserved"). - */ - map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page); - map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end) - / sizeof(struct page)); - - if (map_start < map_end) - memmap_init_range((unsigned long)(map_end - map_start), - args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end), - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - return 0; -} - -void __meminit memmap_init_zone(struct zone *zone) -{ - int nid = zone_to_nid(zone), zone_id = zone_idx(zone); - unsigned long start_pfn = zone->zone_start_pfn; - unsigned long size = zone->spanned_pages; - - if (!vmem_map) { - memmap_init_range(size, nid, zone_id, start_pfn, start_pfn + size, - MEMINIT_EARLY, NULL, MIGRATE_MOVABLE); - } else { - struct page *start; - struct memmap_init_callback_data args; - - start = pfn_to_page(start_pfn); - args.start = start; - args.end = start + size; - args.nid = nid; - args.zone = zone_id; - - efi_memmap_walk(virtual_memmap_init, &args); - } -} - -int -ia64_pfn_valid (unsigned long pfn) -{ - char byte; - struct page *pg = pfn_to_page(pfn); - - return (__get_user(byte, (char __user *) pg) == 0) - && ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK)) - || (__get_user(byte, (char __user *) (pg + 1) - 1) == 0)); -} -EXPORT_SYMBOL(ia64_pfn_valid); - -#endif /* CONFIG_VIRTUAL_MEM_MAP */ - int __init register_active_ranges(u64 start, u64 len, int nid) { u64 end = start + len; @@ -644,13 +431,16 @@ mem_init (void) * _before_ any drivers that may need the PCI DMA interface are * initialized or bootmem has been freed. 
*/ + do { #ifdef CONFIG_INTEL_IOMMU - detect_intel_iommu(); - if (!iommu_detected) + detect_intel_iommu(); + if (iommu_detected) + break; #endif #ifdef CONFIG_SWIOTLB swiotlb_init(1); #endif + } while (0); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); @@ -659,7 +449,6 @@ mem_init (void) set_max_mapnr(max_low_pfn); high_memory = __va(max_low_pfn * PAGE_SIZE); memblock_free_all(); - mem_init_print_info(NULL); /* * For fsyscall entrpoints with no light-weight handler, use the ordinary diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index 10133a968c8e..7b414099e5fc 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -440,8 +440,6 @@ static inline unsigned long ffz(unsigned long word) #endif -#include <asm-generic/bitops/find.h> - #ifdef __KERNEL__ #if defined(CONFIG_CPU_HAS_NO_BITFIELDS) @@ -525,10 +523,12 @@ static inline int __fls(int x) #define __clear_bit_unlock clear_bit_unlock #include <asm-generic/bitops/ext2-atomic.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> #include <asm-generic/bitops/hweight.h> +#include <asm-generic/bitops/le.h> #endif /* __KERNEL__ */ +#include <asm-generic/bitops/find.h> + #endif /* _M68K_BITOPS_H */ diff --git a/arch/m68k/mm/init.c b/arch/m68k/mm/init.c index 14c1e541451c..1759ab875d47 100644 --- a/arch/m68k/mm/init.c +++ b/arch/m68k/mm/init.c @@ -153,5 +153,4 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); init_pointer_tables(); - mem_init_print_info(NULL); } diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c index 05cf1fb3f5ff..ab55c70380a5 100644 --- a/arch/microblaze/mm/init.c +++ b/arch/microblaze/mm/init.c @@ -131,7 +131,6 @@ void __init mem_init(void) highmem_setup(); #endif - mem_init_print_info(NULL); mem_init_done = 1; } diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index e9893cd34992..7a174ea61ca5 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -16,8 +16,10 @@ config MIPS select ARCH_SUPPORTS_UPROBES select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if 64BIT + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS + select ARCH_SUPPORTS_HUGETLBFS if CPU_SUPPORTS_HUGEPAGES select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_LD_ORPHAN_WARN @@ -1286,11 +1288,6 @@ config SYS_SUPPORTS_BIG_ENDIAN config SYS_SUPPORTS_LITTLE_ENDIAN bool -config SYS_SUPPORTS_HUGETLBFS - bool - depends on CPU_SUPPORTS_HUGEPAGES - default y - config MIPS_HUGE_TLB_SUPPORT def_bool HUGETLB_PAGE || TRANSPARENT_HUGEPAGE diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c index 8315c871c435..fa9b4a487a47 100644 --- a/arch/mips/loongson64/numa.c +++ b/arch/mips/loongson64/numa.c @@ -178,7 +178,6 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } /* All PCI device belongs to logical Node-0 */ diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c index 7719d632df8d..a7bf0c80371c 100644 --- a/arch/mips/mm/cache.c +++ b/arch/mips/mm/cache.c @@ -15,6 +15,7 @@ #include <linux/syscalls.h> #include <linux/mm.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/processor.h> diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c index b9f76f433617..7eaff5b07873 100644 
--- a/arch/mips/mm/hugetlbpage.c +++ b/arch/mips/mm/hugetlbpage.c @@ -21,8 +21,8 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, - unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pgd; p4d_t *p4d; diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 5cb73bf74a8b..c36358758969 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c @@ -467,7 +467,6 @@ void __init mem_init(void) memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. */ mem_init_free_highmem(); - mem_init_print_info(NULL); #ifdef CONFIG_64BIT if ((unsigned long) &_text > (unsigned long) CKSEG0) diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c index 87bb6945ec25..6173684b5aaa 100644 --- a/arch/mips/sgi-ip27/ip27-memory.c +++ b/arch/mips/sgi-ip27/ip27-memory.c @@ -420,5 +420,4 @@ void __init mem_init(void) high_memory = (void *) __va(get_num_physpages() << PAGE_SHIFT); memblock_free_all(); setup_zero_pages(); /* This comes from node 0 */ - mem_init_print_info(NULL); } diff --git a/arch/nds32/mm/init.c b/arch/nds32/mm/init.c index fa86f7b2f416..f63f839738c4 100644 --- a/arch/nds32/mm/init.c +++ b/arch/nds32/mm/init.c @@ -191,7 +191,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" diff --git a/arch/nios2/mm/cacheflush.c b/arch/nios2/mm/cacheflush.c index 65de1bd6a760..6aa9257c3ede 100644 --- a/arch/nios2/mm/cacheflush.c +++ b/arch/nios2/mm/cacheflush.c @@ -11,6 +11,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/fs.h> +#include <linux/pagemap.h> #include <asm/cacheflush.h> #include <asm/cpuinfo.h> diff --git a/arch/nios2/mm/init.c b/arch/nios2/mm/init.c index 61862dbb0e32..613fcaa5988a 100644 --- a/arch/nios2/mm/init.c +++ b/arch/nios2/mm/init.c @@ -71,7 +71,6 @@ void __init mem_init(void) /* this will put all memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); } void __init mmu_init(void) diff --git a/arch/openrisc/mm/init.c b/arch/openrisc/mm/init.c index bf9b2310fc93..d5641198b90c 100644 --- a/arch/openrisc/mm/init.c +++ b/arch/openrisc/mm/init.c @@ -211,8 +211,6 @@ void __init mem_init(void) /* this will put all low memory onto the freelists */ memblock_free_all(); - mem_init_print_info(NULL); - printk("mem_init_done ...........................................\n"); mem_init_done = 1; return; diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index afc3b8d03572..bde9907bc5b2 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -12,6 +12,7 @@ config PARISC select ARCH_HAS_STRICT_KERNEL_RWX select ARCH_HAS_UBSAN_SANITIZE_ALL select ARCH_NO_SG_CHAIN + select ARCH_SUPPORTS_HUGETLBFS if PA20 select ARCH_SUPPORTS_MEMORY_FAILURE select DMA_OPS select RTC_CLASS @@ -138,10 +139,6 @@ config PGTABLE_LEVELS default 3 if 64BIT && PARISC_PAGE_SIZE_4KB default 2 -config SYS_SUPPORTS_HUGETLBFS - def_bool y if PA20 - - menu "Processor type and features" choice diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c index 43652de5f139..d1d3990b83f6 100644 --- a/arch/parisc/mm/hugetlbpage.c +++ b/arch/parisc/mm/hugetlbpage.c @@ -44,7 +44,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr, } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, 
struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 9ca4e4ff6895..591a4e939415 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -573,8 +573,6 @@ void __init mem_init(void) #endif parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START); - mem_init_print_info(NULL); - #if 0 /* * Do not expose the virtual kernel memory layout to userspace. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 0b1b6732d8af..ec30434e1ad8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -118,6 +118,8 @@ config PPC # Please keep this list sorted alphabetically. # select ARCH_32BIT_OFF_T if PPC32 + select ARCH_ENABLE_MEMORY_HOTPLUG + select ARCH_ENABLE_MEMORY_HOTREMOVE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -151,6 +153,7 @@ config PPC select ARCH_SUPPORTS_DEBUG_PAGEALLOC if PPC32 || PPC_BOOK3S_64 select ARCH_USE_BUILTIN_BSWAP select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_SPINLOCKS if PPC_QUEUED_SPINLOCKS select ARCH_WANT_IPC_PARSE_VERSION @@ -183,6 +186,7 @@ config PPC select GENERIC_GETTIMEOFDAY select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HUGE_VMAP if PPC_BOOK3S_64 && PPC_RADIX_MMU + select HAVE_ARCH_HUGE_VMALLOC if HAVE_ARCH_HUGE_VMAP select HAVE_ARCH_JUMP_LABEL select HAVE_ARCH_JUMP_LABEL_RELATIVE select HAVE_ARCH_KASAN if PPC32 && PPC_PAGE_SHIFT <= 14 @@ -234,6 +238,7 @@ config PPC select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HUGETLB_PAGE_SIZE_VARIABLE if PPC_BOOK3S_64 && HUGETLB_PAGE select MMU_GATHER_RCU_TABLE_FREE select MMU_GATHER_PAGE_SIZE select HAVE_REGS_AND_STACK_ACCESS_API @@ -418,11 +423,6 @@ config HIGHMEM source "kernel/Kconfig.hz" -config HUGETLB_PAGE_SIZE_VARIABLE - bool - depends on HUGETLB_PAGE && PPC_BOOK3S_64 - default y - config MATH_EMULATION bool "Math emulation" depends on 4xx || PPC_8xx || PPC_MPC832x || BOOKE @@ -518,12 +518,6 @@ config ARCH_CPU_PROBE_RELEASE def_bool y depends on HOTPLUG_CPU -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - config PPC64_SUPPORTS_MEMORY_FAILURE bool "Add support for memory hwpoison" depends on PPC_BOOK3S_64 @@ -703,9 +697,6 @@ config ARCH_SPARSEMEM_DEFAULT def_bool y depends on PPC_BOOK3S_64 -config SYS_SUPPORTS_HUGETLBFS - bool - config ILLEGAL_POINTER_VALUE hex # This is roughly half way between the top of user space and the bottom diff --git a/arch/powerpc/include/asm/vmalloc.h b/arch/powerpc/include/asm/vmalloc.h index b992dfaaa161..4c69ece52a31 100644 --- a/arch/powerpc/include/asm/vmalloc.h +++ b/arch/powerpc/include/asm/vmalloc.h @@ -1,4 +1,24 @@ #ifndef _ASM_POWERPC_VMALLOC_H #define _ASM_POWERPC_VMALLOC_H +#include <asm/mmu.h> +#include <asm/page.h> + +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + /* HPT does not cope with large pages in the vmalloc area */ + return radix_enabled(); +} + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return radix_enabled(); +} + +#endif + #endif /* _ASM_POWERPC_VMALLOC_H */ diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c index 
2257d24e6a26..39c625737c09 100644 --- a/arch/powerpc/kernel/isa-bridge.c +++ b/arch/powerpc/kernel/isa-bridge.c @@ -48,7 +48,7 @@ static void remap_isa_base(phys_addr_t pa, unsigned long size) if (slab_is_available()) { if (ioremap_page_range(ISA_IO_BASE, ISA_IO_BASE + size, pa, pgprot_noncached(PAGE_KERNEL))) - unmap_kernel_range(ISA_IO_BASE, size); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + size); } else { early_ioremap_range(ISA_IO_BASE, pa, size, pgprot_noncached(PAGE_KERNEL)); @@ -311,7 +311,7 @@ static void isa_bridge_remove(void) isa_bridge_pcidev = NULL; /* Unmap the ISA area */ - unmap_kernel_range(ISA_IO_BASE, 0x10000); + vunmap_range(ISA_IO_BASE, ISA_IO_BASE + 0x10000); } /** diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c index a211b0253cdb..cdb2d88c54e7 100644 --- a/arch/powerpc/kernel/module.c +++ b/arch/powerpc/kernel/module.c @@ -8,6 +8,7 @@ #include <linux/moduleloader.h> #include <linux/err.h> #include <linux/vmalloc.h> +#include <linux/mm.h> #include <linux/bug.h> #include <asm/module.h> #include <linux/uaccess.h> @@ -87,13 +88,26 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } -#ifdef MODULES_VADDR void *module_alloc(unsigned long size) { + unsigned long start = VMALLOC_START; + unsigned long end = VMALLOC_END; + +#ifdef MODULES_VADDR BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); + start = MODULES_VADDR; + end = MODULES_END; +#endif + + /* + * Don't do huge page allocations for modules yet until more testing + * is done. STRICT_MODULE_RWX may require extra work to support this + * too. + */ - return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, GFP_KERNEL, - PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS, NUMA_NO_NODE, + return __vmalloc_node_range(size, 1, start, end, GFP_KERNEL, + PAGE_KERNEL_EXEC, + VM_NO_HUGE_VMAP | VM_FLUSH_RESET_PERMS, + NUMA_NO_NODE, __builtin_return_address(0)); } -#endif diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 9312e6eda7ff..3fb7e572abed 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c @@ -140,7 +140,7 @@ void __iomem *ioremap_phb(phys_addr_t paddr, unsigned long size) addr = (unsigned long)area->addr; if (ioremap_page_range(addr, addr + size, paddr, pgprot_noncached(PAGE_KERNEL))) { - unmap_kernel_range(addr, size); + vunmap_range(addr, addr + size); return NULL; } diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c index 55f26c0e389e..a6d7c6656172 100644 --- a/arch/powerpc/mm/book3s64/radix_pgtable.c +++ b/arch/powerpc/mm/book3s64/radix_pgtable.c @@ -1082,22 +1082,6 @@ void radix__ptep_modify_prot_commit(struct vm_area_struct *vma, set_pte_at(mm, addr, ptep, pte); } -int __init arch_ioremap_pud_supported(void) -{ - /* HPT does not cope with large pages in the vmalloc area */ - return radix_enabled(); -} - -int __init arch_ioremap_pmd_supported(void) -{ - return radix_enabled(); -} - -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; -} - int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot) { pte_t *ptep = (pte_t *)pud; @@ -1181,8 +1165,3 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) return 1; } - -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index d142b76d507d..9a75ba078e1b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c @@ -106,7 +106,8 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp, * At this point we do the placement 
change only for BOOK3S 64. This would * possibly work on other subarchs. */ -pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz) +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, unsigned long sz) { pgd_t *pg; p4d_t *p4; diff --git a/arch/powerpc/mm/ioremap.c b/arch/powerpc/mm/ioremap.c index b1a0aebe8c48..57342154d2b0 100644 --- a/arch/powerpc/mm/ioremap.c +++ b/arch/powerpc/mm/ioremap.c @@ -93,7 +93,7 @@ void __iomem *do_ioremap(phys_addr_t pa, phys_addr_t offset, unsigned long size, if (!ret) return (void __iomem *)area->addr + offset; - unmap_kernel_range(va, size); + vunmap_range(va, va + size); free_vm_area(area); return NULL; diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 7a59a5c9aa5d..fd21679e647d 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -312,7 +312,6 @@ void __init mem_init(void) (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; #endif - mem_init_print_info(NULL); #ifdef CONFIG_PPC32 pr_info("Kernel virtual memory layout:\n"); #ifdef CONFIG_KASAN diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 9240743caefc..ff4350a78e47 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -40,8 +40,8 @@ config PPC_85xx config PPC_8xx bool "Freescale 8xx" + select ARCH_SUPPORTS_HUGETLBFS select FSL_SOC - select SYS_SUPPORTS_HUGETLBFS select PPC_HAVE_KUEP select PPC_HAVE_KUAP select HAVE_ARCH_VMAP_STACK @@ -95,9 +95,11 @@ config PPC_BOOK3S_64 bool "Server processors" select PPC_FPU select PPC_HAVE_PMU_SUPPORT - select SYS_SUPPORTS_HUGETLBFS select HAVE_ARCH_TRANSPARENT_HUGEPAGE + select ARCH_ENABLE_HUGEPAGE_MIGRATION if HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_PMD_SPLIT_PTLOCK select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE + select ARCH_SUPPORTS_HUGETLBFS select ARCH_SUPPORTS_NUMA_BALANCING select IRQ_WORK select PPC_MM_SLICES @@ -280,9 +282,9 @@ config FSL_BOOKE # this is for common code between PPC32 & PPC64 FSL BOOKE config PPC_FSL_BOOK3E bool + select ARCH_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select FSL_EMB_PERFMON select PPC_SMP_MUXED_IPI - select SYS_SUPPORTS_HUGETLBFS if PHYS_64BIT || PPC64 select PPC_DOORBELL default y if FSL_BOOKE @@ -357,10 +359,6 @@ config SPE If in doubt, say Y here. 
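The powerpc hunks above (remap_isa_base(), ioremap_phb(), do_ioremap()) convert unmap_kernel_range(addr, size) callers to vunmap_range(addr, addr + size): the second argument changes meaning from a length to an end address. A minimal sketch of the resulting error-path pattern, where map_io_window() is a hypothetical helper name, not part of the patch:

static void __iomem *map_io_window(unsigned long va, phys_addr_t pa,
				   unsigned long size)
{
	if (ioremap_page_range(va, va + size, pa,
			       pgprot_noncached(PAGE_KERNEL))) {
		/* Tear down any partial mapping: end address, not size. */
		vunmap_range(va, va + size);
		return NULL;
	}
	return (void __iomem *)va;
}

Passing a size where the end address belongs would silently unmap the wrong range, which is exactly the slip the new signature makes easy to spot in review.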
-config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on PPC_BOOK3S_64 - config PPC_RADIX_MMU bool "Radix MMU Support" depends on PPC_BOOK3S_64 @@ -420,10 +418,6 @@ config PPC_PKEY depends on PPC_BOOK3S_64 depends on PPC_MEM_KEYS || PPC_KUAP || PPC_KUEP -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on PPC_BOOK3S_64 && HUGETLB_PAGE && MIGRATION - config PPC_MMU_NOHASH def_bool y diff --git a/arch/powerpc/sysdev/xive/common.c b/arch/powerpc/sysdev/xive/common.c index 595310e056f4..d6c2069cc828 100644 --- a/arch/powerpc/sysdev/xive/common.c +++ b/arch/powerpc/sysdev/xive/common.c @@ -959,16 +959,12 @@ EXPORT_SYMBOL_GPL(is_xive_irq); void xive_cleanup_irq_data(struct xive_irq_data *xd) { if (xd->eoi_mmio) { - unmap_kernel_range((unsigned long)xd->eoi_mmio, - 1u << xd->esb_shift); iounmap(xd->eoi_mmio); if (xd->eoi_mmio == xd->trig_mmio) xd->trig_mmio = NULL; xd->eoi_mmio = NULL; } if (xd->trig_mmio) { - unmap_kernel_range((unsigned long)xd->trig_mmio, - 1u << xd->esb_shift); iounmap(xd->trig_mmio); xd->trig_mmio = NULL; } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index c338f82428a1..424b7353554d 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -32,6 +32,7 @@ config RISCV select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT + select ARCH_SUPPORTS_HUGETLBFS if MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_HUGE_PMD_SHARE if 64BIT @@ -168,10 +169,6 @@ config ARCH_WANT_GENERAL_HUGETLB config ARCH_SUPPORTS_UPROBES def_bool y -config SYS_SUPPORTS_HUGETLBFS - depends on MMU - def_bool y - config STACKTRACE_SUPPORT def_bool y diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index 7f5036fbee8c..3c5ee3b7d811 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -102,7 +102,6 @@ void __init mem_init(void) high_memory = (void *)(__va(PFN_PHYS(max_low_pfn))); memblock_free_all(); - mem_init_print_info(NULL); print_vm_layout(); } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index c1ff874e6c2e..d72989591223 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -60,6 +60,9 @@ config S390 imply IMA_SECURE_AND_OR_TRUSTED_BOOT select ARCH_32BIT_USTAT_F_TINODE select ARCH_BINFMT_ELF_STATE + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM + select ARCH_ENABLE_MEMORY_HOTREMOVE + select ARCH_ENABLE_SPLIT_PMD_PTLOCK select ARCH_HAS_DEBUG_VM_PGTABLE select ARCH_HAS_DEBUG_WX select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -626,15 +629,6 @@ config ARCH_SPARSEMEM_ENABLE config ARCH_SPARSEMEM_DEFAULT def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y if SPARSEMEM - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - config MAX_PHYSMEM_BITS int "Maximum size of supported physical memory in bits (42-53)" range 42 53 diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c index 3b5a4d25ca9b..da36d13ffc16 100644 --- a/arch/s390/mm/hugetlbpage.c +++ b/arch/s390/mm/hugetlbpage.c @@ -189,7 +189,7 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm, return pte; } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgdp; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 0e76b2127dc6..8ac710de1ab1 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -209,8 +209,6 @@ void __init mem_init(void) 
setup_zero_pages(); /* Setup zeroed pages. */ cmma_init_nodat(); - - mem_init_print_info(NULL); } void free_initmem(void) diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e798e55915c2..68129537e350 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -2,6 +2,8 @@ config SUPERH def_bool y select ARCH_32BIT_OFF_T + select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU + select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU @@ -101,9 +103,6 @@ config SYS_SUPPORTS_APM_EMULATION bool select ARCH_SUSPEND_POSSIBLE -config SYS_SUPPORTS_HUGETLBFS - bool - config SYS_SUPPORTS_SMP bool @@ -175,12 +174,12 @@ config CPU_SH3 config CPU_SH4 bool + select ARCH_SUPPORTS_HUGETLBFS if MMU select CPU_HAS_INTEVT select CPU_HAS_SR_RB select CPU_HAS_FPU if !CPU_SH4AL_DSP select SH_INTC select SYS_SUPPORTS_SH_TMU - select SYS_SUPPORTS_HUGETLBFS if MMU config CPU_SH4A bool diff --git a/arch/sh/include/asm/bitops.h b/arch/sh/include/asm/bitops.h index 450b5854d7c6..3b6c7b5b7ec9 100644 --- a/arch/sh/include/asm/bitops.h +++ b/arch/sh/include/asm/bitops.h @@ -58,15 +58,16 @@ static inline unsigned long __ffs(unsigned long word) return result; } -#include <asm-generic/bitops/find.h> #include <asm-generic/bitops/ffs.h> #include <asm-generic/bitops/hweight.h> #include <asm-generic/bitops/lock.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/le.h> #include <asm-generic/bitops/ext2-atomic.h> #include <asm-generic/bitops/fls.h> #include <asm-generic/bitops/__fls.h> #include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/find.h> + #endif /* __ASM_SH_BITOPS_H */ diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h index 360f713d009b..aeb8915e9254 100644 --- a/arch/sh/include/asm/tlb.h +++ b/arch/sh/include/asm/tlb.h @@ -4,12 +4,11 @@ #ifndef __ASSEMBLY__ #include <linux/pagemap.h> +#include <asm-generic/tlb.h> #ifdef CONFIG_MMU #include <linux/swap.h> -#include <asm-generic/tlb.h> - #if defined(CONFIG_CPU_SH4) extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t); extern void tlb_unwire_entry(void); @@ -24,12 +23,7 @@ static inline void tlb_unwire_entry(void) { BUG(); } -#endif - -#else /* CONFIG_MMU */ - -#include <asm-generic/tlb.h> - +#endif /* CONFIG_CPU_SH4 */ #endif /* CONFIG_MMU */ #endif /* __ASSEMBLY__ */ #endif /* __ASM_SH_TLB_H */ diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig index 77aa2f802d8d..d551a9cac41e 100644 --- a/arch/sh/mm/Kconfig +++ b/arch/sh/mm/Kconfig @@ -136,14 +136,6 @@ config ARCH_SPARSEMEM_DEFAULT config ARCH_SELECT_MEMORY_MODEL def_bool y -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on SPARSEMEM && MMU - -config ARCH_ENABLE_MEMORY_HOTREMOVE - def_bool y - depends on SPARSEMEM && MMU - config ARCH_MEMORY_PROBE def_bool y depends on MEMORY_HOTPLUG diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index ddfa9685f1ef..72c2e1b46c08 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c @@ -16,6 +16,7 @@ #include <linux/mutex.h> #include <linux/fs.h> #include <linux/highmem.h> +#include <linux/pagemap.h> #include <asm/mmu_context.h> #include <asm/cache_insns.h> #include <asm/cacheflush.h> diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c index 4c67b3d88775..9b63a53a5e46 100644 --- a/arch/sh/mm/cache-sh7705.c +++ b/arch/sh/mm/cache-sh7705.c @@ -13,6 +13,7 @@ #include <linux/mman.h> #include <linux/mm.h> #include 
<linux/fs.h> +#include <linux/pagemap.h> #include <linux/threads.h> #include <asm/addrspace.h> #include <asm/page.h> diff --git a/arch/sh/mm/hugetlbpage.c b/arch/sh/mm/hugetlbpage.c index 220d7bc43d2b..999ab5916e69 100644 --- a/arch/sh/mm/hugetlbpage.c +++ b/arch/sh/mm/hugetlbpage.c @@ -21,7 +21,7 @@ #include <asm/tlbflush.h> #include <asm/cacheflush.h> -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c index 0db6919af8d3..168d7d4dd735 100644 --- a/arch/sh/mm/init.c +++ b/arch/sh/mm/init.c @@ -359,7 +359,6 @@ void __init mem_init(void) vsyscall_init(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h index 632cdb959542..a5cf79c149fe 100644 --- a/arch/sparc/include/asm/pgtable_32.h +++ b/arch/sparc/include/asm/pgtable_32.h @@ -321,6 +321,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) pgprot_val(newprot)); } +/* only used by the huge vmap code, should never be called */ +#define pud_page(pud) NULL + struct seq_file; void mmu_info(struct seq_file *m); diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c index ad4b42f04988..04d8790f6c32 100644 --- a/arch/sparc/mm/hugetlbpage.c +++ b/arch/sparc/mm/hugetlbpage.c @@ -279,7 +279,7 @@ unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&p unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); } unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); } -pte_t *huge_pte_alloc(struct mm_struct *mm, +pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { pgd_t *pgd; diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c index 6139c5700ccc..1e9f577f084d 100644 --- a/arch/sparc/mm/init_32.c +++ b/arch/sparc/mm/init_32.c @@ -292,8 +292,6 @@ void __init mem_init(void) map_high_region(start_pfn, end_pfn); } - - mem_init_print_info(NULL); } void sparc_flush_page_to_ram(struct page *page) diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 182bb7bdaa0a..e454f179cf5d 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2520,7 +2520,6 @@ void __init mem_init(void) } mark_page_reserved(mem_map_zero); - mem_init_print_info(NULL); if (tlb_type == cheetah || tlb_type == cheetah_plus) cheetah_ecache_flush_init(); diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index 20ee14739333..9a725547578e 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/swap.h> #include <linux/preempt.h> +#include <linux/pagemap.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> diff --git a/arch/um/kernel/mem.c b/arch/um/kernel/mem.c index 9242dc91d751..9019ff5905b1 100644 --- a/arch/um/kernel/mem.c +++ b/arch/um/kernel/mem.c @@ -54,7 +54,6 @@ void __init mem_init(void) memblock_free_all(); max_low_pfn = totalram_pages(); max_pfn = max_low_pfn; - mem_init_print_info(NULL); kmalloc_ok = 1; } diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 413a60054d11..daecba1ce7a1 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -60,7 +60,13 @@ config X86 select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ARCH_32BIT_OFF_T if X86_32 select 
ARCH_CLOCKSOURCE_INIT + select ARCH_ENABLE_HUGEPAGE_MIGRATION if X86_64 && HUGETLB_PAGE && MIGRATION + select ARCH_ENABLE_MEMORY_HOTPLUG if X86_64 || (X86_32 && HIGHMEM) + select ARCH_ENABLE_MEMORY_HOTREMOVE if MEMORY_HOTPLUG + select ARCH_ENABLE_SPLIT_PMD_PTLOCK if X86_64 || X86_PAE + select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI + select ARCH_HAS_CACHE_LINE_SIZE select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEVMEM_IS_ALLOWED @@ -100,6 +106,7 @@ config X86 select ARCH_SUPPORTS_LTO_CLANG if X86_64 select ARCH_SUPPORTS_LTO_CLANG_THIN if X86_64 select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_SYM_ANNOTATIONS @@ -164,6 +171,7 @@ config X86 select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 select HAVE_ARCH_USERFAULTFD_WP if X86_64 && USERFAULTFD + select HAVE_ARCH_USERFAULTFD_MINOR if X86_64 && USERFAULTFD select HAVE_ARCH_VMAP_STACK if X86_64 select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET select HAVE_ARCH_WITHIN_STACK_FRAMES @@ -314,9 +322,6 @@ config GENERIC_CALIBRATE_DELAY config ARCH_HAS_CPU_RELAX def_bool y -config ARCH_HAS_CACHE_LINE_SIZE - def_bool y - config ARCH_HAS_FILTER_PGPROT def_bool y @@ -2426,30 +2431,13 @@ config ARCH_HAS_ADD_PAGES def_bool y depends on X86_64 && ARCH_ENABLE_MEMORY_HOTPLUG -config ARCH_ENABLE_MEMORY_HOTPLUG - def_bool y - depends on X86_64 || (X86_32 && HIGHMEM) - -config ARCH_ENABLE_MEMORY_HOTREMOVE +config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE def_bool y - depends on MEMORY_HOTPLUG config USE_PERCPU_NUMA_NODE_ID def_bool y depends on NUMA -config ARCH_ENABLE_SPLIT_PMD_PTLOCK - def_bool y - depends on X86_64 || X86_PAE - -config ARCH_ENABLE_HUGEPAGE_MIGRATION - def_bool y - depends on X86_64 && HUGETLB_PAGE && MIGRATION - -config ARCH_ENABLE_THP_MIGRATION - def_bool y - depends on X86_64 && TRANSPARENT_HUGEPAGE - menu "Power management and ACPI options" config ARCH_HIBERNATION_HEADER diff --git a/arch/x86/include/asm/vmalloc.h b/arch/x86/include/asm/vmalloc.h index 29837740b520..49ce331f3ac6 100644 --- a/arch/x86/include/asm/vmalloc.h +++ b/arch/x86/include/asm/vmalloc.h @@ -1,6 +1,26 @@ #ifndef _ASM_X86_VMALLOC_H #define _ASM_X86_VMALLOC_H +#include <asm/cpufeature.h> +#include <asm/page.h> #include <asm/pgtable_areas.h> +#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP + +#ifdef CONFIG_X86_64 +#define arch_vmap_pud_supported arch_vmap_pud_supported +static inline bool arch_vmap_pud_supported(pgprot_t prot) +{ + return boot_cpu_has(X86_FEATURE_GBPAGES); +} +#endif + +#define arch_vmap_pmd_supported arch_vmap_pmd_supported +static inline bool arch_vmap_pmd_supported(pgprot_t prot) +{ + return boot_cpu_has(X86_FEATURE_PSE); +} + +#endif + #endif /* _ASM_X86_VMALLOC_H */ diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S index 56b6865afb2a..d5d8a352eafa 100644 --- a/arch/x86/kernel/acpi/wakeup_64.S +++ b/arch/x86/kernel/acpi/wakeup_64.S @@ -115,7 +115,7 @@ SYM_FUNC_START(do_suspend_lowlevel) movq pt_regs_r14(%rax), %r14 movq pt_regs_r15(%rax), %r15 -#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK +#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK) /* * The suspend path may have poisoned some areas deeper in the stack, * which we now need to unpoison. 
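Both the powerpc and x86 vmalloc.h hunks express huge-vmap capability through arch_vmap_pud_supported()/arch_vmap_pmd_supported() static inlines, each paired with a same-named #define so generic code can detect the override; they replace the arch_ioremap_*_supported() runtime calls removed elsewhere in this series. A sketch of how a generic caller might gate a PMD-sized mapping on the hook — the fallback stub and the vmap_try_huge_pmd() wiring shown here are illustrative assumptions, not the actual mm/vmalloc.c code:

#ifndef arch_vmap_pmd_supported
/* Assumed generic fallback when an architecture does not opt in. */
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

static bool vmap_try_huge_pmd(unsigned long addr, unsigned long end,
			      phys_addr_t phys_addr, pgprot_t prot)
{
	if (!arch_vmap_pmd_supported(prot))
		return false;
	/* A PMD leaf needs PMD_SIZE alignment of both ranges. */
	if (!IS_ALIGNED(addr, PMD_SIZE) || !IS_ALIGNED(phys_addr, PMD_SIZE))
		return false;
	return end - addr >= PMD_SIZE;
}

Making the hooks take the pgprot lets an architecture refuse huge mappings for specific protections while still advertising the capability in general.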
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c index 935af2ac6b1a..05a89e33fde2 100644 --- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c +++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c @@ -1458,7 +1458,7 @@ static int pseudo_lock_dev_release(struct inode *inode, struct file *filp) return 0; } -static int pseudo_lock_dev_mremap(struct vm_area_struct *area, unsigned long flags) +static int pseudo_lock_dev_mremap(struct vm_area_struct *area) { /* Not supported */ return -EINVAL; diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c index da31c2635ee4..21ffb03f6c72 100644 --- a/arch/x86/mm/init_32.c +++ b/arch/x86/mm/init_32.c @@ -755,8 +755,6 @@ void __init mem_init(void) after_bootmem = 1; x86_init.hyper.init_after_bootmem(); - mem_init_print_info(NULL); - /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 55247451ba85..e527d829e1ed 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -826,6 +826,106 @@ void __init paging_init(void) zone_sizes_init(); } +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#define PAGE_UNUSED 0xFD + +/* + * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges + * from unused_pmd_start to the next PMD_SIZE boundary. + */ +static unsigned long unused_pmd_start __meminitdata; + +static void __meminit vmemmap_flush_unused_pmd(void) +{ + if (!unused_pmd_start) + return; + /* + * Marks [unused_pmd_start, PMD-aligned end) as PAGE_UNUSED. + */ + memset((void *)unused_pmd_start, PAGE_UNUSED, + ALIGN(unused_pmd_start, PMD_SIZE) - unused_pmd_start); + unused_pmd_start = 0; +} + +#ifdef CONFIG_MEMORY_HOTPLUG +/* Returns true if the PMD is completely unused and thus it can be freed */ +static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end) +{ + unsigned long start = ALIGN_DOWN(addr, PMD_SIZE); + + /* + * Flush the unused range cache to ensure that memchr_inv() will work + * for the whole range. + */ + vmemmap_flush_unused_pmd(); + memset((void *)addr, PAGE_UNUSED, end - addr); + + return !memchr_inv((void *)start, PAGE_UNUSED, PMD_SIZE); +} +#endif + +static void __meminit __vmemmap_use_sub_pmd(unsigned long start) +{ + /* + * As we expect to add in the same granularity as we remove, it's + * sufficient to mark only some piece used to block the memmap page from + * getting removed when removing some other adjacent memmap (just in + * case the first memmap never gets initialized, e.g., because the memory + * block never gets onlined). + */ + memset((void *)start, 0, sizeof(struct page)); +} + +static void __meminit vmemmap_use_sub_pmd(unsigned long start, unsigned long end) +{ + /* + * We only optimize if the new used range directly follows the + * previously unused range (esp., when populating consecutive sections). + */ + if (unused_pmd_start == start) { + if (likely(IS_ALIGNED(end, PMD_SIZE))) + unused_pmd_start = 0; + else + unused_pmd_start = end; + return; + } + + /* + * If the range does not contiguously follow the previous one, make sure + * to mark the unused range of the previous one so it can be removed. + */ + vmemmap_flush_unused_pmd(); + __vmemmap_use_sub_pmd(start); +} + + +static void __meminit vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end) +{ + vmemmap_flush_unused_pmd(); + + /* + * Our memmap page may already be filled with PAGE_UNUSED from a + * previous remove. Make sure to reset it. 
+ */ + __vmemmap_use_sub_pmd(start); + + /* + * Mark the unused parts of the new memmap range with PAGE_UNUSED; the + * unused head of the PMD precedes start. + */ + if (!IS_ALIGNED(start, PMD_SIZE)) + memset((void *)ALIGN_DOWN(start, PMD_SIZE), PAGE_UNUSED, + start - ALIGN_DOWN(start, PMD_SIZE)); + + /* + * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of + * consecutive sections. For the last added PMD, remember where the + * unused range begins. + */ + if (!IS_ALIGNED(end, PMD_SIZE)) + unused_pmd_start = end; +} +#endif + /* * Memory hotplug specific functions */ @@ -871,8 +971,6 @@ int arch_add_memory(int nid, u64 start, u64 size, return add_pages(nid, start_pfn, nr_pages, params); } -#define PAGE_INUSE 0xFD - static void __meminit free_pagetable(struct page *page, int order) { unsigned long magic; @@ -962,7 +1060,6 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, { unsigned long next, pages = 0; pte_t *pte; - void *page_addr; phys_addr_t phys_addr; pte = pte_start + pte_index(addr); @@ -983,42 +1080,15 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, if (phys_addr < (phys_addr_t)0x40000000) return; - if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) { - /* - * Do not free direct mapping pages since they were - * freed when offlining, or simply not in use. - */ - if (!direct) - free_pagetable(pte_page(*pte), 0); - - spin_lock(&init_mm.page_table_lock); - pte_clear(&init_mm, addr, pte); - spin_unlock(&init_mm.page_table_lock); - - /* For non-direct mapping, pages means nothing. */ - pages++; - } else { - /* - * If we are here, we are freeing vmemmap pages since - * direct mapped memory ranges to be freed are aligned. - * - * If we are not removing the whole page, it means - * other page structs in this page are being used and - * we cannot remove them. So fill the unused page_structs - * with 0xFD, and remove the page when it is wholly - * filled with 0xFD. - */ - memset((void *)addr, PAGE_INUSE, next - addr); + if (!direct) + free_pagetable(pte_page(*pte), 0); - page_addr = page_address(pte_page(*pte)); - if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { - free_pagetable(pte_page(*pte), 0); + spin_lock(&init_mm.page_table_lock); + pte_clear(&init_mm, addr, pte); + spin_unlock(&init_mm.page_table_lock); - spin_lock(&init_mm.page_table_lock); - pte_clear(&init_mm, addr, pte); - spin_unlock(&init_mm.page_table_lock); - } - } + /* For non-direct mapping, pages means nothing. */ + pages++; } /* Call free_pte_table() in remove_pmd_table(). */ @@ -1034,7 +1104,6 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, unsigned long next, pages = 0; pte_t *pte_base; pmd_t *pmd; - void *page_addr; pmd = pmd_start + pmd_index(addr); for (; addr < end; addr = next, pmd++) { @@ -1054,22 +1123,16 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); pages++; - } else { - /* If here, we are freeing vmemmap pages. 
*/ - memset((void *)addr, PAGE_INUSE, next - addr); - - page_addr = page_address(pmd_page(*pmd)); - if (!memchr_inv(page_addr, PAGE_INUSE, - PMD_SIZE)) { + } +#ifdef CONFIG_SPARSEMEM_VMEMMAP + else if (vmemmap_pmd_is_unused(addr, next)) { free_hugepage_table(pmd_page(*pmd), altmap); - spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); - } } - +#endif continue; } @@ -1090,7 +1153,6 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, unsigned long next, pages = 0; pmd_t *pmd_base; pud_t *pud; - void *page_addr; pud = pud_start + pud_index(addr); for (; addr < end; addr = next, pud++) { @@ -1099,33 +1161,13 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, if (!pud_present(*pud)) continue; - if (pud_large(*pud)) { - if (IS_ALIGNED(addr, PUD_SIZE) && - IS_ALIGNED(next, PUD_SIZE)) { - if (!direct) - free_pagetable(pud_page(*pud), - get_order(PUD_SIZE)); - - spin_lock(&init_mm.page_table_lock); - pud_clear(pud); - spin_unlock(&init_mm.page_table_lock); - pages++; - } else { - /* If here, we are freeing vmemmap pages. */ - memset((void *)addr, PAGE_INUSE, next - addr); - - page_addr = page_address(pud_page(*pud)); - if (!memchr_inv(page_addr, PAGE_INUSE, - PUD_SIZE)) { - free_pagetable(pud_page(*pud), - get_order(PUD_SIZE)); - - spin_lock(&init_mm.page_table_lock); - pud_clear(pud); - spin_unlock(&init_mm.page_table_lock); - } - } - + if (pud_large(*pud) && + IS_ALIGNED(addr, PUD_SIZE) && + IS_ALIGNED(next, PUD_SIZE)) { + spin_lock(&init_mm.page_table_lock); + pud_clear(pud); + spin_unlock(&init_mm.page_table_lock); + pages++; continue; } @@ -1197,6 +1239,9 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct, void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { + VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE)); + VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE)); + remove_pagetable(start, end, false, altmap); } @@ -1306,8 +1351,6 @@ void __init mem_init(void) kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); preallocate_vmalloc_pages(); - - mem_init_print_info(NULL); } #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -1538,11 +1581,17 @@ static int __meminit vmemmap_populate_hugepages(unsigned long start, addr_end = addr + PMD_SIZE; p_end = p + PMD_SIZE; + + if (!IS_ALIGNED(addr, PMD_SIZE) || + !IS_ALIGNED(next, PMD_SIZE)) + vmemmap_use_new_sub_pmd(addr, next); + continue; } else if (altmap) return -ENOMEM; /* no fallback */ } else if (pmd_large(*pmd)) { vmemmap_verify((pte_t *)pmd, node, addr, next); + vmemmap_use_sub_pmd(addr, next); continue; } if (vmemmap_populate_basepages(addr, next, node, NULL)) @@ -1556,6 +1605,9 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, { int err; + VM_BUG_ON(!IS_ALIGNED(start, PAGE_SIZE)); + VM_BUG_ON(!IS_ALIGNED(end, PAGE_SIZE)); + if (end - start < PAGES_PER_SECTION * sizeof(struct page)) err = vmemmap_populate_basepages(start, end, node, NULL); else if (boot_cpu_has(X86_FEATURE_PSE)) diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c index 9e5ccc56f8e0..12c686c65ea9 100644 --- a/arch/x86/mm/ioremap.c +++ b/arch/x86/mm/ioremap.c @@ -481,25 +481,6 @@ void iounmap(volatile void __iomem *addr) } EXPORT_SYMBOL(iounmap); -int __init arch_ioremap_p4d_supported(void) -{ - return 0; -} - -int __init arch_ioremap_pud_supported(void) -{ -#ifdef CONFIG_X86_64 - return boot_cpu_has(X86_FEATURE_GBPAGES); -#else - return 0; -#endif -} - -int __init arch_ioremap_pmd_supported(void) 
-{ - return boot_cpu_has(X86_FEATURE_PSE); - } - /* * Convert a physical pointer to a virtual kernel pointer for /dev/mem * access diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c index 427980617557..156cd235659f 100644 --- a/arch/x86/mm/pat/set_memory.c +++ b/arch/x86/mm/pat/set_memory.c @@ -16,6 +16,8 @@ #include <linux/pci.h> #include <linux/vmalloc.h> #include <linux/libnvdimm.h> +#include <linux/vmstat.h> +#include <linux/kernel.h> #include <asm/e820/api.h> #include <asm/processor.h> @@ -91,6 +93,12 @@ static void split_page_count(int level) return; direct_pages_count[level]--; + if (system_state == SYSTEM_RUNNING) { + if (level == PG_LEVEL_2M) + count_vm_event(DIRECT_MAP_LEVEL2_SPLIT); + else if (level == PG_LEVEL_1G) + count_vm_event(DIRECT_MAP_LEVEL3_SPLIT); + } direct_pages_count[level - 1] += PTRS_PER_PTE; } diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index f6a9e2e36642..d27cf69e811d 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -780,14 +780,6 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } -/* - * Until we support 512GB pages, skip them in the vmap area. - */ -int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) -{ - return 0; -} - #ifdef CONFIG_X86_64 /** * pud_free_pmd_page - Clear pud entry and free pmd page. @@ -861,11 +853,6 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) #else /* !CONFIG_X86_64 */ -int pud_free_pmd_page(pud_t *pud, unsigned long addr) -{ - return pud_none(*pud); -} - /* * Disable free page handling on x86-PAE. This assures that ioremap() * does not update sync'd pmd entries. See vmalloc_sync_one(). diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 6ad4c1161518..2332b2156993 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -7,6 +7,7 @@ config XTENSA select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU select ARCH_HAS_DMA_SET_UNCACHED if MMU + select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_SPINLOCKS select ARCH_WANT_FRAME_POINTERS diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c index 2daeba9e454e..6a32b2cf2718 100644 --- a/arch/xtensa/mm/init.c +++ b/arch/xtensa/mm/init.c @@ -119,7 +119,6 @@ void __init mem_init(void) memblock_free_all(); - mem_init_print_info(NULL); pr_info("virtual kernel memory layout:\n" #ifdef CONFIG_KASAN " kasan : 0x%08lx - 0x%08lx (%5lu MB)\n"
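The arch/x86/mm/init_64.c changes above replace the old per-page PAGE_INUSE marking with PMD-granular PAGE_UNUSED bookkeeping: when a memmap range only partially occupies a 2M-mapped vmemmap page, the unused head and tail of that 2M region are filled with 0xFD, a deferred marker (unused_pmd_start) batches the tail memset across consecutive sections, and the PMD is torn down only once memchr_inv() finds the entire region filled with 0xFD. The following standalone userspace model of that bookkeeping (scaled down to a 4 KiB "PMD"; all names are illustrative, not kernel code) may make the lifecycle easier to follow:

#include <stdio.h>
#include <string.h>
#include <stdbool.h>

#define PMD_SIZE	4096u	/* stands in for the 2 MiB vmemmap PMD */
#define PAGE_UNUSED	0xFD

static unsigned char pmd[PMD_SIZE];

/* Model of vmemmap_use_new_sub_pmd(): occupy [start, end) and mark the
 * head and tail of the PMD unused. Note the head begins at the PMD base,
 * not at 'start'. */
static void use_new_range(unsigned int start, unsigned int end)
{
	memset(pmd, PAGE_UNUSED, start);		/* unused head */
	memset(pmd + start, 0, end - start);		/* live memmap */
	memset(pmd + end, PAGE_UNUSED, PMD_SIZE - end);	/* unused tail */
}

/* Model of vmemmap_pmd_is_unused(): mark [start, end) unused, then check
 * whether the whole PMD could be freed, as memchr_inv() does above. */
static bool remove_range(unsigned int start, unsigned int end)
{
	memset(pmd + start, PAGE_UNUSED, end - start);
	for (unsigned int i = 0; i < PMD_SIZE; i++)
		if (pmd[i] != PAGE_UNUSED)
			return false;
	return true;
}

int main(void)
{
	use_new_range(1024, 3072);
	printf("free PMD after partial remove? %d\n", remove_range(1024, 2048));	/* 0 */
	printf("free PMD after full remove?    %d\n", remove_range(2048, 3072));	/* 1 */
	return 0;
}

The first removal leaves live page structs in [2048, 3072), so the scan still finds non-0xFD bytes; after the second removal the whole region reads 0xFD and the modelled PMD may go, which is the same condition under which remove_pmd_table() calls free_hugepage_table() in the patch.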