From 5355ccbe02da413df22eb05f89ca2da9959f9147 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 15 Jan 2018 17:21:48 +0100 Subject: x86/cpufeature: Reindent _static_cpu_has() Because its daft.. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Borislav Petkov Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 78 +++++++++++++++++++-------------------- 1 file changed, 39 insertions(+), 39 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 70eddb3922ff..910a30699ffb 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -148,45 +148,45 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); */ static __always_inline __pure bool _static_cpu_has(u16 bit) { - asm_volatile_goto("1: jmp 6f\n" - "2:\n" - ".skip -(((5f-4f) - (2b-1b)) > 0) * " - "((5f-4f) - (2b-1b)),0x90\n" - "3:\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 4f - .\n" /* repl offset */ - " .word %P1\n" /* always replace */ - " .byte 3b - 1b\n" /* src len */ - " .byte 5f - 4f\n" /* repl len */ - " .byte 3b - 2b\n" /* pad len */ - ".previous\n" - ".section .altinstr_replacement,\"ax\"\n" - "4: jmp %l[t_no]\n" - "5:\n" - ".previous\n" - ".section .altinstructions,\"a\"\n" - " .long 1b - .\n" /* src offset */ - " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ - " .byte 3b - 1b\n" /* src len */ - " .byte 0\n" /* repl len */ - " .byte 0\n" /* pad len */ - ".previous\n" - ".section .altinstr_aux,\"ax\"\n" - "6:\n" - " testb %[bitnum],%[cap_byte]\n" - " jnz %l[t_yes]\n" - " jmp %l[t_no]\n" - ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS), - [bitnum] "i" (1 << (bit & 7)), - [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) - : : t_yes, t_no); - t_yes: - return true; - t_no: - return false; + asm_volatile_goto("1: jmp 6f\n" + "2:\n" + ".skip -(((5f-4f) - (2b-1b)) > 0) * " + "((5f-4f) - (2b-1b)),0x90\n" + "3:\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 4f - .\n" /* repl offset */ + " .word %P1\n" /* always replace */ + " .byte 3b - 1b\n" /* src len */ + " .byte 5f - 4f\n" /* repl len */ + " .byte 3b - 2b\n" /* pad len */ + ".previous\n" + ".section .altinstr_replacement,\"ax\"\n" + "4: jmp %l[t_no]\n" + "5:\n" + ".previous\n" + ".section .altinstructions,\"a\"\n" + " .long 1b - .\n" /* src offset */ + " .long 0\n" /* no replacement */ + " .word %P0\n" /* feature bit */ + " .byte 3b - 1b\n" /* src len */ + " .byte 0\n" /* repl len */ + " .byte 0\n" /* pad len */ + ".previous\n" + ".section .altinstr_aux,\"ax\"\n" + "6:\n" + " testb %[bitnum],%[cap_byte]\n" + " jnz %l[t_yes]\n" + " jmp %l[t_no]\n" + ".previous\n" + : : "i" (bit), "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), + [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) + : : t_yes, t_no); +t_yes: + return true; +t_no: + return false; } #define static_cpu_has(bit) \ -- cgit v1.2.1 From 3197b04bb39b596613ff2f8143c5cd0a6908debf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 16 Jan 2018 09:34:01 +0100 Subject: x86/cpufeature: Update _static_cpu_has() to use all named variables Because more readable.. 
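For readers who have not used named asm operands, here is a minimal stand-alone sketch (not part of the patch; it assumes GCC/Clang extended asm on an x86 target) contrasting the positional %0/%1 references with the %[name] form the patch switches to:

#include <stdio.h>

/* Positional operands: the template only says "%0" and "%1". */
static int inc_positional(int x)
{
	int y;

	asm("leal 1(%1), %0" : "=r" (y) : "r" (x));
	return y;
}

/* Named operands: the template documents which operand is which. */
static int inc_named(int x)
{
	int y;

	asm("leal 1(%[in]), %[out]" : [out] "=r" (y) : [in] "r" (x));
	return y;
}

int main(void)
{
	printf("%d %d\n", inc_positional(41), inc_named(41)); /* 42 42 */
	return 0;
}

With the long operand list of _static_cpu_has(), the named form avoids having to count constraints to work out what "%P0" and "%P1" refer to.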
Requested-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/include/asm/cpufeature.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 910a30699ffb..736771c9822e 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -156,7 +156,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ " .long 4f - .\n" /* repl offset */ - " .word %P1\n" /* always replace */ + " .word %P[always]\n" /* always replace */ " .byte 3b - 1b\n" /* src len */ " .byte 5f - 4f\n" /* repl len */ " .byte 3b - 2b\n" /* pad len */ @@ -168,7 +168,7 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) ".section .altinstructions,\"a\"\n" " .long 1b - .\n" /* src offset */ " .long 0\n" /* no replacement */ - " .word %P0\n" /* feature bit */ + " .word %P[feature]\n" /* feature bit */ " .byte 3b - 1b\n" /* src len */ " .byte 0\n" /* repl len */ " .byte 0\n" /* pad len */ @@ -179,8 +179,9 @@ static __always_inline __pure bool _static_cpu_has(u16 bit) " jnz %l[t_yes]\n" " jmp %l[t_no]\n" ".previous\n" - : : "i" (bit), "i" (X86_FEATURE_ALWAYS), - [bitnum] "i" (1 << (bit & 7)), + : : [feature] "i" (bit), + [always] "i" (X86_FEATURE_ALWAYS), + [bitnum] "i" (1 << (bit & 7)), [cap_byte] "m" (((const char *)boot_cpu_data.x86_capability)[bit >> 3]) : : t_yes, t_no); t_yes: -- cgit v1.2.1 From c80c5ec1b2fa8d3675fc2a6807a64771ea156698 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 10 Feb 2018 15:53:14 +0100 Subject: x86/MCE: Fix build warning introduced by "x86: do not use print_symbol()" The following commit: 7b6061627eb8 ("x86: do not use print_symbol()") ... introduced a new build warning on 32-bit x86: arch/x86/kernel/cpu/mcheck/mce.c:237:21: warning: cast to pointer from integer of different size [-Wint-to-pointer-cast] pr_cont("{%pS}", (void *)m->ip); ^ Fix the type mismatch between the 'void *' expected by %pS and the mce->ip field which is u64 by casting to long. Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-kernel@vger.kernel.org Fixes: 7b6061627eb8 ("x86: do not use print_symbol()") Link: http://lkml.kernel.org/r/20180210145314.22174-1-bp@alien8.de [ Cleaned up the changelog. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3a8e88a611eb..75f405ac085c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -234,7 +234,7 @@ static void __print_mce(struct mce *m) m->cs, m->ip); if (m->cs == __KERNEL_CS) - pr_cont("{%pS}", (void *)m->ip); + pr_cont("{%pS}", (void *)(unsigned long)m->ip); pr_cont("\n"); } -- cgit v1.2.1 From a0d0bb4deba831085d3eeb32d39fe73713ce6eb2 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 9 Feb 2018 16:51:03 -0800 Subject: x86/Kconfig: Simplify NR_CPUS config Clean up and simplify the X86 NR_CPUS Kconfig symbol/option by introducing RANGE_BEGIN_CPUS, RANGE_END_CPUS, and DEF_CONFIG_CPUS. Then combine some default values when their conditionals can be reduced. 
Also move the X86_BIGSMP kconfig option inside an "if X86_32"/"endif" config block and drop its explicit "depends on X86_32". Combine the max. 8192 cases of RANGE_END_CPUS (X86_64 only). Split RANGE_END_CPUS and DEF_CONFIG_CPUS into separate cases for X86_32 and X86_64. Suggested-by: Linus Torvalds Signed-off-by: Randy Dunlap Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/0b833246-ed4b-e451-c426-c4464725be92@infradead.org Link: lkml.kernel.org/r/CA+55aFzOd3j6ZUSkEwTdk85qtt1JywOtm3ZAb-qAvt8_hJ6D4A@mail.gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 57 +++++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 42 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 63bf349b2b24..9d921b78b145 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -423,12 +423,6 @@ config X86_MPPARSE For old smp systems that do not have proper acpi support. Newer systems (esp with 64bit cpus) with acpi support, MADT and DSDT will override it -config X86_BIGSMP - bool "Support for big SMP systems with more than 8 CPUs" - depends on X86_32 && SMP - ---help--- - This option is needed for the systems that have more than 8 CPUs - config GOLDFISH def_bool y depends on X86_GOLDFISH @@ -460,6 +454,12 @@ config INTEL_RDT Say N if unsure. if X86_32 +config X86_BIGSMP + bool "Support for big SMP systems with more than 8 CPUs" + depends on SMP + ---help--- + This option is needed for the systems that have more than 8 CPUs + config X86_EXTENDED_PLATFORM bool "Support for extended (non-PC) x86 platforms" default y @@ -949,17 +949,44 @@ config MAXSMP Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. +config RANGE_END_CPUS + int + depends on X86_32 + default 8 if SMP && !X86_BIGSMP + default 64 if SMP && X86_BIGSMP + default 1 if !SMP + +config RANGE_END_CPUS + int + depends on X86_64 + default 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK + default 8192 if SMP && (MAXSMP || CPUMASK_OFFSTACK) + default 1 if !SMP + +config RANGE_BEGIN_CPUS + int + default 1 if !SMP + default RANGE_END_CPUS if MAXSMP + default 2 + +config DEF_CONFIG_CPUS + int + depends on X86_32 + default 1 if !SMP + default 32 if X86_BIGSMP + default 8 if SMP + +config DEF_CONFIG_CPUS + int + depends on X86_64 + default 1 if !SMP + default 8192 if MAXSMP + default 64 if SMP + config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP - range 2 8 if SMP && X86_32 && !X86_BIGSMP - range 2 64 if SMP && X86_32 && X86_BIGSMP - range 2 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK && X86_64 - range 2 8192 if SMP && !MAXSMP && CPUMASK_OFFSTACK && X86_64 - default "1" if !SMP - default "8192" if MAXSMP - default "32" if SMP && X86_BIGSMP - default "8" if SMP && X86_32 - default "64" if SMP + range RANGE_BEGIN_CPUS RANGE_END_CPUS + default DEF_CONFIG_CPUS ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. 
If CPUMASK_OFFSTACK is enabled, the maximum -- cgit v1.2.1 From aec6487e994d2f625197970a56a4aac40c2c7547 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Sat, 10 Feb 2018 12:36:29 +0100 Subject: x86/Kconfig: Further simplify the NR_CPUS config Clean up various aspects of the x86 CONFIG_NR_CPUS configuration switches: - Rename the three CONFIG_NR_CPUS related variables to create a common namespace for them: RANGE_BEGIN_CPUS => NR_CPUS_RANGE_BEGIN RANGE_END_CPUS => NR_CPUS_RANGE_END DEF_CONFIG_CPUS => NR_CPUS_DEFAULT - Align them vertically, such as: config NR_CPUS_RANGE_END int depends on X86_64 default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK) default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK) default 1 if !SMP - Update help text, add more comments. Test results: # i386 allnoconfig: CONFIG_NR_CPUS_RANGE_BEGIN=1 CONFIG_NR_CPUS_RANGE_END=1 CONFIG_NR_CPUS_DEFAULT=1 CONFIG_NR_CPUS=1 # i386 defconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=8 CONFIG_NR_CPUS_DEFAULT=8 CONFIG_NR_CPUS=8 # i386 allyesconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=64 CONFIG_NR_CPUS_DEFAULT=32 CONFIG_NR_CPUS=32 # x86_64 allnoconfig: CONFIG_NR_CPUS_RANGE_BEGIN=1 CONFIG_NR_CPUS_RANGE_END=1 CONFIG_NR_CPUS_DEFAULT=1 CONFIG_NR_CPUS=1 # x86_64 defconfig: CONFIG_NR_CPUS_RANGE_BEGIN=2 CONFIG_NR_CPUS_RANGE_END=512 CONFIG_NR_CPUS_DEFAULT=64 CONFIG_NR_CPUS=64 # x86_64 allyesconfig: CONFIG_NR_CPUS_RANGE_BEGIN=8192 CONFIG_NR_CPUS_RANGE_END=8192 CONFIG_NR_CPUS_DEFAULT=8192 CONFIG_NR_CPUS=8192 Acked-by: Randy Dunlap Acked-by: Linus Torvalds Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180210113629.jcv6su3r4suuno63@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 66 ++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 40 insertions(+), 26 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 9d921b78b145..a528c14d45a5 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -949,52 +949,66 @@ config MAXSMP Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. -config RANGE_END_CPUS +# +# The maximum number of CPUs supported: +# +# The main config value is NR_CPUS, which defaults to NR_CPUS_DEFAULT, +# and which can be configured interactively in the +# [NR_CPUS_RANGE_BEGIN ... NR_CPUS_RANGE_END] range. +# +# The ranges are different on 32-bit and 64-bit kernels, depending on +# hardware capabilities and scalability features of the kernel. +# +# ( If MAXSMP is enabled we just use the highest possible value and disable +# interactive configuration. 
) +# + +config NR_CPUS_RANGE_BEGIN int - depends on X86_32 - default 8 if SMP && !X86_BIGSMP - default 64 if SMP && X86_BIGSMP - default 1 if !SMP + default NR_CPUS_RANGE_END if MAXSMP + default 1 if !SMP + default 2 -config RANGE_END_CPUS +config NR_CPUS_RANGE_END int - depends on X86_64 - default 512 if SMP && !MAXSMP && !CPUMASK_OFFSTACK - default 8192 if SMP && (MAXSMP || CPUMASK_OFFSTACK) - default 1 if !SMP + depends on X86_32 + default 64 if SMP && X86_BIGSMP + default 8 if SMP && !X86_BIGSMP + default 1 if !SMP -config RANGE_BEGIN_CPUS +config NR_CPUS_RANGE_END int - default 1 if !SMP - default RANGE_END_CPUS if MAXSMP - default 2 + depends on X86_64 + default 8192 if SMP && ( MAXSMP || CPUMASK_OFFSTACK) + default 512 if SMP && (!MAXSMP && !CPUMASK_OFFSTACK) + default 1 if !SMP -config DEF_CONFIG_CPUS +config NR_CPUS_DEFAULT int depends on X86_32 - default 1 if !SMP - default 32 if X86_BIGSMP - default 8 if SMP + default 32 if X86_BIGSMP + default 8 if SMP + default 1 if !SMP -config DEF_CONFIG_CPUS +config NR_CPUS_DEFAULT int depends on X86_64 - default 1 if !SMP - default 8192 if MAXSMP - default 64 if SMP + default 8192 if MAXSMP + default 64 if SMP + default 1 if !SMP config NR_CPUS int "Maximum number of CPUs" if SMP && !MAXSMP - range RANGE_BEGIN_CPUS RANGE_END_CPUS - default DEF_CONFIG_CPUS + range NR_CPUS_RANGE_BEGIN NR_CPUS_RANGE_END + default NR_CPUS_DEFAULT ---help--- This allows you to specify the maximum number of CPUs which this kernel will support. If CPUMASK_OFFSTACK is enabled, the maximum supported value is 8192, otherwise the maximum value is 512. The minimum value which makes sense is 2. - This is purely to save memory - each supported CPU adds - approximately eight kilobytes to the kernel image. + This is purely to save memory: each supported CPU adds about 8KB + to the kernel image. config SCHED_SMT bool "SMT (Hyperthreading) scheduler support" -- cgit v1.2.1 From 7cc4eb1bdd8b082f3d889daccd9412aa10e56165 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 9 Feb 2018 17:22:25 +0300 Subject: x86/boot/compressed/64: Rename pagetable.c to kaslr_64.c The name of the file -- pagetable.c -- is misleading: it only contains helpers used for KASLR in 64-bit mode. Let's rename the file to reflect its content. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180209142228.21231-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/Makefile | 2 +- arch/x86/boot/compressed/kaslr_64.c | 157 +++++++++++++++++++++++++++++++++++ arch/x86/boot/compressed/pagetable.c | 157 ----------------------------------- 3 files changed, 158 insertions(+), 158 deletions(-) create mode 100644 arch/x86/boot/compressed/kaslr_64.c delete mode 100644 arch/x86/boot/compressed/pagetable.c (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index f25e1530e064..1f734cd98fd3 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -78,7 +78,7 @@ vmlinux-objs-y := $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \ vmlinux-objs-$(CONFIG_EARLY_PRINTK) += $(obj)/early_serial_console.o vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr.o ifdef CONFIG_X86_64 - vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/pagetable.o + vmlinux-objs-$(CONFIG_RANDOMIZE_BASE) += $(obj)/kaslr_64.o vmlinux-objs-y += $(obj)/mem_encrypt.o vmlinux-objs-y += $(obj)/pgtable_64.o endif diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c new file mode 100644 index 000000000000..b5e5e02f8cde --- /dev/null +++ b/arch/x86/boot/compressed/kaslr_64.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This code is used on x86_64 to create page table identity mappings on + * demand by building up a new set of page tables (or appending to the + * existing ones), and then switching over to them when ready. + * + * Copyright (C) 2015-2016 Yinghai Lu + * Copyright (C) 2016 Kees Cook + */ + +/* + * Since we're dealing with identity mappings, physical and virtual + * addresses are the same, so override these defines which are ultimately + * used by the headers in misc.h. + */ +#define __pa(x) ((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long)(x))) + +/* + * The pgtable.h and mm/ident_map.c includes make use of the SME related + * information which is not used in the compressed image support. Un-define + * the SME support to avoid any compile and link errors. + */ +#undef CONFIG_AMD_MEM_ENCRYPT + +/* No PAGE_TABLE_ISOLATION support needed either: */ +#undef CONFIG_PAGE_TABLE_ISOLATION + +#include "misc.h" + +/* These actually do the work of building the kernel identity maps. */ +#include +#include +/* Use the static base for this part of the boot process */ +#undef __PAGE_OFFSET +#define __PAGE_OFFSET __PAGE_OFFSET_BASE +#include "../../mm/ident_map.c" + +/* Used by pgtable.h asm code to force instruction serialization. */ +unsigned long __force_order; + +/* Used to track our page table allocation area. */ +struct alloc_pgt_data { + unsigned char *pgt_buf; + unsigned long pgt_buf_size; + unsigned long pgt_buf_offset; +}; + +/* + * Allocates space for a page table entry, using struct alloc_pgt_data + * above. Besides the local callers, this is used as the allocation + * callback in mapping_info below. + */ +static void *alloc_pgt_page(void *context) +{ + struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; + unsigned char *entry; + + /* Validate there is space available for a new page. 
*/ + if (pages->pgt_buf_offset >= pages->pgt_buf_size) { + debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); + debug_putaddr(pages->pgt_buf_offset); + debug_putaddr(pages->pgt_buf_size); + return NULL; + } + + entry = pages->pgt_buf + pages->pgt_buf_offset; + pages->pgt_buf_offset += PAGE_SIZE; + + return entry; +} + +/* Used to track our allocated page tables. */ +static struct alloc_pgt_data pgt_data; + +/* The top level page table entry pointer. */ +static unsigned long top_level_pgt; + +/* + * Mapping information structure passed to kernel_ident_mapping_init(). + * Due to relocation, pointers must be assigned at run time not build time. + */ +static struct x86_mapping_info mapping_info; + +/* Locates and clears a region for a new top level page table. */ +void initialize_identity_maps(void) +{ + unsigned long sev_me_mask = get_sev_encryption_mask(); + + /* Init mapping_info with run-time function/buffer pointers. */ + mapping_info.alloc_pgt_page = alloc_pgt_page; + mapping_info.context = &pgt_data; + mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask; + mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask; + + /* + * It should be impossible for this not to already be true, + * but since calling this a second time would rewind the other + * counters, let's just make sure this is reset too. + */ + pgt_data.pgt_buf_offset = 0; + + /* + * If we came here via startup_32(), cr3 will be _pgtable already + * and we must append to the existing area instead of entirely + * overwriting it. + * + * With 5-level paging, we use '_pgtable' to allocate the p4d page table, + * the top-level page table is allocated separately. + * + * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level + * cases. On 4-level paging it's equal to 'top_level_pgt'. + */ + top_level_pgt = read_cr3_pa(); + if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) { + debug_putstr("booted via startup_32()\n"); + pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE; + pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE; + memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); + } else { + debug_putstr("booted via startup_64()\n"); + pgt_data.pgt_buf = _pgtable; + pgt_data.pgt_buf_size = BOOT_PGT_SIZE; + memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); + top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data); + } +} + +/* + * Adds the specified range to what will become the new identity mappings. + * Once all ranges have been added, the new mapping is activated by calling + * finalize_identity_maps() below. + */ +void add_identity_map(unsigned long start, unsigned long size) +{ + unsigned long end = start + size; + + /* Align boundary to 2M. */ + start = round_down(start, PMD_SIZE); + end = round_up(end, PMD_SIZE); + if (start >= end) + return; + + /* Build the mapping. */ + kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, + start, end); +} + +/* + * This switches the page tables to the new level4 that has been built + * via calls to add_identity_map() above. If booted via startup_32(), + * this is effectively a no-op. 
+ */ +void finalize_identity_maps(void) +{ + write_cr3(top_level_pgt); +} diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c deleted file mode 100644 index b5e5e02f8cde..000000000000 --- a/arch/x86/boot/compressed/pagetable.c +++ /dev/null @@ -1,157 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * This code is used on x86_64 to create page table identity mappings on - * demand by building up a new set of page tables (or appending to the - * existing ones), and then switching over to them when ready. - * - * Copyright (C) 2015-2016 Yinghai Lu - * Copyright (C) 2016 Kees Cook - */ - -/* - * Since we're dealing with identity mappings, physical and virtual - * addresses are the same, so override these defines which are ultimately - * used by the headers in misc.h. - */ -#define __pa(x) ((unsigned long)(x)) -#define __va(x) ((void *)((unsigned long)(x))) - -/* - * The pgtable.h and mm/ident_map.c includes make use of the SME related - * information which is not used in the compressed image support. Un-define - * the SME support to avoid any compile and link errors. - */ -#undef CONFIG_AMD_MEM_ENCRYPT - -/* No PAGE_TABLE_ISOLATION support needed either: */ -#undef CONFIG_PAGE_TABLE_ISOLATION - -#include "misc.h" - -/* These actually do the work of building the kernel identity maps. */ -#include -#include -/* Use the static base for this part of the boot process */ -#undef __PAGE_OFFSET -#define __PAGE_OFFSET __PAGE_OFFSET_BASE -#include "../../mm/ident_map.c" - -/* Used by pgtable.h asm code to force instruction serialization. */ -unsigned long __force_order; - -/* Used to track our page table allocation area. */ -struct alloc_pgt_data { - unsigned char *pgt_buf; - unsigned long pgt_buf_size; - unsigned long pgt_buf_offset; -}; - -/* - * Allocates space for a page table entry, using struct alloc_pgt_data - * above. Besides the local callers, this is used as the allocation - * callback in mapping_info below. - */ -static void *alloc_pgt_page(void *context) -{ - struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context; - unsigned char *entry; - - /* Validate there is space available for a new page. */ - if (pages->pgt_buf_offset >= pages->pgt_buf_size) { - debug_putstr("out of pgt_buf in " __FILE__ "!?\n"); - debug_putaddr(pages->pgt_buf_offset); - debug_putaddr(pages->pgt_buf_size); - return NULL; - } - - entry = pages->pgt_buf + pages->pgt_buf_offset; - pages->pgt_buf_offset += PAGE_SIZE; - - return entry; -} - -/* Used to track our allocated page tables. */ -static struct alloc_pgt_data pgt_data; - -/* The top level page table entry pointer. */ -static unsigned long top_level_pgt; - -/* - * Mapping information structure passed to kernel_ident_mapping_init(). - * Due to relocation, pointers must be assigned at run time not build time. - */ -static struct x86_mapping_info mapping_info; - -/* Locates and clears a region for a new top level page table. */ -void initialize_identity_maps(void) -{ - unsigned long sev_me_mask = get_sev_encryption_mask(); - - /* Init mapping_info with run-time function/buffer pointers. */ - mapping_info.alloc_pgt_page = alloc_pgt_page; - mapping_info.context = &pgt_data; - mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask; - mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask; - - /* - * It should be impossible for this not to already be true, - * but since calling this a second time would rewind the other - * counters, let's just make sure this is reset too. 
- */ - pgt_data.pgt_buf_offset = 0; - - /* - * If we came here via startup_32(), cr3 will be _pgtable already - * and we must append to the existing area instead of entirely - * overwriting it. - * - * With 5-level paging, we use '_pgtable' to allocate the p4d page table, - * the top-level page table is allocated separately. - * - * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level - * cases. On 4-level paging it's equal to 'top_level_pgt'. - */ - top_level_pgt = read_cr3_pa(); - if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) { - debug_putstr("booted via startup_32()\n"); - pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE; - pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE; - memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); - } else { - debug_putstr("booted via startup_64()\n"); - pgt_data.pgt_buf = _pgtable; - pgt_data.pgt_buf_size = BOOT_PGT_SIZE; - memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size); - top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data); - } -} - -/* - * Adds the specified range to what will become the new identity mappings. - * Once all ranges have been added, the new mapping is activated by calling - * finalize_identity_maps() below. - */ -void add_identity_map(unsigned long start, unsigned long size) -{ - unsigned long end = start + size; - - /* Align boundary to 2M. */ - start = round_down(start, PMD_SIZE); - end = round_up(end, PMD_SIZE); - if (start >= end) - return; - - /* Build the mapping. */ - kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt, - start, end); -} - -/* - * This switches the page tables to the new level4 that has been built - * via calls to add_identity_map() above. If booted via startup_32(), - * this is effectively a no-op. - */ -void finalize_identity_maps(void) -{ - write_cr3(top_level_pgt); -} -- cgit v1.2.1 From 4440977be1347d43503f381716e4918413b5a6f0 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 9 Feb 2018 17:22:26 +0300 Subject: x86/boot/compressed/64: Introduce paging_prepare() Rename l5_paging_required() to paging_prepare() and change the interface of the function. This is a preparation for the next patch, which would make the function also allocate memory for the 32-bit trampoline. The function now returns a 128-bit structure. RAX would return trampoline memory address (zero for now) and RDX would indicate if we need to enable 5-level paging. Signed-off-by: Kirill A. Shutemov [ Typo fixes and general clarification. ] Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Cyrill Gorcunov Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180209142228.21231-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 41 ++++++++++++++++------------------- arch/x86/boot/compressed/pgtable_64.c | 25 ++++++++++----------- 2 files changed, 31 insertions(+), 35 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index fc313e29fe2c..d598d65db32c 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -304,20 +304,6 @@ ENTRY(startup_64) /* Set up the stack */ leaq boot_stack_end(%rbx), %rsp -#ifdef CONFIG_X86_5LEVEL - /* - * Check if we need to enable 5-level paging. - * RSI holds real mode data and need to be preserved across - * a function call. 
- */ - pushq %rsi - call l5_paging_required - popq %rsi - - /* If l5_paging_required() returned zero, we're done here. */ - cmpq $0, %rax - je lvl5 - /* * At this point we are in long mode with 4-level paging enabled, * but we want to enable 5-level paging. @@ -325,12 +311,28 @@ ENTRY(startup_64) * The problem is that we cannot do it directly. Setting LA57 in * long mode would trigger #GP. So we need to switch off long mode * first. + */ + + /* + * paging_prepare() sets up the trampoline and checks if we need to + * enable 5-level paging. * - * NOTE: This is not going to work if bootloader put us above 4G - * limit. + * Address of the trampoline is returned in RAX. + * Non zero RDX on return means we need to enable 5-level paging. * - * The first step is go into compatibility mode. + * RSI holds real mode data and needs to be preserved across + * this function call. */ + pushq %rsi + call paging_prepare + popq %rsi + + /* Save the trampoline address in RCX */ + movq %rax, %rcx + + /* Check if we need to enable 5-level paging */ + cmpq $0, %rdx + jz lvl5 /* Clear additional page table */ leaq lvl5_pgtable(%rbx), %rdi @@ -352,7 +354,6 @@ ENTRY(startup_64) pushq %rax lretq lvl5: -#endif /* Zero EFLAGS */ pushq $0 @@ -490,7 +491,6 @@ relocated: jmp *%rax .code32 -#ifdef CONFIG_X86_5LEVEL compatible_mode: /* Setup data and stack segments */ movl $__KERNEL_DS, %eax @@ -526,7 +526,6 @@ compatible_mode: movl %eax, %cr0 lret -#endif no_longmode: /* This isn't an x86-64 CPU so hang */ @@ -585,7 +584,5 @@ boot_stack_end: .balign 4096 pgtable: .fill BOOT_PGT_SIZE, 1, 0 -#ifdef CONFIG_X86_5LEVEL lvl5_pgtable: .fill PAGE_SIZE, 1, 0 -#endif diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index b4469a37e9a1..3f1697fcc7a8 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -9,20 +9,19 @@ */ unsigned long __force_order; -int l5_paging_required(void) -{ - /* Check if leaf 7 is supported. */ - - if (native_cpuid_eax(0) < 7) - return 0; +struct paging_config { + unsigned long trampoline_start; + unsigned long l5_required; +}; - /* Check if la57 is supported. */ - if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) - return 0; +struct paging_config paging_prepare(void) +{ + struct paging_config paging_config = {}; - /* Check if 5-level paging has already been enabled. */ - if (native_read_cr4() & X86_CR4_LA57) - return 0; + /* Check if LA57 is desired and supported */ + if (IS_ENABLED(CONFIG_X86_5LEVEL) && native_cpuid_eax(0) >= 7 && + (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + paging_config.l5_required = 1; - return 1; + return paging_config; } -- cgit v1.2.1 From 595dd46ebfc10be041a365d0a3fa99df50b6ba73 Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 12 Feb 2018 22:44:53 +0800 Subject: vfs/proc/kcore, x86/mm/kcore: Fix SMAP fault when dumping vsyscall user page Commit: df04abfd181a ("fs/proc/kcore.c: Add bounce buffer for ktext data") ... introduced a bounce buffer to work around CONFIG_HARDENED_USERCOPY=y. However, accessing the vsyscall user page will cause an SMAP fault. Replace memcpy() with copy_from_user() to fix this bug works, but adding a common way to handle this sort of user page may be useful for future. Currently, only vsyscall page requires KCORE_USER. 
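Conceptually the fix looks like the hypothetical sketch below (illustration only, not the actual fs/proc/kcore.c code; the helper name is made up): with SMAP enabled the kernel may not touch a user-accessible mapping such as the vsyscall page through ordinary loads, so a plain memcpy() faults, while the uaccess helpers bracket the access with STAC/CLAC and are therefore safe for KCORE_USER ranges.

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: dump one user-accessible page into the read buffer. */
static int dump_user_mapped_page(char __user *ubuf, const void *upage,
				 size_t len)
{
	/* memcpy(kbuf, upage, len) would trigger an SMAP fault here. */
	if (copy_to_user(ubuf, upage, len))	/* STAC ... access ... CLAC */
		return -EFAULT;

	return 0;
}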
Signed-off-by: Jia Zhang Reviewed-by: Jiri Olsa Cc: Al Viro Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jolsa@redhat.com Link: http://lkml.kernel.org/r/1518446694-21124-2-git-send-email-zhang.jia@linux.alibaba.com Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1ab42c852069..6aa33d1e198f 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1193,8 +1193,7 @@ void __init mem_init(void) register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, - PAGE_SIZE, KCORE_OTHER); + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } -- cgit v1.2.1 From cd026ca2861e7f384d677626a483da797c76b9da Mon Sep 17 00:00:00 2001 From: Jia Zhang Date: Mon, 12 Feb 2018 22:44:54 +0800 Subject: x86/mm/kcore: Add vsyscall page to /proc/kcore conditionally The vsyscall page should be visible only if vsyscall=emulate/native when dumping /proc/kcore. Signed-off-by: Jia Zhang Reviewed-by: Jiri Olsa Cc: Al Viro Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jolsa@redhat.com Link: http://lkml.kernel.org/r/1518446694-21124-3-git-send-email-zhang.jia@linux.alibaba.com Signed-off-by: Ingo Molnar --- arch/x86/mm/init_64.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6aa33d1e198f..8ba9c3128947 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -1193,7 +1193,8 @@ void __init mem_init(void) register_page_bootmem_info(); /* Register memory areas for /proc/kcore */ - kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); + if (get_gate_vma(&init_mm)) + kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR, PAGE_SIZE, KCORE_USER); mem_init_print_info(NULL); } -- cgit v1.2.1 From 295cc7eb314eb3321fb6d67ca6f7305f5c50d10f Mon Sep 17 00:00:00 2001 From: Masayoshi Mizuma Date: Thu, 8 Feb 2018 09:19:08 -0500 Subject: x86/smpboot: Fix uncore_pci_remove() indexing bug when hot-removing a physical CPU When a physical CPU is hot-removed, the following warning messages are shown while the uncore device is removed in uncore_pci_remove(): WARNING: CPU: 120 PID: 5 at arch/x86/events/intel/uncore.c:988 uncore_pci_remove+0xf1/0x110 ... CPU: 120 PID: 5 Comm: kworker/u1024:0 Not tainted 4.15.0-rc8 #1 Workqueue: kacpi_hotplug acpi_hotplug_work_fn ... Call Trace: pci_device_remove+0x36/0xb0 device_release_driver_internal+0x145/0x210 pci_stop_bus_device+0x76/0xa0 pci_stop_root_bus+0x44/0x60 acpi_pci_root_remove+0x1f/0x80 acpi_bus_trim+0x54/0x90 acpi_bus_trim+0x2e/0x90 acpi_device_hotplug+0x2bc/0x4b0 acpi_hotplug_work_fn+0x1a/0x30 process_one_work+0x141/0x340 worker_thread+0x47/0x3e0 kthread+0xf5/0x130 When uncore_pci_remove() runs, it tries to get the package ID to clear the value of uncore_extra_pci_dev[].dev[] by using topology_phys_to_logical_pkg(). The warning messesages are shown because topology_phys_to_logical_pkg() returns -1. arch/x86/events/intel/uncore.c: static void uncore_pci_remove(struct pci_dev *pdev) { ... phys_id = uncore_pcibus_to_physid(pdev->bus); ... 
pkg = topology_phys_to_logical_pkg(phys_id); // returns -1 for (i = 0; i < UNCORE_EXTRA_PCI_DEV_MAX; i++) { if (uncore_extra_pci_dev[pkg].dev[i] == pdev) { uncore_extra_pci_dev[pkg].dev[i] = NULL; break; } } WARN_ON_ONCE(i >= UNCORE_EXTRA_PCI_DEV_MAX); // <=========== HERE!! topology_phys_to_logical_pkg() tries to find cpuinfo_x86->phys_proc_id that matches the phys_pkg argument. arch/x86/kernel/smpboot.c: int topology_phys_to_logical_pkg(unsigned int phys_pkg) { int cpu; for_each_possible_cpu(cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); if (c->initialized && c->phys_proc_id == phys_pkg) return c->logical_proc_id; } return -1; } However, the phys_proc_id was already set to 0 by remove_siblinginfo() when the CPU was offlined. So, topology_phys_to_logical_pkg() cannot find the correct logical_proc_id and always returns -1. As the result, uncore_pci_remove() calls WARN_ON_ONCE() and the warning messages are shown. What is worse is that the bogus 'pkg' index results in two bugs: - We dereference uncore_extra_pci_dev[] with a negative index - We fail to clean up a stale pointer in uncore_extra_pci_dev[][] To fix these bugs, remove the clearing of ->phys_proc_id from remove_siblinginfo(). This should not cause any problems, because ->phys_proc_id is not used after it is hot-removed and it is re-set while hot-adding. Signed-off-by: Masayoshi Mizuma Acked-by: Thomas Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: yasu.isimatu@gmail.com Cc: Fixes: 30bb9811856f ("x86/topology: Avoid wasting 128k for package id array") Link: http://lkml.kernel.org/r/ed738d54-0f01-b38b-b794-c31dc118c207@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 6f27facbaa9b..cfc61e1d45e2 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1430,7 +1430,6 @@ static void remove_siblinginfo(int cpu) cpumask_clear(cpu_llc_shared_mask(cpu)); cpumask_clear(topology_sibling_cpumask(cpu)); cpumask_clear(topology_core_cpumask(cpu)); - c->phys_proc_id = 0; c->cpu_core_id = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state(); -- cgit v1.2.1 From a06cc94f3f8dfab74fe7fac3a6e9f15d77566d00 Mon Sep 17 00:00:00 2001 From: Cao jin Date: Thu, 8 Feb 2018 14:38:57 +0800 Subject: x86/build: Drop superfluous ALIGN from the linker script ALIGN(8) is superfluous since macro TEXT_TEXT already has one. bonus cleanups: - indentation fix - spaces -> tab. Signed-off-by: Cao jin Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180208063857.15197-1-caoj.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/vmlinux.lds.S | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 9b138a06c1a4..1c43a2e839fa 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -102,7 +102,6 @@ SECTIONS _stext = .; /* bootstrapping code */ HEAD_TEXT - . = ALIGN(8); TEXT_TEXT SCHED_TEXT CPUIDLE_TEXT @@ -198,7 +197,7 @@ SECTIONS . = __vvar_beginning_hack + PAGE_SIZE; } :data - . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); + . = ALIGN(__vvar_page + PAGE_SIZE, PAGE_SIZE); /* Init code and data - will be freed after init */ . = ALIGN(PAGE_SIZE); @@ -366,8 +365,8 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); /* keep VO_INIT_SIZE page aligned */ _end = .; - STABS_DEBUG - DWARF_DEBUG + STABS_DEBUG + DWARF_DEBUG /* Sections to be discarded */ DISCARDS -- cgit v1.2.1 From 74eb816b21d520ce37ce8aaf03128ca6067bbe22 Mon Sep 17 00:00:00 2001 From: Progyan Bhattacharya Date: Tue, 6 Feb 2018 10:45:23 +0530 Subject: x86/build: Add arch/x86/tools/insn_decoder_test to .gitignore The file was generated by make command and should not be in the source tree. Signed-off-by: Progyan Bhattacharya Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/.gitignore | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/.gitignore b/arch/x86/.gitignore index aff152c87cf4..5a82bac5e0bc 100644 --- a/arch/x86/.gitignore +++ b/arch/x86/.gitignore @@ -1,6 +1,7 @@ boot/compressed/vmlinux tools/test_get_len tools/insn_sanity +tools/insn_decoder_test purgatory/kexec-purgatory.c purgatory/purgatory.ro -- cgit v1.2.1 From c25d99d20ba69824a1e2cc118e04b877cd427afc Mon Sep 17 00:00:00 2001 From: "mike.travis@hpe.com" Date: Mon, 5 Feb 2018 16:15:04 -0600 Subject: x86/platform/UV: Fix GAM Range Table entries less than 1GB The latest UV platforms include the new ApachePass NVDIMMs into the UV address space. This has introduced address ranges in the Global Address Map Table that are less than the previous lowest range, which was 2GB. Fix the address calculation so it accommodates address ranges from bytes to exabytes. Signed-off-by: Mike Travis Reviewed-by: Andrew Banman Reviewed-by: Dimitri Sivanich Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russ Anderson Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180205221503.190219903@stormcage.americas.sgi.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_uv_x.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 46b675aaf20b..f11910b44638 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c @@ -1176,16 +1176,25 @@ static void __init decode_gam_rng_tbl(unsigned long ptr) uv_gre_table = gre; for (; gre->type != UV_GAM_RANGE_TYPE_UNUSED; gre++) { + unsigned long size = ((unsigned long)(gre->limit - lgre) + << UV_GAM_RANGE_SHFT); + int order = 0; + char suffix[] = " KMGTPE"; + + while (size > 9999 && order < sizeof(suffix)) { + size /= 1024; + order++; + } + if (!index) { pr_info("UV: GAM Range Table...\n"); pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN"); } - pr_info("UV: %2d: 0x%014lx-0x%014lx %5luG %3d %04x %02x %02x\n", + pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n", index++, (unsigned long)lgre << UV_GAM_RANGE_SHFT, (unsigned long)gre->limit << UV_GAM_RANGE_SHFT, - ((unsigned long)(gre->limit - lgre)) >> - (30 - UV_GAM_RANGE_SHFT), /* 64M -> 1G */ + size, suffix[order], gre->type, gre->nasid, gre->sockid, gre->pnode); lgre = gre->limit; -- cgit v1.2.1 From 01684e72f16727e6ae0aeb1392f478e11ec5b8f7 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Fri, 2 Feb 2018 15:56:19 +0100 Subject: x86/error_inject: Make just_return_func() globally visible With link time optimizations enabled, I get a link failure: ./ccLbOEHX.ltrans19.ltrans.o: In function `override_function_with_return': :(.text+0x7f3): undefined reference to `just_return_func' Marking the symbol .globl makes it work as expected. 
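The underlying issue can be reproduced outside the kernel with a small stand-alone example (illustration only; it assumes GCC on an ELF/x86 target and uses a made-up symbol name): a symbol defined inside a top-level asm() block is assembler-local unless the asm itself marks it .globl, and under LTO the caller and the asm block may land in different partitions, at which point the reference can no longer be resolved.

#include <stdio.h>

void just_return_demo(void);	/* the C declaration alone does not export it */

asm(
"	.type just_return_demo, @function\n"
"	.globl just_return_demo\n"	/* analogous to the .globl line the patch adds */
"just_return_demo:\n"
"	ret\n"
"	.size just_return_demo, .-just_return_demo\n"
);

int main(void)
{
	just_return_demo();
	printf("just_return_demo() returned\n");
	return 0;
}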
Signed-off-by: Arnd Bergmann Acked-by: Masami Hiramatsu Acked-by: Thomas Gleixner Cc: Alexei Starovoitov Cc: Josef Bacik Cc: Linus Torvalds Cc: Nicolas Pitre Cc: Peter Zijlstra Fixes: 540adea3809f ("error-injection: Separate error-injection from kprobe") Link: http://lkml.kernel.org/r/20180202145634.200291-3-arnd@arndb.de Signed-off-by: Ingo Molnar --- arch/x86/lib/error-inject.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/lib/error-inject.c b/arch/x86/lib/error-inject.c index 7b881d03d0dd..3cdf06128d13 100644 --- a/arch/x86/lib/error-inject.c +++ b/arch/x86/lib/error-inject.c @@ -7,6 +7,7 @@ asmlinkage void just_return_func(void); asm( ".type just_return_func, @function\n" + ".globl just_return_func\n" "just_return_func:\n" " ret\n" ".size just_return_func, .-just_return_func\n" -- cgit v1.2.1 From 515ab7c41306aad1f80a980e1936ef635c61570c Mon Sep 17 00:00:00 2001 From: Nadav Amit Date: Wed, 31 Jan 2018 13:19:12 -0800 Subject: x86/mm: Align TLB invalidation info The TLB invalidation info is allocated on the stack, which might cause it to be unaligned. Since this information may be transferred to different cores for TLB shootdown, this may cause an additional cache line to become shared. While the overhead is likely to be small, the fix is simple. We do not use __cacheline_aligned() since it also defines the section, which is inappropriate for stack variables. Signed-off-by: Nadav Amit Acked-by: Andy Lutomirski Cc: Dave Hansen Cc: Linus Torvalds Cc: Nadav Amit Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180131211912.52064-1-namit@vmware.com Signed-off-by: Ingo Molnar --- arch/x86/mm/tlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 8dcc0607f805..6550d37d0f06 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -613,7 +613,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start, { int cpu; - struct flush_tlb_info info = { + struct flush_tlb_info info __aligned(SMP_CACHE_BYTES) = { .mm = mm, }; -- cgit v1.2.1 From 1cd9c22fee3ac21db52a0997d08cf2f065d2c0c0 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 31 Jan 2018 16:54:02 +0300 Subject: x86/mm/encrypt: Move page table helpers into separate translation unit There are bunch of functions in mem_encrypt.c that operate on the identity mapping, which means they want virtual addresses to be equal to physical one, without PAGE_OFFSET shift. We also need to avoid paravirtualizaion call there. Getting this done is tricky. We cannot use usual page table helpers. It forces us to open-code a lot of things. It makes code ugly and hard to modify. We can get it work with the page table helpers, but it requires few preprocessor tricks. These tricks may have side effects for the rest of the file. Let's isolate such functions into own translation unit. Tested-by: Tom Lendacky Signed-off-by: Kirill A. 
Shutemov Reviewed-by: Tom Lendacky Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180131135404.40692-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mem_encrypt.h | 1 + arch/x86/mm/Makefile | 14 +- arch/x86/mm/mem_encrypt.c | 578 +---------------------------------- arch/x86/mm/mem_encrypt_identity.c | 597 +++++++++++++++++++++++++++++++++++++ 4 files changed, 608 insertions(+), 582 deletions(-) create mode 100644 arch/x86/mm/mem_encrypt_identity.c (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 22c5f3e6f820..8fe61ad21047 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -22,6 +22,7 @@ #ifdef CONFIG_AMD_MEM_ENCRYPT extern u64 sme_me_mask; +extern bool sev_enabled; void sme_encrypt_execute(unsigned long encrypted_kernel_vaddr, unsigned long decrypted_kernel_vaddr, diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 27e9e90a8d35..03c6c8561623 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -1,12 +1,15 @@ # SPDX-License-Identifier: GPL-2.0 -# Kernel does not boot with instrumentation of tlb.c and mem_encrypt.c -KCOV_INSTRUMENT_tlb.o := n -KCOV_INSTRUMENT_mem_encrypt.o := n +# Kernel does not boot with instrumentation of tlb.c and mem_encrypt*.c +KCOV_INSTRUMENT_tlb.o := n +KCOV_INSTRUMENT_mem_encrypt.o := n +KCOV_INSTRUMENT_mem_encrypt_identity.o := n -KASAN_SANITIZE_mem_encrypt.o := n +KASAN_SANITIZE_mem_encrypt.o := n +KASAN_SANITIZE_mem_encrypt_identity.o := n ifdef CONFIG_FUNCTION_TRACER -CFLAGS_REMOVE_mem_encrypt.o = -pg +CFLAGS_REMOVE_mem_encrypt.o = -pg +CFLAGS_REMOVE_mem_encrypt_identity.o = -pg endif obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ @@ -47,4 +50,5 @@ obj-$(CONFIG_RANDOMIZE_MEMORY) += kaslr.o obj-$(CONFIG_PAGE_TABLE_ISOLATION) += pti.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt.o +obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_identity.o obj-$(CONFIG_AMD_MEM_ENCRYPT) += mem_encrypt_boot.o diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 1a53071e2e17..3a1b5fe4c2ca 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -25,17 +25,12 @@ #include #include #include -#include #include #include #include #include "mm_internal.h" -static char sme_cmdline_arg[] __initdata = "mem_encrypt"; -static char sme_cmdline_on[] __initdata = "on"; -static char sme_cmdline_off[] __initdata = "off"; - /* * Since SME related variables are set early in the boot process they must * reside in the .data section so as not to be zeroed out when the .bss @@ -46,7 +41,7 @@ EXPORT_SYMBOL(sme_me_mask); DEFINE_STATIC_KEY_FALSE(sev_enable_key); EXPORT_SYMBOL_GPL(sev_enable_key); -static bool sev_enabled __section(.data); +bool sev_enabled __section(.data); /* Buffer used for early in-place encryption by BSP, no locking needed */ static char sme_early_buffer[PAGE_SIZE] __aligned(PAGE_SIZE); @@ -463,574 +458,3 @@ void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) /* Make the SWIOTLB buffer area decrypted */ set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); } - -struct sme_populate_pgd_data { - void *pgtable_area; - pgd_t *pgd; - - pmdval_t pmd_flags; - pteval_t pte_flags; - unsigned long paddr; - - unsigned long vaddr; - unsigned long vaddr_end; -}; - -static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) -{ - unsigned 
long pgd_start, pgd_end, pgd_size; - pgd_t *pgd_p; - - pgd_start = ppd->vaddr & PGDIR_MASK; - pgd_end = ppd->vaddr_end & PGDIR_MASK; - - pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t); - - pgd_p = ppd->pgd + pgd_index(ppd->vaddr); - - memset(pgd_p, 0, pgd_size); -} - -#define PGD_FLAGS _KERNPG_TABLE_NOENC -#define P4D_FLAGS _KERNPG_TABLE_NOENC -#define PUD_FLAGS _KERNPG_TABLE_NOENC -#define PMD_FLAGS _KERNPG_TABLE_NOENC - -#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) - -#define PMD_FLAGS_DEC PMD_FLAGS_LARGE -#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ - (_PAGE_PAT | _PAGE_PWT)) - -#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) - -#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL) - -#define PTE_FLAGS_DEC PTE_FLAGS -#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ - (_PAGE_PAT | _PAGE_PWT)) - -#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC) - -static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) -{ - pgd_t *pgd_p; - p4d_t *p4d_p; - pud_t *pud_p; - pmd_t *pmd_p; - - pgd_p = ppd->pgd + pgd_index(ppd->vaddr); - if (native_pgd_val(*pgd_p)) { - if (IS_ENABLED(CONFIG_X86_5LEVEL)) - p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); - else - pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); - } else { - pgd_t pgd; - - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p = ppd->pgtable_area; - memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); - ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; - - pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); - } else { - pud_p = ppd->pgtable_area; - memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; - - pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); - } - native_set_pgd(pgd_p, pgd); - } - - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p += p4d_index(ppd->vaddr); - if (native_p4d_val(*p4d_p)) { - pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); - } else { - p4d_t p4d; - - pud_p = ppd->pgtable_area; - memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; - - p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); - native_set_p4d(p4d_p, p4d); - } - } - - pud_p += pud_index(ppd->vaddr); - if (native_pud_val(*pud_p)) { - if (native_pud_val(*pud_p) & _PAGE_PSE) - return NULL; - - pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); - } else { - pud_t pud; - - pmd_p = ppd->pgtable_area; - memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; - - pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); - native_set_pud(pud_p, pud); - } - - return pmd_p; -} - -static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) -{ - pmd_t *pmd_p; - - pmd_p = sme_prepare_pgd(ppd); - if (!pmd_p) - return; - - pmd_p += pmd_index(ppd->vaddr); - if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); -} - -static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) -{ - pmd_t *pmd_p; - pte_t *pte_p; - - pmd_p = sme_prepare_pgd(ppd); - if (!pmd_p) - return; - - pmd_p += pmd_index(ppd->vaddr); - if (native_pmd_val(*pmd_p)) { - if (native_pmd_val(*pmd_p) & _PAGE_PSE) - return; - - pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); - } else { - pmd_t pmd; - - pte_p = ppd->pgtable_area; - memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); - ppd->pgtable_area += sizeof(*pte_p) * 
PTRS_PER_PTE; - - pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); - native_set_pmd(pmd_p, pmd); - } - - pte_p += pte_index(ppd->vaddr); - if (!native_pte_val(*pte_p)) - native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); -} - -static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) -{ - while (ppd->vaddr < ppd->vaddr_end) { - sme_populate_pgd_large(ppd); - - ppd->vaddr += PMD_PAGE_SIZE; - ppd->paddr += PMD_PAGE_SIZE; - } -} - -static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) -{ - while (ppd->vaddr < ppd->vaddr_end) { - sme_populate_pgd(ppd); - - ppd->vaddr += PAGE_SIZE; - ppd->paddr += PAGE_SIZE; - } -} - -static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, - pmdval_t pmd_flags, pteval_t pte_flags) -{ - unsigned long vaddr_end; - - ppd->pmd_flags = pmd_flags; - ppd->pte_flags = pte_flags; - - /* Save original end value since we modify the struct value */ - vaddr_end = ppd->vaddr_end; - - /* If start is not 2MB aligned, create PTE entries */ - ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); - __sme_map_range_pte(ppd); - - /* Create PMD entries */ - ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; - __sme_map_range_pmd(ppd); - - /* If end is not 2MB aligned, create PTE entries */ - ppd->vaddr_end = vaddr_end; - __sme_map_range_pte(ppd); -} - -static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) -{ - __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); -} - -static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) -{ - __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); -} - -static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) -{ - __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); -} - -static unsigned long __init sme_pgtable_calc(unsigned long len) -{ - unsigned long p4d_size, pud_size, pmd_size, pte_size; - unsigned long total; - - /* - * Perform a relatively simplistic calculation of the pagetable - * entries that are needed. Those mappings will be covered mostly - * by 2MB PMD entries so we can conservatively calculate the required - * number of P4D, PUD and PMD structures needed to perform the - * mappings. For mappings that are not 2MB aligned, PTE mappings - * would be needed for the start and end portion of the address range - * that fall outside of the 2MB alignment. This results in, at most, - * two extra pages to hold PTE entries for each range that is mapped. - * Incrementing the count for each covers the case where the addresses - * cross entries. - */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; - p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; - pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } else { - p4d_size = 0; - pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } - pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; - pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; - pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; - - total = p4d_size + pud_size + pmd_size + pte_size; - - /* - * Now calculate the added pagetable structures needed to populate - * the new pagetables. 
- */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; - p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; - pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } else { - p4d_size = 0; - pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } - pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE; - pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; - - total += p4d_size + pud_size + pmd_size; - - return total; -} - -void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) -{ - unsigned long workarea_start, workarea_end, workarea_len; - unsigned long execute_start, execute_end, execute_len; - unsigned long kernel_start, kernel_end, kernel_len; - unsigned long initrd_start, initrd_end, initrd_len; - struct sme_populate_pgd_data ppd; - unsigned long pgtable_area_len; - unsigned long decrypted_base; - - if (!sme_active()) - return; - - /* - * Prepare for encrypting the kernel and initrd by building new - * pagetables with the necessary attributes needed to encrypt the - * kernel in place. - * - * One range of virtual addresses will map the memory occupied - * by the kernel and initrd as encrypted. - * - * Another range of virtual addresses will map the memory occupied - * by the kernel and initrd as decrypted and write-protected. - * - * The use of write-protect attribute will prevent any of the - * memory from being cached. - */ - - /* Physical addresses gives us the identity mapped virtual addresses */ - kernel_start = __pa_symbol(_text); - kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); - kernel_len = kernel_end - kernel_start; - - initrd_start = 0; - initrd_end = 0; - initrd_len = 0; -#ifdef CONFIG_BLK_DEV_INITRD - initrd_len = (unsigned long)bp->hdr.ramdisk_size | - ((unsigned long)bp->ext_ramdisk_size << 32); - if (initrd_len) { - initrd_start = (unsigned long)bp->hdr.ramdisk_image | - ((unsigned long)bp->ext_ramdisk_image << 32); - initrd_end = PAGE_ALIGN(initrd_start + initrd_len); - initrd_len = initrd_end - initrd_start; - } -#endif - - /* Set the encryption workarea to be immediately after the kernel */ - workarea_start = kernel_end; - - /* - * Calculate required number of workarea bytes needed: - * executable encryption area size: - * stack page (PAGE_SIZE) - * encryption routine page (PAGE_SIZE) - * intermediate copy buffer (PMD_PAGE_SIZE) - * pagetable structures for the encryption of the kernel - * pagetable structures for workarea (in case not currently mapped) - */ - execute_start = workarea_start; - execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE; - execute_len = execute_end - execute_start; - - /* - * One PGD for both encrypted and decrypted mappings and a set of - * PUDs and PMDs for each of the encrypted and decrypted mappings. - */ - pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; - pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; - if (initrd_len) - pgtable_area_len += sme_pgtable_calc(initrd_len) * 2; - - /* PUDs and PMDs needed in the current pagetables for the workarea */ - pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); - - /* - * The total workarea includes the executable encryption area and - * the pagetable area. The start of the workarea is already 2MB - * aligned, align the end of the workarea on a 2MB boundary so that - * we don't try to create/allocate PTE entries from the workarea - * before it is mapped. 
- */ - workarea_len = execute_len + pgtable_area_len; - workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); - - /* - * Set the address to the start of where newly created pagetable - * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable - * structures are created when the workarea is added to the current - * pagetables and when the new encrypted and decrypted kernel - * mappings are populated. - */ - ppd.pgtable_area = (void *)execute_end; - - /* - * Make sure the current pagetable structure has entries for - * addressing the workarea. - */ - ppd.pgd = (pgd_t *)native_read_cr3_pa(); - ppd.paddr = workarea_start; - ppd.vaddr = workarea_start; - ppd.vaddr_end = workarea_end; - sme_map_range_decrypted(&ppd); - - /* Flush the TLB - no globals so cr3 is enough */ - native_write_cr3(__native_read_cr3()); - - /* - * A new pagetable structure is being built to allow for the kernel - * and initrd to be encrypted. It starts with an empty PGD that will - * then be populated with new PUDs and PMDs as the encrypted and - * decrypted kernel mappings are created. - */ - ppd.pgd = ppd.pgtable_area; - memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); - ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; - - /* - * A different PGD index/entry must be used to get different - * pagetable entries for the decrypted mapping. Choose the next - * PGD index and convert it to a virtual address to be used as - * the base of the mapping. - */ - decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); - if (initrd_len) { - unsigned long check_base; - - check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1); - decrypted_base = max(decrypted_base, check_base); - } - decrypted_base <<= PGDIR_SHIFT; - - /* Add encrypted kernel (identity) mappings */ - ppd.paddr = kernel_start; - ppd.vaddr = kernel_start; - ppd.vaddr_end = kernel_end; - sme_map_range_encrypted(&ppd); - - /* Add decrypted, write-protected kernel (non-identity) mappings */ - ppd.paddr = kernel_start; - ppd.vaddr = kernel_start + decrypted_base; - ppd.vaddr_end = kernel_end + decrypted_base; - sme_map_range_decrypted_wp(&ppd); - - if (initrd_len) { - /* Add encrypted initrd (identity) mappings */ - ppd.paddr = initrd_start; - ppd.vaddr = initrd_start; - ppd.vaddr_end = initrd_end; - sme_map_range_encrypted(&ppd); - /* - * Add decrypted, write-protected initrd (non-identity) mappings - */ - ppd.paddr = initrd_start; - ppd.vaddr = initrd_start + decrypted_base; - ppd.vaddr_end = initrd_end + decrypted_base; - sme_map_range_decrypted_wp(&ppd); - } - - /* Add decrypted workarea mappings to both kernel mappings */ - ppd.paddr = workarea_start; - ppd.vaddr = workarea_start; - ppd.vaddr_end = workarea_end; - sme_map_range_decrypted(&ppd); - - ppd.paddr = workarea_start; - ppd.vaddr = workarea_start + decrypted_base; - ppd.vaddr_end = workarea_end + decrypted_base; - sme_map_range_decrypted(&ppd); - - /* Perform the encryption */ - sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, - kernel_len, workarea_start, (unsigned long)ppd.pgd); - - if (initrd_len) - sme_encrypt_execute(initrd_start, initrd_start + decrypted_base, - initrd_len, workarea_start, - (unsigned long)ppd.pgd); - - /* - * At this point we are running encrypted. Remove the mappings for - * the decrypted areas - all that is needed for this is to remove - * the PGD entry/entries. 
- */ - ppd.vaddr = kernel_start + decrypted_base; - ppd.vaddr_end = kernel_end + decrypted_base; - sme_clear_pgd(&ppd); - - if (initrd_len) { - ppd.vaddr = initrd_start + decrypted_base; - ppd.vaddr_end = initrd_end + decrypted_base; - sme_clear_pgd(&ppd); - } - - ppd.vaddr = workarea_start + decrypted_base; - ppd.vaddr_end = workarea_end + decrypted_base; - sme_clear_pgd(&ppd); - - /* Flush the TLB - no globals so cr3 is enough */ - native_write_cr3(__native_read_cr3()); -} - -void __init __nostackprotector sme_enable(struct boot_params *bp) -{ - const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off; - unsigned int eax, ebx, ecx, edx; - unsigned long feature_mask; - bool active_by_default; - unsigned long me_mask; - char buffer[16]; - u64 msr; - - /* Check for the SME/SEV support leaf */ - eax = 0x80000000; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - if (eax < 0x8000001f) - return; - -#define AMD_SME_BIT BIT(0) -#define AMD_SEV_BIT BIT(1) - /* - * Set the feature mask (SME or SEV) based on whether we are - * running under a hypervisor. - */ - eax = 1; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT; - - /* - * Check for the SME/SEV feature: - * CPUID Fn8000_001F[EAX] - * - Bit 0 - Secure Memory Encryption support - * - Bit 1 - Secure Encrypted Virtualization support - * CPUID Fn8000_001F[EBX] - * - Bits 5:0 - Pagetable bit position used to indicate encryption - */ - eax = 0x8000001f; - ecx = 0; - native_cpuid(&eax, &ebx, &ecx, &edx); - if (!(eax & feature_mask)) - return; - - me_mask = 1UL << (ebx & 0x3f); - - /* Check if memory encryption is enabled */ - if (feature_mask == AMD_SME_BIT) { - /* For SME, check the SYSCFG MSR */ - msr = __rdmsr(MSR_K8_SYSCFG); - if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) - return; - } else { - /* For SEV, check the SEV MSR */ - msr = __rdmsr(MSR_AMD64_SEV); - if (!(msr & MSR_AMD64_SEV_ENABLED)) - return; - - /* SEV state cannot be controlled by a command line option */ - sme_me_mask = me_mask; - sev_enabled = true; - return; - } - - /* - * Fixups have not been applied to phys_base yet and we're running - * identity mapped, so we must obtain the address to the SME command - * line argument data using rip-relative addressing. - */ - asm ("lea sme_cmdline_arg(%%rip), %0" - : "=r" (cmdline_arg) - : "p" (sme_cmdline_arg)); - asm ("lea sme_cmdline_on(%%rip), %0" - : "=r" (cmdline_on) - : "p" (sme_cmdline_on)); - asm ("lea sme_cmdline_off(%%rip), %0" - : "=r" (cmdline_off) - : "p" (sme_cmdline_off)); - - if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) - active_by_default = true; - else - active_by_default = false; - - cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | - ((u64)bp->ext_cmd_line_ptr << 32)); - - cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); - - if (!strncmp(buffer, cmdline_on, sizeof(buffer))) - sme_me_mask = me_mask; - else if (!strncmp(buffer, cmdline_off, sizeof(buffer))) - sme_me_mask = 0; - else - sme_me_mask = active_by_default ? me_mask : 0; -} diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c new file mode 100644 index 000000000000..a28978a37bfa --- /dev/null +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -0,0 +1,597 @@ +/* + * AMD Memory Encryption Support + * + * Copyright (C) 2016 Advanced Micro Devices, Inc. 
+ * + * Author: Tom Lendacky + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#define DISABLE_BRANCH_PROFILING + +#include +#include + +#include +#include +#include + +#include "mm_internal.h" + +#define PGD_FLAGS _KERNPG_TABLE_NOENC +#define P4D_FLAGS _KERNPG_TABLE_NOENC +#define PUD_FLAGS _KERNPG_TABLE_NOENC +#define PMD_FLAGS _KERNPG_TABLE_NOENC + +#define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL) + +#define PMD_FLAGS_DEC PMD_FLAGS_LARGE +#define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC) + +#define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL) + +#define PTE_FLAGS_DEC PTE_FLAGS +#define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \ + (_PAGE_PAT | _PAGE_PWT)) + +#define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC) + +struct sme_populate_pgd_data { + void *pgtable_area; + pgd_t *pgd; + + pmdval_t pmd_flags; + pteval_t pte_flags; + unsigned long paddr; + + unsigned long vaddr; + unsigned long vaddr_end; +}; + +static char sme_cmdline_arg[] __initdata = "mem_encrypt"; +static char sme_cmdline_on[] __initdata = "on"; +static char sme_cmdline_off[] __initdata = "off"; + +static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) +{ + unsigned long pgd_start, pgd_end, pgd_size; + pgd_t *pgd_p; + + pgd_start = ppd->vaddr & PGDIR_MASK; + pgd_end = ppd->vaddr_end & PGDIR_MASK; + + pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t); + + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); + + memset(pgd_p, 0, pgd_size); +} + +static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) +{ + pgd_t *pgd_p; + p4d_t *p4d_p; + pud_t *pud_p; + pmd_t *pmd_p; + + pgd_p = ppd->pgd + pgd_index(ppd->vaddr); + if (native_pgd_val(*pgd_p)) { + if (IS_ENABLED(CONFIG_X86_5LEVEL)) + p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); + else + pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); + } else { + pgd_t pgd; + + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + p4d_p = ppd->pgtable_area; + memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); + ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; + + pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); + } else { + pud_p = ppd->pgtable_area; + memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + + pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); + } + native_set_pgd(pgd_p, pgd); + } + + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + p4d_p += p4d_index(ppd->vaddr); + if (native_p4d_val(*p4d_p)) { + pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); + } else { + p4d_t p4d; + + pud_p = ppd->pgtable_area; + memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); + ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; + + p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); + native_set_p4d(p4d_p, p4d); + } + } + + pud_p += pud_index(ppd->vaddr); + if (native_pud_val(*pud_p)) { + if (native_pud_val(*pud_p) & _PAGE_PSE) + return NULL; + + pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); + } else { + pud_t pud; + + pmd_p = ppd->pgtable_area; + memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); + ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; + + pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); + native_set_pud(pud_p, pud); + } + + return pmd_p; +} + +static void __init sme_populate_pgd_large(struct 
sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); + if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) + native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); +} + +static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) +{ + pmd_t *pmd_p; + pte_t *pte_p; + + pmd_p = sme_prepare_pgd(ppd); + if (!pmd_p) + return; + + pmd_p += pmd_index(ppd->vaddr); + if (native_pmd_val(*pmd_p)) { + if (native_pmd_val(*pmd_p) & _PAGE_PSE) + return; + + pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); + } else { + pmd_t pmd; + + pte_p = ppd->pgtable_area; + memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); + ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE; + + pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); + native_set_pmd(pmd_p, pmd); + } + + pte_p += pte_index(ppd->vaddr); + if (!native_pte_val(*pte_p)) + native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); +} + +static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd_large(ppd); + + ppd->vaddr += PMD_PAGE_SIZE; + ppd->paddr += PMD_PAGE_SIZE; + } +} + +static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd) +{ + while (ppd->vaddr < ppd->vaddr_end) { + sme_populate_pgd(ppd); + + ppd->vaddr += PAGE_SIZE; + ppd->paddr += PAGE_SIZE; + } +} + +static void __init __sme_map_range(struct sme_populate_pgd_data *ppd, + pmdval_t pmd_flags, pteval_t pte_flags) +{ + unsigned long vaddr_end; + + ppd->pmd_flags = pmd_flags; + ppd->pte_flags = pte_flags; + + /* Save original end value since we modify the struct value */ + vaddr_end = ppd->vaddr_end; + + /* If start is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE); + __sme_map_range_pte(ppd); + + /* Create PMD entries */ + ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK; + __sme_map_range_pmd(ppd); + + /* If end is not 2MB aligned, create PTE entries */ + ppd->vaddr_end = vaddr_end; + __sme_map_range_pte(ppd); +} + +static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC); +} + +static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC); +} + +static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) +{ + __sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP); +} + +static unsigned long __init sme_pgtable_calc(unsigned long len) +{ + unsigned long p4d_size, pud_size, pmd_size, pte_size; + unsigned long total; + + /* + * Perform a relatively simplistic calculation of the pagetable + * entries that are needed. Those mappings will be covered mostly + * by 2MB PMD entries so we can conservatively calculate the required + * number of P4D, PUD and PMD structures needed to perform the + * mappings. For mappings that are not 2MB aligned, PTE mappings + * would be needed for the start and end portion of the address range + * that fall outside of the 2MB alignment. This results in, at most, + * two extra pages to hold PTE entries for each range that is mapped. + * Incrementing the count for each covers the case where the addresses + * cross entries. 
+ */ + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; + p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; + pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1; + pud_size *= sizeof(pud_t) * PTRS_PER_PUD; + } else { + p4d_size = 0; + pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; + pud_size *= sizeof(pud_t) * PTRS_PER_PUD; + } + pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; + pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; + pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; + + total = p4d_size + pud_size + pmd_size + pte_size; + + /* + * Now calculate the added pagetable structures needed to populate + * the new pagetables. + */ + if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; + p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; + pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE; + pud_size *= sizeof(pud_t) * PTRS_PER_PUD; + } else { + p4d_size = 0; + pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; + pud_size *= sizeof(pud_t) * PTRS_PER_PUD; + } + pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE; + pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; + + total += p4d_size + pud_size + pmd_size; + + return total; +} + +void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) +{ + unsigned long workarea_start, workarea_end, workarea_len; + unsigned long execute_start, execute_end, execute_len; + unsigned long kernel_start, kernel_end, kernel_len; + unsigned long initrd_start, initrd_end, initrd_len; + struct sme_populate_pgd_data ppd; + unsigned long pgtable_area_len; + unsigned long decrypted_base; + + if (!sme_active()) + return; + + /* + * Prepare for encrypting the kernel and initrd by building new + * pagetables with the necessary attributes needed to encrypt the + * kernel in place. + * + * One range of virtual addresses will map the memory occupied + * by the kernel and initrd as encrypted. + * + * Another range of virtual addresses will map the memory occupied + * by the kernel and initrd as decrypted and write-protected. + * + * The use of write-protect attribute will prevent any of the + * memory from being cached. + */ + + /* Physical addresses gives us the identity mapped virtual addresses */ + kernel_start = __pa_symbol(_text); + kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE); + kernel_len = kernel_end - kernel_start; + + initrd_start = 0; + initrd_end = 0; + initrd_len = 0; +#ifdef CONFIG_BLK_DEV_INITRD + initrd_len = (unsigned long)bp->hdr.ramdisk_size | + ((unsigned long)bp->ext_ramdisk_size << 32); + if (initrd_len) { + initrd_start = (unsigned long)bp->hdr.ramdisk_image | + ((unsigned long)bp->ext_ramdisk_image << 32); + initrd_end = PAGE_ALIGN(initrd_start + initrd_len); + initrd_len = initrd_end - initrd_start; + } +#endif + + /* Set the encryption workarea to be immediately after the kernel */ + workarea_start = kernel_end; + + /* + * Calculate required number of workarea bytes needed: + * executable encryption area size: + * stack page (PAGE_SIZE) + * encryption routine page (PAGE_SIZE) + * intermediate copy buffer (PMD_PAGE_SIZE) + * pagetable structures for the encryption of the kernel + * pagetable structures for workarea (in case not currently mapped) + */ + execute_start = workarea_start; + execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE; + execute_len = execute_end - execute_start; + + /* + * One PGD for both encrypted and decrypted mappings and a set of + * PUDs and PMDs for each of the encrypted and decrypted mappings. 
+ */ + pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD; + pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2; + if (initrd_len) + pgtable_area_len += sme_pgtable_calc(initrd_len) * 2; + + /* PUDs and PMDs needed in the current pagetables for the workarea */ + pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len); + + /* + * The total workarea includes the executable encryption area and + * the pagetable area. The start of the workarea is already 2MB + * aligned, align the end of the workarea on a 2MB boundary so that + * we don't try to create/allocate PTE entries from the workarea + * before it is mapped. + */ + workarea_len = execute_len + pgtable_area_len; + workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE); + + /* + * Set the address to the start of where newly created pagetable + * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable + * structures are created when the workarea is added to the current + * pagetables and when the new encrypted and decrypted kernel + * mappings are populated. + */ + ppd.pgtable_area = (void *)execute_end; + + /* + * Make sure the current pagetable structure has entries for + * addressing the workarea. + */ + ppd.pgd = (pgd_t *)native_read_cr3_pa(); + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); + + /* Flush the TLB - no globals so cr3 is enough */ + native_write_cr3(__native_read_cr3()); + + /* + * A new pagetable structure is being built to allow for the kernel + * and initrd to be encrypted. It starts with an empty PGD that will + * then be populated with new PUDs and PMDs as the encrypted and + * decrypted kernel mappings are created. + */ + ppd.pgd = ppd.pgtable_area; + memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD); + ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD; + + /* + * A different PGD index/entry must be used to get different + * pagetable entries for the decrypted mapping. Choose the next + * PGD index and convert it to a virtual address to be used as + * the base of the mapping. 
+ */ + decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1); + if (initrd_len) { + unsigned long check_base; + + check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1); + decrypted_base = max(decrypted_base, check_base); + } + decrypted_base <<= PGDIR_SHIFT; + + /* Add encrypted kernel (identity) mappings */ + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start; + ppd.vaddr_end = kernel_end; + sme_map_range_encrypted(&ppd); + + /* Add decrypted, write-protected kernel (non-identity) mappings */ + ppd.paddr = kernel_start; + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); + + if (initrd_len) { + /* Add encrypted initrd (identity) mappings */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start; + ppd.vaddr_end = initrd_end; + sme_map_range_encrypted(&ppd); + /* + * Add decrypted, write-protected initrd (non-identity) mappings + */ + ppd.paddr = initrd_start; + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_map_range_decrypted_wp(&ppd); + } + + /* Add decrypted workarea mappings to both kernel mappings */ + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start; + ppd.vaddr_end = workarea_end; + sme_map_range_decrypted(&ppd); + + ppd.paddr = workarea_start; + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_map_range_decrypted(&ppd); + + /* Perform the encryption */ + sme_encrypt_execute(kernel_start, kernel_start + decrypted_base, + kernel_len, workarea_start, (unsigned long)ppd.pgd); + + if (initrd_len) + sme_encrypt_execute(initrd_start, initrd_start + decrypted_base, + initrd_len, workarea_start, + (unsigned long)ppd.pgd); + + /* + * At this point we are running encrypted. Remove the mappings for + * the decrypted areas - all that is needed for this is to remove + * the PGD entry/entries. + */ + ppd.vaddr = kernel_start + decrypted_base; + ppd.vaddr_end = kernel_end + decrypted_base; + sme_clear_pgd(&ppd); + + if (initrd_len) { + ppd.vaddr = initrd_start + decrypted_base; + ppd.vaddr_end = initrd_end + decrypted_base; + sme_clear_pgd(&ppd); + } + + ppd.vaddr = workarea_start + decrypted_base; + ppd.vaddr_end = workarea_end + decrypted_base; + sme_clear_pgd(&ppd); + + /* Flush the TLB - no globals so cr3 is enough */ + native_write_cr3(__native_read_cr3()); +} + +void __init __nostackprotector sme_enable(struct boot_params *bp) +{ + const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off; + unsigned int eax, ebx, ecx, edx; + unsigned long feature_mask; + bool active_by_default; + unsigned long me_mask; + char buffer[16]; + u64 msr; + + /* Check for the SME/SEV support leaf */ + eax = 0x80000000; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (eax < 0x8000001f) + return; + +#define AMD_SME_BIT BIT(0) +#define AMD_SEV_BIT BIT(1) + /* + * Set the feature mask (SME or SEV) based on whether we are + * running under a hypervisor. + */ + eax = 1; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + feature_mask = (ecx & BIT(31)) ? 
AMD_SEV_BIT : AMD_SME_BIT; + + /* + * Check for the SME/SEV feature: + * CPUID Fn8000_001F[EAX] + * - Bit 0 - Secure Memory Encryption support + * - Bit 1 - Secure Encrypted Virtualization support + * CPUID Fn8000_001F[EBX] + * - Bits 5:0 - Pagetable bit position used to indicate encryption + */ + eax = 0x8000001f; + ecx = 0; + native_cpuid(&eax, &ebx, &ecx, &edx); + if (!(eax & feature_mask)) + return; + + me_mask = 1UL << (ebx & 0x3f); + + /* Check if memory encryption is enabled */ + if (feature_mask == AMD_SME_BIT) { + /* For SME, check the SYSCFG MSR */ + msr = __rdmsr(MSR_K8_SYSCFG); + if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT)) + return; + } else { + /* For SEV, check the SEV MSR */ + msr = __rdmsr(MSR_AMD64_SEV); + if (!(msr & MSR_AMD64_SEV_ENABLED)) + return; + + /* SEV state cannot be controlled by a command line option */ + sme_me_mask = me_mask; + sev_enabled = true; + return; + } + + /* + * Fixups have not been applied to phys_base yet and we're running + * identity mapped, so we must obtain the address to the SME command + * line argument data using rip-relative addressing. + */ + asm ("lea sme_cmdline_arg(%%rip), %0" + : "=r" (cmdline_arg) + : "p" (sme_cmdline_arg)); + asm ("lea sme_cmdline_on(%%rip), %0" + : "=r" (cmdline_on) + : "p" (sme_cmdline_on)); + asm ("lea sme_cmdline_off(%%rip), %0" + : "=r" (cmdline_off) + : "p" (sme_cmdline_off)); + + if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT)) + active_by_default = true; + else + active_by_default = false; + + cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr | + ((u64)bp->ext_cmd_line_ptr << 32)); + + cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)); + + if (!strncmp(buffer, cmdline_on, sizeof(buffer))) + sme_me_mask = me_mask; + else if (!strncmp(buffer, cmdline_off, sizeof(buffer))) + sme_me_mask = 0; + else + sme_me_mask = active_by_default ? me_mask : 0; +} -- cgit v1.2.1 From aad983913d77af2c3394f29b88d7bb75ebd7d172 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 31 Jan 2018 16:54:03 +0300 Subject: x86/mm/encrypt: Simplify sme_populate_pgd() and sme_populate_pgd_large() sme_populate_pgd() and sme_populate_pgd_large() operate on the identity mapping, which means they want virtual addresses to be equal to the physical ones, without the PAGE_OFFSET shift. We also need to avoid paravirtualization calls there. Getting this done is tricky. We cannot use the usual page table helpers. It forces us to open-code a lot of things. It makes code ugly and hard to modify. We can get it to work with the page table helpers, but it requires a few preprocessor tricks. - Define __pa() and __va() to be compatible with identity mapping. - Undef CONFIG_PARAVIRT and CONFIG_PARAVIRT_SPINLOCKS before including any file. This way we can avoid paravirtualization calls. Now we can use the normal page table helpers just fine. Tested-by: Tom Lendacky Signed-off-by: Kirill A.
Shutemov Reviewed-by: Tom Lendacky Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180131135404.40692-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/mem_encrypt_identity.c | 159 +++++++++++++++++-------------------- 1 file changed, 72 insertions(+), 87 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index a28978a37bfa..4b6a2e3098c5 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -12,6 +12,24 @@ #define DISABLE_BRANCH_PROFILING +/* + * Since we're dealing with identity mappings, physical and virtual + * addresses are the same, so override these defines which are ultimately + * used by the headers in misc.h. + */ +#define __pa(x) ((unsigned long)(x)) +#define __va(x) ((void *)((unsigned long)(x))) + +/* + * Special hack: we have to be careful, because no indirections are + * allowed here, and paravirt_ops is a kind of one. As it will only run in + * baremetal anyway, we just keep it from happening. (This list needs to + * be extended when new paravirt and debugging variants are added.) + */ +#undef CONFIG_PARAVIRT +#undef CONFIG_PARAVIRT_SPINLOCKS + +#include #include #include @@ -73,116 +91,83 @@ static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd) memset(pgd_p, 0, pgd_size); } -static pmd_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) +static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd) { - pgd_t *pgd_p; - p4d_t *p4d_p; - pud_t *pud_p; - pmd_t *pmd_p; - - pgd_p = ppd->pgd + pgd_index(ppd->vaddr); - if (native_pgd_val(*pgd_p)) { - if (IS_ENABLED(CONFIG_X86_5LEVEL)) - p4d_p = (p4d_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); - else - pud_p = (pud_t *)(native_pgd_val(*pgd_p) & ~PTE_FLAGS_MASK); - } else { - pgd_t pgd; - - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p = ppd->pgtable_area; - memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D); - ppd->pgtable_area += sizeof(*p4d_p) * PTRS_PER_P4D; - - pgd = native_make_pgd((pgdval_t)p4d_p + PGD_FLAGS); - } else { - pud_p = ppd->pgtable_area; - memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; - - pgd = native_make_pgd((pgdval_t)pud_p + PGD_FLAGS); - } - native_set_pgd(pgd_p, pgd); + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + pgd = ppd->pgd + pgd_index(ppd->vaddr); + if (pgd_none(*pgd)) { + p4d = ppd->pgtable_area; + memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D); + ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D; + set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d))); } - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_p += p4d_index(ppd->vaddr); - if (native_p4d_val(*p4d_p)) { - pud_p = (pud_t *)(native_p4d_val(*p4d_p) & ~PTE_FLAGS_MASK); - } else { - p4d_t p4d; - - pud_p = ppd->pgtable_area; - memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD); - ppd->pgtable_area += sizeof(*pud_p) * PTRS_PER_PUD; - - p4d = native_make_p4d((pudval_t)pud_p + P4D_FLAGS); - native_set_p4d(p4d_p, p4d); - } + p4d = p4d_offset(pgd, ppd->vaddr); + if (p4d_none(*p4d)) { + pud = ppd->pgtable_area; + memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD); + ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD; + set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud))); } - pud_p += pud_index(ppd->vaddr); - if (native_pud_val(*pud_p)) { - if (native_pud_val(*pud_p) & _PAGE_PSE) - return NULL; - - pmd_p = (pmd_t *)(native_pud_val(*pud_p) & ~PTE_FLAGS_MASK); 
- } else { - pud_t pud; - - pmd_p = ppd->pgtable_area; - memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD); - ppd->pgtable_area += sizeof(*pmd_p) * PTRS_PER_PMD; - - pud = native_make_pud((pmdval_t)pmd_p + PUD_FLAGS); - native_set_pud(pud_p, pud); + pud = pud_offset(p4d, ppd->vaddr); + if (pud_none(*pud)) { + pmd = ppd->pgtable_area; + memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD); + ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD; + set_pud(pud, __pud(PUD_FLAGS | __pa(pmd))); } - return pmd_p; + if (pud_large(*pud)) + return NULL; + + return pud; } static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd) { - pmd_t *pmd_p; + pud_t *pud; + pmd_t *pmd; - pmd_p = sme_prepare_pgd(ppd); - if (!pmd_p) + pud = sme_prepare_pgd(ppd); + if (!pud) return; - pmd_p += pmd_index(ppd->vaddr); - if (!native_pmd_val(*pmd_p) || !(native_pmd_val(*pmd_p) & _PAGE_PSE)) - native_set_pmd(pmd_p, native_make_pmd(ppd->paddr | ppd->pmd_flags)); + pmd = pmd_offset(pud, ppd->vaddr); + if (pmd_large(*pmd)) + return; + + set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags)); } static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd) { - pmd_t *pmd_p; - pte_t *pte_p; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; - pmd_p = sme_prepare_pgd(ppd); - if (!pmd_p) + pud = sme_prepare_pgd(ppd); + if (!pud) return; - pmd_p += pmd_index(ppd->vaddr); - if (native_pmd_val(*pmd_p)) { - if (native_pmd_val(*pmd_p) & _PAGE_PSE) - return; - - pte_p = (pte_t *)(native_pmd_val(*pmd_p) & ~PTE_FLAGS_MASK); - } else { - pmd_t pmd; - - pte_p = ppd->pgtable_area; - memset(pte_p, 0, sizeof(*pte_p) * PTRS_PER_PTE); - ppd->pgtable_area += sizeof(*pte_p) * PTRS_PER_PTE; - - pmd = native_make_pmd((pteval_t)pte_p + PMD_FLAGS); - native_set_pmd(pmd_p, pmd); + pmd = pmd_offset(pud, ppd->vaddr); + if (pmd_none(*pmd)) { + pte = ppd->pgtable_area; + memset(pte, 0, sizeof(pte) * PTRS_PER_PTE); + ppd->pgtable_area += sizeof(pte) * PTRS_PER_PTE; + set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte))); } - pte_p += pte_index(ppd->vaddr); - if (!native_pte_val(*pte_p)) - native_set_pte(pte_p, native_make_pte(ppd->paddr | ppd->pte_flags)); + if (pmd_large(*pmd)) + return; + + pte = pte_offset_map(pmd, ppd->vaddr); + if (pte_none(*pte)) + set_pte(pte, __pte(ppd->paddr | ppd->pte_flags)); } static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd) -- cgit v1.2.1 From 1070730c1ad2bd49b0d11112728f1f4390137728 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 31 Jan 2018 16:54:04 +0300 Subject: x86/mm/encrypt: Simplify sme_pgtable_calc() sme_pgtable_calc() is unnecessary complex. It can be re-written in a more stream-lined way. As a side effect, we would get the code ready to boot-time switching between paging modes. Tested-by: Tom Lendacky Signed-off-by: Kirill A. 
Shutemov Reviewed-by: Tom Lendacky Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180131135404.40692-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/mm/mem_encrypt_identity.c | 42 +++++++++++--------------------------- 1 file changed, 12 insertions(+), 30 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index 4b6a2e3098c5..b4139c5ab972 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -231,8 +231,7 @@ static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd) static unsigned long __init sme_pgtable_calc(unsigned long len) { - unsigned long p4d_size, pud_size, pmd_size, pte_size; - unsigned long total; + unsigned long entries = 0, tables = 0; /* * Perform a relatively simplistic calculation of the pagetable @@ -246,42 +245,25 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) * Incrementing the count for each covers the case where the addresses * cross entries. */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; - p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; - pud_size = (ALIGN(len, P4D_SIZE) / P4D_SIZE) + 1; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } else { - p4d_size = 0; - pud_size = (ALIGN(len, PGDIR_SIZE) / PGDIR_SIZE) + 1; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } - pmd_size = (ALIGN(len, PUD_SIZE) / PUD_SIZE) + 1; - pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; - pte_size = 2 * sizeof(pte_t) * PTRS_PER_PTE; - total = p4d_size + pud_size + pmd_size + pte_size; + /* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */ + if (PTRS_PER_P4D > 1) + entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D; + entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD; + entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD; + entries += 2 * sizeof(pte_t) * PTRS_PER_PTE; /* * Now calculate the added pagetable structures needed to populate * the new pagetables. */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { - p4d_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; - p4d_size *= sizeof(p4d_t) * PTRS_PER_P4D; - pud_size = ALIGN(total, P4D_SIZE) / P4D_SIZE; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } else { - p4d_size = 0; - pud_size = ALIGN(total, PGDIR_SIZE) / PGDIR_SIZE; - pud_size *= sizeof(pud_t) * PTRS_PER_PUD; - } - pmd_size = ALIGN(total, PUD_SIZE) / PUD_SIZE; - pmd_size *= sizeof(pmd_t) * PTRS_PER_PMD; - total += p4d_size + pud_size + pmd_size; + if (PTRS_PER_P4D > 1) + tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D; + tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD; + tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD; - return total; + return entries + tables; } void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) -- cgit v1.2.1 From 116fef6408599dd6ff6996235c50aa692e9b5631 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Wed, 31 Jan 2018 07:56:22 -0800 Subject: x86/mm/dump_pagetables: Add the EFI pagetable to the debugfs 'page_tables' directory EFI is complicated enough that being able to view its pagetables is quite helpful. Rather than requiring users to fish it out of dmesg on an appropriately configured kernel, let users view it in debugfs as well. 
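For reference, the new debugfs entry added by this patch follows the standard single_open()/seq_file idiom; gathered into one place it looks roughly like the sketch below. Only ptdump_walk_pgd_level_debugfs(), efi_pgd, the "efi" file name and the 0400 mode come from the patch itself; the directory handle and the init wrapper are hypothetical glue.

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/pgtable.h>

extern pgd_t *efi_pgd;                  /* exported by the patch */
extern struct dentry *example_pt_dir;   /* hypothetical: the existing "page_tables" dir */

static int example_efi_ptdump_show(struct seq_file *m, void *v)
{
        if (efi_pgd)
                ptdump_walk_pgd_level_debugfs(m, efi_pgd, false);
        return 0;
}

static int example_efi_ptdump_open(struct inode *inode, struct file *filp)
{
        return single_open(filp, example_efi_ptdump_show, NULL);
}

static const struct file_operations example_efi_ptdump_fops = {
        .owner   = THIS_MODULE,
        .open    = example_efi_ptdump_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init example_efi_ptdump_init(void)
{
        /* Userspace then reads the dump with: cat /sys/kernel/debug/page_tables/efi */
        if (!debugfs_create_file("efi", 0400, example_pt_dir, NULL,
                                 &example_efi_ptdump_fops))
                return -ENOMEM;
        return 0;
}

The 0400 mode keeps the dump readable by root only, since kernel virtual addresses are sensitive information.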
Signed-off-by: Andy Lutomirski Acked-by: Ard Biesheuvel Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/ba158a93f3250e6fca752cff2cfb1fcdd9f2b50c.1517414050.git.luto@kernel.org [ Fixed trivial whitespace damage and fixed missing export. ] Signed-off-by: Ingo Molnar --- arch/x86/mm/debug_pagetables.c | 32 ++++++++++++++++++++++++++++++++ arch/x86/platform/efi/efi_64.c | 4 +++- 2 files changed, 35 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index 421f2664ffa0..51a6f92da2bf 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -72,6 +72,31 @@ static const struct file_operations ptdump_curusr_fops = { }; #endif +#if defined(CONFIG_EFI) && defined(CONFIG_X86_64) +extern pgd_t *efi_pgd; +static struct dentry *pe_efi; + +static int ptdump_show_efi(struct seq_file *m, void *v) +{ + if (efi_pgd) + ptdump_walk_pgd_level_debugfs(m, efi_pgd, false); + return 0; +} + +static int ptdump_open_efi(struct inode *inode, struct file *filp) +{ + return single_open(filp, ptdump_show_efi, NULL); +} + +static const struct file_operations ptdump_efi_fops = { + .owner = THIS_MODULE, + .open = ptdump_open_efi, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; +#endif + static struct dentry *dir, *pe_knl, *pe_curknl; static int __init pt_dump_debug_init(void) @@ -96,6 +121,13 @@ static int __init pt_dump_debug_init(void) if (!pe_curusr) goto err; #endif + +#if defined(CONFIG_EFI) && defined(CONFIG_X86_64) + pe_efi = debugfs_create_file("efi", 0400, dir, NULL, &ptdump_efi_fops); + if (!pe_efi) + goto err; +#endif + return 0; err: debugfs_remove_recursive(dir); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index c310a8284358..780460aa5ea5 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -27,6 +27,7 @@ #include #include #include +#include #include #include #include @@ -190,7 +191,8 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } -static pgd_t *efi_pgd; +pgd_t *efi_pgd; +EXPORT_SYMBOL_GPL(efi_pgd); /* * We need our own copy of the higher levels of the page tables -- cgit v1.2.1 From fd0e786d9d09024f67bd71ec094b110237dc3840 Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Thu, 25 Jan 2018 14:23:48 -0800 Subject: x86/mm, mm/hwpoison: Don't unconditionally unmap kernel 1:1 pages In the following commit: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") ... we added code to memory_failure() to unmap the page from the kernel 1:1 virtual address space to avoid speculative access to the page logging additional errors. But memory_failure() may not always succeed in taking the page offline, especially if the page belongs to the kernel. This can happen if there are too many corrected errors on a page and either mcelog(8) or drivers/ras/cec.c asks to take a page offline. Since we remove the 1:1 mapping early in memory_failure(), we can end up with the page unmapped, but still in use. On the next access the kernel crashes :-( There are also various debug paths that call memory_failure() to simulate occurrence of an error. Since there is no actual error in memory, we don't need to map out the page for those cases. Revert most of the previous attempt and keep the solution local to arch/x86/kernel/cpu/mcheck/mce.c. 
Unmap the page only when: 1) there is a real error 2) memory_failure() succeeds. All of this only applies to 64-bit systems. 32-bit kernel doesn't map all of memory into kernel space. It isn't worth adding the code to unmap the piece that is mapped because nobody would run a 32-bit kernel on a machine that has recoverable machine checks. Signed-off-by: Tony Luck Cc: Andrew Morton Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Dave Cc: Denys Vlasenko Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Naoya Horiguchi Cc: Peter Zijlstra Cc: Robert (Persistent Memory) Cc: Thomas Gleixner Cc: linux-mm@kvack.org Cc: stable@vger.kernel.org #v4.14 Fixes: ce0fa3e56ad2 ("x86/mm, mm/hwpoison: Clear PRESENT bit for kernel 1:1 mappings of poison pages") Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64.h | 4 ---- arch/x86/kernel/cpu/mcheck/mce-internal.h | 15 +++++++++++++++ arch/x86/kernel/cpu/mcheck/mce.c | 17 +++++++++++------ 3 files changed, 26 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..d652a3808065 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -52,10 +52,6 @@ static inline void clear_page(void *page) void copy_page(void *to, void *from); -#ifdef CONFIG_X86_MCE -#define arch_unmap_kpfn arch_unmap_kpfn -#endif - #endif /* !__ASSEMBLY__ */ #ifdef CONFIG_X86_VSYSCALL_EMULATION diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index aa0d5df9dc60..e956eb267061 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -115,4 +115,19 @@ static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } extern struct mca_config mca_cfg; +#ifndef CONFIG_X86_64 +/* + * On 32-bit systems it would be difficult to safely unmap a poison page + * from the kernel 1:1 map because there are no non-canonical addresses that + * we can use to refer to the address without risking a speculative access. + * However, this isn't much of an issue because: + * 1) Few unmappable pages are in the 1:1 map. Most are in HIGHMEM which + * are only mapped into the kernel as needed + * 2) Few people would run a 32-bit kernel on a machine that supports + * recoverable errors because they have too much memory to boot 32-bit. + */ +static inline void mce_unmap_kpfn(unsigned long pfn) {} +#define mce_unmap_kpfn mce_unmap_kpfn +#endif + #endif /* __X86_MCE_INTERNAL_H__ */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 75f405ac085c..8ff94d1e2dce 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -105,6 +105,10 @@ static struct irq_work mce_irq_work; static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn); +#endif + /* * CPU/chipset specific EDAC code can register a notifier call here to print * MCE errors in a human-readable form. 
@@ -590,7 +594,8 @@ static int srao_decode_notifier(struct notifier_block *nb, unsigned long val, if (mce_usable_address(mce) && (mce->severity == MCE_AO_SEVERITY)) { pfn = mce->addr >> PAGE_SHIFT; - memory_failure(pfn, 0); + if (!memory_failure(pfn, 0)) + mce_unmap_kpfn(pfn); } return NOTIFY_OK; @@ -1057,12 +1062,13 @@ static int do_memory_failure(struct mce *m) ret = memory_failure(m->addr >> PAGE_SHIFT, flags); if (ret) pr_err("Memory error not recovered"); + else + mce_unmap_kpfn(m->addr >> PAGE_SHIFT); return ret; } -#if defined(arch_unmap_kpfn) && defined(CONFIG_MEMORY_FAILURE) - -void arch_unmap_kpfn(unsigned long pfn) +#ifndef mce_unmap_kpfn +static void mce_unmap_kpfn(unsigned long pfn) { unsigned long decoy_addr; @@ -1073,7 +1079,7 @@ void arch_unmap_kpfn(unsigned long pfn) * We would like to just call: * set_memory_np((unsigned long)pfn_to_kaddr(pfn), 1); * but doing that would radically increase the odds of a - * speculative access to the posion page because we'd have + * speculative access to the poison page because we'd have * the virtual address of the kernel 1:1 mapping sitting * around in registers. * Instead we get tricky. We create a non-canonical address @@ -1098,7 +1104,6 @@ void arch_unmap_kpfn(unsigned long pfn) if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); - } #endif -- cgit v1.2.1 From afed7d172091040f38c75a2927fed170ea11585f Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Wed, 17 Jan 2018 11:45:43 +0800 Subject: x86/x2apic: Mark set_x2apic_phys_mode() as __init set_x2apic_phys_mode() is only called as part of early_param() initialization - so mark it as __init. Signed-off-by: Dou Liyang Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180117034543.26723-1-douly.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_phys.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c index f8d9d69994e6..e2829bf40e4a 100644 --- a/arch/x86/kernel/apic/x2apic_phys.c +++ b/arch/x86/kernel/apic/x2apic_phys.c @@ -14,7 +14,7 @@ int x2apic_phys; static struct apic apic_x2apic_phys; -static int set_x2apic_phys_mode(char *arg) +static int __init set_x2apic_phys_mode(char *arg) { x2apic_phys = 1; return 0; -- cgit v1.2.1 From ccf5355d05cd891522267f04b2723002e7f061de Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Wed, 17 Jan 2018 15:37:48 +0800 Subject: x86/apic: Simplify init_bsp_APIC() usage Since CONFIG_X86_64 selects CONFIG_X86_LOCAL_APIC, the following condition: #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) is equivalent to: #if defined(CONFIG_X86_LOCAL_APIC) ... and we can eliminate that #ifdef by providing an empty init_bsp_APIC() stub in the !CONFIG_X86_LOCAL_APIC case. Also add some comments to explain why we call init_bsp_APIC(). 
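The #ifdef removal above works because of the usual stub pattern: when a feature is configured out, its header supplies an empty inline function, so call sites never need to guard the call themselves (here, the stub added below is init_bsp_APIC() for the !CONFIG_X86_LOCAL_APIC case). A minimal sketch of the pattern, with hypothetical option and function names:

#include <linux/init.h>

/* In a header, next to the real declaration: */
#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_feature_init(void);
#else
static inline void example_feature_init(void) { }      /* compiles away when disabled */
#endif

/* At the call site no #ifdef is needed either way: */
void __init example_platform_setup(void)
{
        example_feature_init();
}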
Signed-off-by: Dou Liyang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: mroos@linux.ee Cc: ville.syrjala@linux.intel.com Link: http://lkml.kernel.org/r/20180117073748.23905-1-douly.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 1 + arch/x86/kernel/irqinit.c | 9 +++++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 98722773391d..6e1990d69865 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -183,6 +183,7 @@ static inline void disable_local_APIC(void) { } # define setup_boot_APIC_clock x86_init_noop # define setup_secondary_APIC_clock x86_init_noop static inline void lapic_update_tsc_freq(void) { } +static inline void init_bsp_APIC(void) { } static inline void apic_intr_mode_init(void) { } static inline void lapic_assign_system_vectors(void) { } static inline void lapic_assign_legacy_vector(unsigned int i, bool r) { } diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index a539410c4ea9..772196c1b8c4 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -61,9 +61,14 @@ void __init init_ISA_irqs(void) struct irq_chip *chip = legacy_pic->chip; int i; -#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) + /* + * Try to set up the through-local-APIC virtual wire mode earlier. + * + * On some 32-bit UP machines, whose APIC has been disabled by BIOS + * and then got re-enabled by "lapic", it hangs at boot time without this. + */ init_bsp_APIC(); -#endif + legacy_pic->init(0); for (i = 0; i < nr_legacy_irqs(); i++) -- cgit v1.2.1 From 1acdbf7ea8a80706561013346d281d282a3c00f9 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sat, 13 Jan 2018 21:50:48 +0300 Subject: x86/asm: Clobber flags in clear_page() All clear_page() implementations use XOR which resets flags. Judging by allyesconfig disassembly no code is affected. Signed-off-by: Alexey Dobriyan Reviewed-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180113185048.GA23111@avx2 Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..f8a85c65f150 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -47,7 +47,7 @@ static inline void clear_page(void *page) clear_page_erms, X86_FEATURE_ERMS, "=D" (page), "0" (page) - : "memory", "rax", "rcx"); + : "cc", "memory", "rax", "rcx"); } void copy_page(void *to, void *from); -- cgit v1.2.1 From 16c5055a5ff5e1ff13c5211e44514a2b1d2c1dae Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Sat, 13 Jan 2018 22:06:48 +0300 Subject: x86/asm: Trim clear_page.S includes After alternatives were shifted to the call site, only 2 headers are necessary. 
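The "cc" clobber added to clear_page() further above can be seen in isolation: XOR, like most ALU instructions, rewrites EFLAGS, so inline assembly that uses it should list the condition codes as clobbered alongside any scratch registers. Compilers targeting x86 typically assume the flags are clobbered by every asm statement anyway, which matches the patch's observation that no generated code is affected; spelling out "cc" simply documents the fact. A minimal, hypothetical example:

/* Hypothetical helper, not from the patch: zero a word via XOR. */
static inline void example_zero_word(unsigned long *p)
{
        asm volatile("xorq %%rax, %%rax\n\t"
                     "movq %%rax, %0"
                     : "=m" (*p)
                     :                          /* no inputs */
                     : "rax", "cc");            /* XOR clobbers %rax and the flags */
}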
Signed-off-by: Alexey Dobriyan Reviewed-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180113190648.GB23111@avx2 Signed-off-by: Ingo Molnar --- arch/x86/lib/clear_page_64.S | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S index 81b1635d67de..88acd349911b 100644 --- a/arch/x86/lib/clear_page_64.S +++ b/arch/x86/lib/clear_page_64.S @@ -1,6 +1,4 @@ #include -#include -#include #include /* -- cgit v1.2.1 From b83ce5ee91471d19c403ff91227204fb37c95fb2 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:48 +0300 Subject: x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52 __PHYSICAL_MASK_SHIFT is used to define the mask that helps to extract physical address from a page table entry. Although real physical address space available may differ between machines, it's safe to use 52 as __PHYSICAL_MASK_SHIFT. Unused bits above log2(MAXPHYADDR) up to bit 51 are reserved and must be 0. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64_types.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index e1407312c412..f68e6526891d 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -52,11 +52,12 @@ #define __START_KERNEL_map _AC(0xffffffff80000000, UL) /* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */ -#ifdef CONFIG_X86_5LEVEL + #define __PHYSICAL_MASK_SHIFT 52 + +#ifdef CONFIG_X86_5LEVEL #define __VIRTUAL_MASK_SHIFT 56 #else -#define __PHYSICAL_MASK_SHIFT 46 #define __VIRTUAL_MASK_SHIFT 47 #endif -- cgit v1.2.1 From 02390b87a9459937cdb299e6b34ff33992512ec7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:49 +0300 Subject: mm/zsmalloc: Prepare to variable MAX_PHYSMEM_BITS With boot-time switching between paging mode we will have variable MAX_PHYSMEM_BITS. Let's use the maximum variable possible for CONFIG_X86_5LEVEL=y configuration to define zsmalloc data structures. The patch introduces MAX_POSSIBLE_PHYSMEM_BITS to cover such case. It also suits well to handle PAE special case. Signed-off-by: Kirill A. 
Shutemov Reviewed-by: Nitin Gupta Acked-by: Minchan Kim Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sergey Senozhatsky Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable-3level_types.h | 1 + arch/x86/include/asm/pgtable_64_types.h | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable-3level_types.h b/arch/x86/include/asm/pgtable-3level_types.h index 876b4c77d983..6a59a6d0cc50 100644 --- a/arch/x86/include/asm/pgtable-3level_types.h +++ b/arch/x86/include/asm/pgtable-3level_types.h @@ -44,5 +44,6 @@ typedef union { */ #define PTRS_PER_PTE 512 +#define MAX_POSSIBLE_PHYSMEM_BITS 36 #endif /* _ASM_X86_PGTABLE_3LEVEL_DEFS_H */ diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 6b8f73dcbc2c..7168de7d34eb 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -40,6 +40,8 @@ typedef struct { pteval_t pte; } pte_t; #define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) #define P4D_MASK (~(P4D_SIZE - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 52 + #else /* CONFIG_X86_5LEVEL */ /* -- cgit v1.2.1 From eedb92abb9bb03ef21442614a6f5867eaac6e77f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:50 +0300 Subject: x86/mm: Make virtual memory layout dynamic for CONFIG_X86_5LEVEL=y We need to be able to adjust virtual memory layout at runtime to be able to switch between 4- and 5-level paging at boot-time. KASLR already has movable __VMALLOC_BASE, __VMEMMAP_BASE and __PAGE_OFFSET. Let's re-use it. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 8 ++++++++ arch/x86/include/asm/kaslr.h | 4 ---- arch/x86/include/asm/page_64.h | 4 ++++ arch/x86/include/asm/page_64_types.h | 4 ++-- arch/x86/include/asm/pgtable_64_types.h | 4 ++-- arch/x86/kernel/head64.c | 9 +++++++++ arch/x86/mm/kaslr.c | 8 -------- 7 files changed, 25 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 63bf349b2b24..92256489b8a4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1430,6 +1430,7 @@ config X86_PAE config X86_5LEVEL bool "Enable 5-level page tables support" + select DYNAMIC_MEMORY_LAYOUT depends on X86_64 ---help--- 5-level paging enables access to larger address space: @@ -2143,10 +2144,17 @@ config PHYSICAL_ALIGN Don't change this unless you know what you are doing. +config DYNAMIC_MEMORY_LAYOUT + bool + ---help--- + This option makes base addresses of vmalloc and vmemmap as well as + __PAGE_OFFSET movable during boot. 
+ config RANDOMIZE_MEMORY bool "Randomize the kernel memory sections" depends on X86_64 depends on RANDOMIZE_BASE + select DYNAMIC_MEMORY_LAYOUT default RANDOMIZE_BASE ---help--- Randomizes the base virtual address of kernel memory sections diff --git a/arch/x86/include/asm/kaslr.h b/arch/x86/include/asm/kaslr.h index 460991e3b529..db7ba2feb947 100644 --- a/arch/x86/include/asm/kaslr.h +++ b/arch/x86/include/asm/kaslr.h @@ -5,10 +5,6 @@ unsigned long kaslr_get_random_long(const char *purpose); #ifdef CONFIG_RANDOMIZE_MEMORY -extern unsigned long page_offset_base; -extern unsigned long vmalloc_base; -extern unsigned long vmemmap_base; - void kernel_randomize_memory(void); #else static inline void kernel_randomize_memory(void) { } diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h index 4baa6bceb232..096378650142 100644 --- a/arch/x86/include/asm/page_64.h +++ b/arch/x86/include/asm/page_64.h @@ -11,6 +11,10 @@ extern unsigned long max_pfn; extern unsigned long phys_base; +extern unsigned long page_offset_base; +extern unsigned long vmalloc_base; +extern unsigned long vmemmap_base; + static inline unsigned long __phys_addr_nodebug(unsigned long x) { unsigned long y = x - __START_KERNEL_map; diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index f68e6526891d..d54a3d5b5b3b 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -43,11 +43,11 @@ #define __PAGE_OFFSET_BASE _AC(0xffff880000000000, UL) #endif -#ifdef CONFIG_RANDOMIZE_MEMORY +#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base #else #define __PAGE_OFFSET __PAGE_OFFSET_BASE -#endif /* CONFIG_RANDOMIZE_MEMORY */ +#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #define __START_KERNEL_map _AC(0xffffffff80000000, UL) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 7168de7d34eb..a0db91ab63b8 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -100,13 +100,13 @@ typedef struct { pteval_t pte; } pte_t; # define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif -#ifdef CONFIG_RANDOMIZE_MEMORY +#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base # define VMEMMAP_START vmemmap_base #else # define VMALLOC_START __VMALLOC_BASE # define VMEMMAP_START __VMEMMAP_BASE -#endif /* CONFIG_RANDOMIZE_MEMORY */ +#endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 7ba5d819ebe3..bf5c9ba63ba1 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -39,6 +39,15 @@ extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); +#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT +unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE; +EXPORT_SYMBOL(page_offset_base); +unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE; +EXPORT_SYMBOL(vmalloc_base); +unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE; +EXPORT_SYMBOL(vmemmap_base); +#endif + #define __head __section(.head.text) static void __head *fixup_pointer(void *ptr, unsigned long physaddr) diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index aedebd2ebf1e..515b98a8ccee 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -43,14 +43,6 @@ static const 
unsigned long vaddr_start = __PAGE_OFFSET_BASE; static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; -/* Default values */ -unsigned long page_offset_base = __PAGE_OFFSET_BASE; -EXPORT_SYMBOL(page_offset_base); -unsigned long vmalloc_base = __VMALLOC_BASE; -EXPORT_SYMBOL(vmalloc_base); -unsigned long vmemmap_base = __VMEMMAP_BASE; -EXPORT_SYMBOL(vmemmap_base); - /* * Memory regions randomized by KASLR (except modules that use a separate logic * earlier during boot). The list is ordered based on virtual addresses. This -- cgit v1.2.1 From e626e6bb0dfaca41487241d49ce0ae827716101a Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:51 +0300 Subject: x86/mm: Introduce 'pgtable_l5_enabled' The new flag would indicate what paging mode we are in. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/kaslr.c | 4 ++++ arch/x86/include/asm/pgtable_32_types.h | 2 ++ arch/x86/include/asm/pgtable_64_types.h | 6 ++++++ arch/x86/kernel/head64.c | 5 +++++ 4 files changed, 17 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index 8199a6187251..bd69e1830fbe 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -46,6 +46,10 @@ #define STATIC #include +#ifdef CONFIG_X86_5LEVEL +unsigned int pgtable_l5_enabled __ro_after_init = 1; +#endif + extern unsigned long get_cmd_line_ptr(void); /* Simplified build-specific string for starting entropy. */ diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h index 0777e18a1d23..e3225e83db7d 100644 --- a/arch/x86/include/asm/pgtable_32_types.h +++ b/arch/x86/include/asm/pgtable_32_types.h @@ -15,6 +15,8 @@ # include #endif +#define pgtable_l5_enabled 0 + #define PGDIR_SIZE (1UL << PGDIR_SHIFT) #define PGDIR_MASK (~(PGDIR_SIZE - 1)) diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index a0db91ab63b8..5e2d724f8f47 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -20,6 +20,12 @@ typedef unsigned long pgprotval_t; typedef struct { pteval_t pte; } pte_t; +#ifdef CONFIG_X86_5LEVEL +extern unsigned int pgtable_l5_enabled; +#else +#define pgtable_l5_enabled 0 +#endif + #endif /* !__ASSEMBLY__ */ #define SHARED_KERNEL_PMD 0 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index bf5c9ba63ba1..17d00d1886de 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -39,6 +39,11 @@ extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD]; static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); +#ifdef CONFIG_X86_5LEVEL +unsigned int pgtable_l5_enabled __ro_after_init = 1; +EXPORT_SYMBOL(pgtable_l5_enabled); +#endif + #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE; EXPORT_SYMBOL(page_offset_base); -- cgit v1.2.1 From 5c7919bb1994f8dc7fed219a5db09e6bb9d473a5 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:52 +0300 Subject: x86/mm: Make LDT_BASE_ADDR dynamic LDT_BASE_ADDR has different value in 4- and 5-level paging configurations. 
We need to make it dynamic in preparation for boot-time switching between paging modes. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-6-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_64_types.h | 9 +++++---- arch/x86/mm/dump_pagetables.c | 5 ++++- 2 files changed, 9 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 5e2d724f8f47..903e4d054bcb 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -92,18 +92,19 @@ extern unsigned int pgtable_l5_enabled; */ #define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +#define LDT_PGD_ENTRY_L4 -3UL +#define LDT_PGD_ENTRY_L5 -112UL +#define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) +#define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) + #ifdef CONFIG_X86_5LEVEL # define VMALLOC_SIZE_TB _AC(12800, UL) # define __VMALLOC_BASE _AC(0xffa0000000000000, UL) # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) -# define LDT_PGD_ENTRY _AC(-112, UL) -# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #else # define VMALLOC_SIZE_TB _AC(32, UL) # define __VMALLOC_BASE _AC(0xffffc90000000000, UL) # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) -# define LDT_PGD_ENTRY _AC(-3, UL) -# define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) #endif #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 2a4849e92831..a89f2dbc3531 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = { [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, #endif #ifdef CONFIG_MODIFY_LDT_SYSCALL - [LDT_NR] = { LDT_BASE_ADDR, "LDT remap" }, + [LDT_NR] = { 0UL, "LDT remap" }, #endif [CPU_ENTRY_AREA_NR] = { CPU_ENTRY_AREA_BASE,"CPU entry Area" }, #ifdef CONFIG_X86_ESPFIX64 @@ -570,6 +570,9 @@ static int __init pt_dump_init(void) address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET; address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START; +#ifdef CONFIG_MODIFY_LDT_SYSCALL + address_markers[LDT_NR].start_address = LDT_BASE_ADDR; +#endif #endif #ifdef CONFIG_X86_32 address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; -- cgit v1.2.1 From c65e774fb3f6af212641538694b9778ff9ab4300 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:53 +0300 Subject: x86/mm: Make PGDIR_SHIFT and PTRS_PER_P4D variable For boot-time switching between 4- and 5-level paging we need to be able to fold p4d page table level at runtime. It requires variable PGDIR_SHIFT and PTRS_PER_P4D. The change doesn't affect the kernel image size much: text data bss dec hex filename 8628091 4734304 1368064 14730459 e0c4db vmlinux.before 8628393 4734340 1368064 14730797 e0c62d vmlinux.after Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-7-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/kaslr.c | 2 ++ arch/x86/include/asm/pgtable_32.h | 2 ++ arch/x86/include/asm/pgtable_64_types.h | 19 ++++++++++++------- arch/x86/kernel/cpu/mcheck/mce.c | 18 ++++++------------ arch/x86/kernel/head64.c | 6 +++++- arch/x86/mm/dump_pagetables.c | 12 +++++------- arch/x86/mm/init_64.c | 2 +- arch/x86/mm/kasan_init_64.c | 2 +- arch/x86/platform/efi/efi_64.c | 4 ++-- 9 files changed, 36 insertions(+), 31 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index bd69e1830fbe..b18e8f9512de 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -48,6 +48,8 @@ #ifdef CONFIG_X86_5LEVEL unsigned int pgtable_l5_enabled __ro_after_init = 1; +unsigned int pgdir_shift __ro_after_init = 48; +unsigned int ptrs_per_p4d __ro_after_init = 512; #endif extern unsigned long get_cmd_line_ptr(void); diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h index e67c0620aec2..d829360e26bd 100644 --- a/arch/x86/include/asm/pgtable_32.h +++ b/arch/x86/include/asm/pgtable_32.h @@ -33,6 +33,8 @@ static inline void pgtable_cache_init(void) { } static inline void check_pgt_cache(void) { } void paging_init(void); +static inline int pgd_large(pgd_t pgd) { return 0; } + /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 903e4d054bcb..0c48d80e11d4 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -26,6 +26,9 @@ extern unsigned int pgtable_l5_enabled; #define pgtable_l5_enabled 0 #endif +extern unsigned int pgdir_shift; +extern unsigned int ptrs_per_p4d; + #endif /* !__ASSEMBLY__ */ #define SHARED_KERNEL_PMD 0 @@ -35,16 +38,17 @@ extern unsigned int pgtable_l5_enabled; /* * PGDIR_SHIFT determines what a top-level page table entry can map */ -#define PGDIR_SHIFT 48 +#define PGDIR_SHIFT pgdir_shift #define PTRS_PER_PGD 512 /* * 4th level page in 5-level paging case */ -#define P4D_SHIFT 39 -#define PTRS_PER_P4D 512 -#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) -#define P4D_MASK (~(P4D_SIZE - 1)) +#define P4D_SHIFT 39 +#define MAX_PTRS_PER_P4D 512 +#define PTRS_PER_P4D ptrs_per_p4d +#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) +#define P4D_MASK (~(P4D_SIZE - 1)) #define MAX_POSSIBLE_PHYSMEM_BITS 52 @@ -53,8 +57,9 @@ extern unsigned int pgtable_l5_enabled; /* * PGDIR_SHIFT determines what a top-level page table entry can map */ -#define PGDIR_SHIFT 39 -#define PTRS_PER_PGD 512 +#define PGDIR_SHIFT 39 +#define PTRS_PER_PGD 512 +#define MAX_PTRS_PER_P4D 1 #endif /* CONFIG_X86_5LEVEL */ diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 3a8e88a611eb..cbb3af721291 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1082,19 +1082,7 @@ void arch_unmap_kpfn(unsigned long pfn) * a legal address. */ -/* - * Build time check to see if we have a spare virtual bit. Don't want - * to leave this until run time because most developers don't have a - * system that can exercise this code path. 
This will only become a - * problem if/when we move beyond 5-level page tables. - * - * Hard code "9" here because cpp doesn't grok ilog2(PTRS_PER_PGD) - */ -#if PGDIR_SHIFT + 9 < 63 decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63)); -#else -#error "no unused virtual bit available" -#endif if (set_memory_np(decoy_addr, 1)) pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn); @@ -2328,6 +2316,12 @@ static __init int mcheck_init_device(void) { int err; + /* + * Check if we have a spare virtual bit. This will only become + * a problem if/when we move beyond 5-level page tables. + */ + MAYBE_BUILD_BUG_ON(__VIRTUAL_MASK_SHIFT >= 63); + if (!mce_available(&boot_cpu_data)) { err = -EIO; goto err_out; diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 17d00d1886de..98b0ff49b220 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,6 +42,10 @@ pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL unsigned int pgtable_l5_enabled __ro_after_init = 1; EXPORT_SYMBOL(pgtable_l5_enabled); +unsigned int pgdir_shift __ro_after_init = 48; +EXPORT_SYMBOL(pgdir_shift); +unsigned int ptrs_per_p4d __ro_after_init = 512; +EXPORT_SYMBOL(ptrs_per_p4d); #endif #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT @@ -336,7 +340,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data) BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0); BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0); BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL)); - BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == + MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == (__START_KERNEL & PGDIR_MASK))); BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END); diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index a89f2dbc3531..420058b05d39 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -428,14 +428,15 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, #define p4d_none(a) pud_none(__pud(p4d_val(a))) #endif -#if PTRS_PER_P4D > 1 - static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P) { int i; p4d_t *start, *p4d_start; pgprotval_t prot; + if (PTRS_PER_P4D == 1) + return walk_pud_level(m, st, __p4d(pgd_val(addr)), P); + p4d_start = start = (p4d_t *)pgd_page_vaddr(addr); for (i = 0; i < PTRS_PER_P4D; i++) { @@ -455,11 +456,8 @@ static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, } } -#else -#define walk_p4d_level(m,s,a,p) walk_pud_level(m,s,__p4d(pgd_val(a)),p) -#define pgd_large(a) p4d_large(__p4d(pgd_val(a))) -#define pgd_none(a) p4d_none(__p4d(pgd_val(a))) -#endif +#define pgd_large(a) (pgtable_l5_enabled ? pgd_large(a) : p4d_large(__p4d(pgd_val(a)))) +#define pgd_none(a) (pgtable_l5_enabled ? pgd_none(a) : p4d_none(__p4d(pgd_val(a)))) static inline bool is_hypervisor_range(int idx) { diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 1ab42c852069..6a4b20bc7527 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -143,7 +143,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) * With folded p4d, pgd_none() is always false, we need to * handle synchonization on p4d level. 
*/ - BUILD_BUG_ON(pgd_none(*pgd_ref)); + MAYBE_BUILD_BUG_ON(pgd_none(*pgd_ref)); p4d_ref = p4d_offset(pgd_ref, addr); if (p4d_none(*p4d_ref)) diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index af6f2f9c6a26..12ec90f62457 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -19,7 +19,7 @@ extern struct range pfn_mapped[E820_MAX_ENTRIES]; -static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); +static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE); static __init void *early_alloc(size_t size, int nid, bool panic) { diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 780460aa5ea5..d52aaa7dc088 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -257,8 +257,8 @@ void efi_sync_low_kernel_mappings(void) * only span a single PGD entry and that the entry also maps * other important kernel regions. */ - BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END)); - BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) != + MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END)); + MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) != (EFI_VA_END & PGDIR_MASK)); pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET); -- cgit v1.2.1 From 162434e7f58b21f0b6c9cc5fb02222cd7d9064cc Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:54 +0300 Subject: x86/mm: Make MAX_PHYSADDR_BITS and MAX_PHYSMEM_BITS dynamic For boot-time switching between paging modes, we need to be able to adjust size of physical address space at runtime. As part of making physical address space size variable, we have to make X86_5LEVEL dependent on SPARSEMEM_VMEMMAP. !SPARSEMEM_VMEMMAP configuration doesn't build with variable MAX_PHYSMEM_BITS. For !SPARSEMEM_VMEMMAP SECTIONS_WIDTH depends on MAX_PHYSMEM_BITS: SECTIONS_WIDTH SECTIONS_SHIFT MAX_PHYSMEM_BITS And SECTIONS_WIDTH is used on pre-processor stage, it doesn't work if it's dyncamic. See include/linux/page-flags-layout.h. Effect on kernel image size: text data bss dec hex filename 8628393 4734340 1368064 14730797 e0c62d vmlinux.before 8628892 4734340 1368064 14731296 e0c820 vmlinux.after Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-8-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/include/asm/pgtable_64_types.h | 2 +- arch/x86/include/asm/sparsemem.h | 9 ++------- arch/x86/kernel/setup.c | 5 ++--- 4 files changed, 6 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 92256489b8a4..fcc3f88996b3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1431,6 +1431,7 @@ config X86_PAE config X86_5LEVEL bool "Enable 5-level page tables support" select DYNAMIC_MEMORY_LAYOUT + select SPARSEMEM_VMEMMAP depends on X86_64 ---help--- 5-level paging enables access to larger address space: diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 0c48d80e11d4..59d971c85de5 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -95,7 +95,7 @@ extern unsigned int ptrs_per_p4d; * range must not overlap with anything except the KASAN shadow area, which * is correct as KASAN disables KASLR. 
*/ -#define MAXMEM _AC(__AC(1, UL) << MAX_PHYSMEM_BITS, UL) +#define MAXMEM (1UL << MAX_PHYSMEM_BITS) #define LDT_PGD_ENTRY_L4 -3UL #define LDT_PGD_ENTRY_L5 -112UL diff --git a/arch/x86/include/asm/sparsemem.h b/arch/x86/include/asm/sparsemem.h index 4fc1e9d3c43e..4617a2bf123c 100644 --- a/arch/x86/include/asm/sparsemem.h +++ b/arch/x86/include/asm/sparsemem.h @@ -27,13 +27,8 @@ # endif #else /* CONFIG_X86_32 */ # define SECTION_SIZE_BITS 27 /* matt - 128 is convenient right now */ -# ifdef CONFIG_X86_5LEVEL -# define MAX_PHYSADDR_BITS 52 -# define MAX_PHYSMEM_BITS 52 -# else -# define MAX_PHYSADDR_BITS 44 -# define MAX_PHYSMEM_BITS 46 -# endif +# define MAX_PHYSADDR_BITS (pgtable_l5_enabled ? 52 : 44) +# define MAX_PHYSMEM_BITS (pgtable_l5_enabled ? 52 : 46) #endif #endif /* CONFIG_SPARSEMEM */ diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index 1ae67e982af7..399d0f7fa8f1 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -189,9 +189,7 @@ struct ist_info ist_info; #endif #else -struct cpuinfo_x86 boot_cpu_data __read_mostly = { - .x86_phys_bits = MAX_PHYSMEM_BITS, -}; +struct cpuinfo_x86 boot_cpu_data __read_mostly; EXPORT_SYMBOL(boot_cpu_data); #endif @@ -851,6 +849,7 @@ void __init setup_arch(char **cmdline_p) __flush_tlb_all(); #else printk(KERN_INFO "Command line: %s\n", boot_command_line); + boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS; #endif /* -- cgit v1.2.1 From 09e61a779e7f171c50325e6d7108a593afb2e5d4 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 14:16:55 +0300 Subject: x86/mm: Make __VIRTUAL_MASK_SHIFT dynamic For boot-time switching between paging modes, we need to be able to adjust virtual mask shifts. The change doesn't affect the kernel image size much: text data bss dec hex filename 8628892 4734340 1368064 14731296 e0c820 vmlinux.before 8628966 4734340 1368064 14731370 e0c86a vmlinux.after Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214111656.88514-9-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/entry/entry_64.S | 12 ++++++++++++ arch/x86/include/asm/page_64_types.h | 2 +- arch/x86/mm/dump_pagetables.c | 12 ++++++++++-- arch/x86/mm/kaslr.c | 4 +++- 4 files changed, 26 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 30c8c5344c4a..2c06348b7807 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -274,8 +274,20 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) * Change top bits to match most significant bit (47th or 56th bit * depending on paging mode) in the address. */ +#ifdef CONFIG_X86_5LEVEL + testl $1, pgtable_l5_enabled(%rip) + jz 1f + shl $(64 - 57), %rcx + sar $(64 - 57), %rcx + jmp 2f +1: + shl $(64 - 48), %rcx + sar $(64 - 48), %rcx +2: +#else shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx +#endif /* If this changed %rcx, it was not canonical */ cmpq %rcx, %r11 diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index d54a3d5b5b3b..fa7dc7cd8c19 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -56,7 +56,7 @@ #define __PHYSICAL_MASK_SHIFT 52 #ifdef CONFIG_X86_5LEVEL -#define __VIRTUAL_MASK_SHIFT 56 +#define __VIRTUAL_MASK_SHIFT (pgtable_l5_enabled ? 
56 : 47) #else #define __VIRTUAL_MASK_SHIFT 47 #endif diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 420058b05d39..9efee6f464ab 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -85,8 +85,12 @@ static struct addr_marker address_markers[] = { [VMALLOC_START_NR] = { 0UL, "vmalloc() Area" }, [VMEMMAP_START_NR] = { 0UL, "Vmemmap" }, #ifdef CONFIG_KASAN - [KASAN_SHADOW_START_NR] = { KASAN_SHADOW_START, "KASAN shadow" }, - [KASAN_SHADOW_END_NR] = { KASAN_SHADOW_END, "KASAN shadow end" }, + /* + * These fields get initialized with the (dynamic) + * KASAN_SHADOW_{START,END} values in pt_dump_init(). + */ + [KASAN_SHADOW_START_NR] = { 0UL, "KASAN shadow" }, + [KASAN_SHADOW_END_NR] = { 0UL, "KASAN shadow end" }, #endif #ifdef CONFIG_MODIFY_LDT_SYSCALL [LDT_NR] = { 0UL, "LDT remap" }, @@ -571,6 +575,10 @@ static int __init pt_dump_init(void) #ifdef CONFIG_MODIFY_LDT_SYSCALL address_markers[LDT_NR].start_address = LDT_BASE_ADDR; #endif +#ifdef CONFIG_KASAN + address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START; + address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END; +#endif #endif #ifdef CONFIG_X86_32 address_markers[VMALLOC_START_NR].start_address = VMALLOC_START; diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 515b98a8ccee..d079878c6cbc 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region { unsigned long *base; unsigned long size_tb; } kaslr_regions[] = { - { &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ }, + { &page_offset_base, 0 }, { &vmalloc_base, VMALLOC_SIZE_TB }, { &vmemmap_base, 1 }, }; @@ -93,6 +93,8 @@ void __init kernel_randomize_memory(void) if (!kaslr_memory_enabled()) return; + kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT); + /* * Update Physical memory mapping to available and * add padding if needed (especially for memory hotplug support). -- cgit v1.2.1 From 3130451e270960065e8de684c60b898e970c940c Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 14 Feb 2018 17:43:16 +0200 Subject: x86/platform/atom: Re-use DEFINE_SHOW_ATTRIBUTE() macro ...instead of open coding file operations followed by custom ->open() callbacks per each attribute. While here, replace permissions by explicit octal value. 
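
Background note: DEFINE_SHOW_ATTRIBUTE() is the <linux/seq_file.h> helper being adopted here and in the follow-up quark/imr patch. It generates exactly the single_open() boilerplate that these diffs delete by hand, and names the resulting struct file_operations <name>_fops. A rough sketch of the expansion -- written from memory of this kernel era, so check seq_file.h for the authoritative definition -- looks like:

/*
 * Sketch of DEFINE_SHOW_ATTRIBUTE(__name): given an existing
 * __name##_show() seq_file routine, emit the matching ->open()
 * wrapper and a const file_operations named __name##_fops.
 */
#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

So DEFINE_SHOW_ATTRIBUTE(punit_dev_state) only needs the existing punit_dev_state_show() and produces punit_dev_state_fops, which is why the debugfs_create_file() call in the diff below switches from &punit_dev_state_ops to &punit_dev_state_fops.
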
Signed-off-by: Andy Shevchenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180214154317.52290-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/platform/atom/punit_atom_debug.c | 17 +++-------------- 1 file changed, 3 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c index d49d3be81953..034813d4ab1e 100644 --- a/arch/x86/platform/atom/punit_atom_debug.c +++ b/arch/x86/platform/atom/punit_atom_debug.c @@ -109,18 +109,7 @@ static int punit_dev_state_show(struct seq_file *seq_file, void *unused) return 0; } - -static int punit_dev_state_open(struct inode *inode, struct file *file) -{ - return single_open(file, punit_dev_state_show, inode->i_private); -} - -static const struct file_operations punit_dev_state_ops = { - .open = punit_dev_state_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(punit_dev_state); static struct dentry *punit_dbg_file; @@ -132,9 +121,9 @@ static int punit_dbgfs_register(struct punit_device *punit_device) if (!punit_dbg_file) return -ENXIO; - dev_state = debugfs_create_file("dev_power_state", S_IFREG | S_IRUGO, + dev_state = debugfs_create_file("dev_power_state", 0444, punit_dbg_file, punit_device, - &punit_dev_state_ops); + &punit_dev_state_fops); if (!dev_state) { pr_err("punit_dev_state register failed\n"); debugfs_remove(punit_dbg_file); -- cgit v1.2.1 From a66b86f0026b07b0ea7340e3690ac9fd5ac1499a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 14 Feb 2018 17:43:17 +0200 Subject: x86/platform/quark: Re-use DEFINE_SHOW_ATTRIBUTE() macro ...instead of open coding file operations followed by custom ->open() callbacks per each attribute. While here, replace permissions by explicit octal value. Signed-off-by: Andy Shevchenko Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180214154317.52290-2-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/platform/intel-quark/imr.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c index 17d6d2296e4d..49828c2707ac 100644 --- a/arch/x86/platform/intel-quark/imr.c +++ b/arch/x86/platform/intel-quark/imr.c @@ -224,25 +224,7 @@ static int imr_dbgfs_state_show(struct seq_file *s, void *unused) mutex_unlock(&idev->lock); return ret; } - -/** - * imr_state_open - debugfs open callback. - * - * @inode: pointer to struct inode. - * @file: pointer to struct file. - * @return: result of single open. - */ -static int imr_state_open(struct inode *inode, struct file *file) -{ - return single_open(file, imr_dbgfs_state_show, inode->i_private); -} - -static const struct file_operations imr_state_ops = { - .open = imr_state_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state); /** * imr_debugfs_register - register debugfs hooks. 
@@ -252,8 +234,8 @@ static const struct file_operations imr_state_ops = { */ static int imr_debugfs_register(struct imr_device *idev) { - idev->file = debugfs_create_file("imr_state", S_IFREG | S_IRUGO, NULL, - idev, &imr_state_ops); + idev->file = debugfs_create_file("imr_state", 0444, NULL, idev, + &imr_dbgfs_state_fops); return PTR_ERR_OR_ZERO(idev->file); } -- cgit v1.2.1 From f960cfd12650fad43c1cde07a1f7642cf2c57f97 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 15 Feb 2018 11:54:54 -0500 Subject: x86/Kconfig: Add missing i586-class CPUs to the X86_CMPXCHG64 Kconfig group Several i586-class CPUs supporting this instruction are missing from the X86_CMPXCHG64 config group. Using a configuration with either M586TSC or M586MMX currently sets X86_MINIMUM_CPU_FAMILY=4 instead of the correct value of 5. Booting on an i486 it will fail to generate the "This kernel requires an i586 CPU, but only detected an i486 CPU" message and intentional halt as expected. It will instead just silently hang when it hits i586-specific instructions. The M586 CPU is not in this list because at least the Cyrix 5x86 lacks this instruction, and perhaps others. Signed-off-by: Matthew Whitehead Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1518713696-11360-1-git-send-email-tedheadster@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index 65a9a4716e34..ec64aa728727 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -374,7 +374,7 @@ config X86_TSC config X86_CMPXCHG64 def_bool y - depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM + depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8 # this should be set for all -march=.. options where the compiler # generates cmov. -- cgit v1.2.1 From 69b8d3fcabdc81d9efd82b4a506c8279cbaba692 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 15 Feb 2018 11:54:55 -0500 Subject: x86/Kconfig: Exclude i586-class CPUs lacking PAE support from the HIGHMEM64G Kconfig group i586-class machines also lack support for Physical Address Extension (PAE), so add them to the exclusion list. Signed-off-by: Matthew Whitehead Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1518713696-11360-2-git-send-email-tedheadster@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a528c14d45a5..c1236b187824 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1404,7 +1404,7 @@ config HIGHMEM4G config HIGHMEM64G bool "64GB" - depends on !M486 + depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !WINCHIP3D && !MK6 select X86_PAE ---help--- Select this if you have a 32-bit processor and more than 4 -- cgit v1.2.1 From 25d76ac888216c369dea91768764728b83769799 Mon Sep 17 00:00:00 2001 From: Matthew Whitehead Date: Thu, 15 Feb 2018 11:54:56 -0500 Subject: x86/Kconfig: Explicitly enumerate i686-class CPUs in Kconfig The X86_P6_NOP config class leaves out many i686-class CPUs. Instead, explicitly enumerate all these CPUs. Using a configuration with M686 currently sets X86_MINIMUM_CPU_FAMILY=5 instead of the correct value of 6. Booting on an i586 it will fail to generate the "This kernel requires an i686 CPU, but only detected an i586 CPU" message and intentional halt as expected. It will instead just silently hang when it hits i686-specific instructions. Signed-off-by: Matthew Whitehead Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1518713696-11360-3-git-send-email-tedheadster@gmail.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig.cpu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu index ec64aa728727..8b8d2297d486 100644 --- a/arch/x86/Kconfig.cpu +++ b/arch/x86/Kconfig.cpu @@ -385,7 +385,7 @@ config X86_CMOV config X86_MINIMUM_CPU_FAMILY int default "64" if X86_64 - default "6" if X86_32 && X86_P6_NOP + default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8) default "5" if X86_32 && X86_CMPXCHG64 default "4" -- cgit v1.2.1 From b753a2b79a5bbad35dfaf8d3dba964727c30654a Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Wed, 14 Feb 2018 14:25:54 +0800 Subject: x86/apic: Make setup_local_APIC() static This function isn't used outside of apic.c, so let's mark it static. 
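
Background note on the X86_CMPXCHG64 patch earlier in this series: that Kconfig group gates code which relies on the 64-bit cmpxchg8b instruction, which i486 parts (and a few i586-class parts such as the Cyrix 5x86) lack -- hence the importance of listing every CPU that does have it. A hedged sketch of the classic lock-prefixed cmpxchg8b pattern, illustrative only and not the kernel's own cmpxchg_32.h implementation:

/*
 * Illustrative 64-bit compare-and-exchange on 32-bit x86.
 * cmpxchg8b compares EDX:EAX with the memory operand; if equal it
 * stores ECX:EBX there, otherwise it loads the memory value into
 * EDX:EAX. Either way the previous value is returned in EDX:EAX ("=A").
 */
static inline unsigned long long
cmpxchg64_sketch(volatile unsigned long long *ptr,
		 unsigned long long old, unsigned long long new)
{
	unsigned long long prev;

	asm volatile("lock; cmpxchg8b %1"
		     : "=A" (prev), "+m" (*ptr)
		     : "b" ((unsigned int)new),
		       "c" ((unsigned int)(new >> 32)),
		       "0" (old)
		     : "memory");
	return prev;
}

An i486-class kernel cannot assume this instruction exists, which is why selecting M586TSC or M586MMX should (and with that fix now does) raise X86_MINIMUM_CPU_FAMILY to 5.
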
Signed-off-by: Dou Liyang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bhe@redhat.com Cc: ebiederm@xmission.com Link: http://lkml.kernel.org/r/20180214062554.21020-1-douly.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 1 - arch/x86/kernel/apic/apic.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index 6e1990d69865..c6a32019ff54 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -138,7 +138,6 @@ extern void lapic_shutdown(void); extern void sync_Arb_IDs(void); extern void init_bsp_APIC(void); extern void apic_intr_mode_init(void); -extern void setup_local_APIC(void); extern void init_apic_mappings(void); void register_lapic_address(unsigned long address); extern void setup_boot_APIC_clock(void); diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 25ddf02598d2..871018dd49a3 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1414,7 +1414,7 @@ static void lapic_setup_esr(void) * Used to setup local APIC while initializing BSP or bringing up APs. * Always called with preemption disabled. */ -void setup_local_APIC(void) +static void setup_local_APIC(void) { int cpu = smp_processor_id(); unsigned int value, queued; -- cgit v1.2.1 From 4c2b4058ab32581931c2caf760b689fd4b019a87 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:34 +0300 Subject: x86/mm: Initialize 'pgtable_l5_enabled' at boot-time 'pgtable_l5_enabled' indicates which paging mode we are using. We need to initialize it at boot-time according to machine capability. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/kaslr.c | 8 +++++++- arch/x86/kernel/head64.c | 24 +++++++++++++++++++++++- 2 files changed, 30 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index b18e8f9512de..d02a838c0ce4 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -47,7 +47,7 @@ #include #ifdef CONFIG_X86_5LEVEL -unsigned int pgtable_l5_enabled __ro_after_init = 1; +unsigned int pgtable_l5_enabled __ro_after_init; unsigned int pgdir_shift __ro_after_init = 48; unsigned int ptrs_per_p4d __ro_after_init = 512; #endif @@ -729,6 +729,12 @@ void choose_random_location(unsigned long input, return; } +#ifdef CONFIG_X86_5LEVEL + if (__read_cr4() & X86_CR4_LA57) { + pgtable_l5_enabled = 1; + } +#endif + boot_params->hdr.loadflags |= KASLR_FLAG; /* Prepare to add new identity pagetables on demand. 
*/ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 98b0ff49b220..ffb31c50d515 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -40,7 +40,7 @@ static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL -unsigned int pgtable_l5_enabled __ro_after_init = 1; +unsigned int pgtable_l5_enabled __ro_after_init; EXPORT_SYMBOL(pgtable_l5_enabled); unsigned int pgdir_shift __ro_after_init = 48; EXPORT_SYMBOL(pgdir_shift); @@ -64,6 +64,26 @@ static void __head *fixup_pointer(void *ptr, unsigned long physaddr) return ptr - (void *)_text + (void *)physaddr; } +#ifdef CONFIG_X86_5LEVEL +static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr) +{ + return fixup_pointer(ptr, physaddr); +} + +static void __head check_la57_support(unsigned long physaddr) +{ + if (native_cpuid_eax(0) < 7) + return; + + if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + return; + + *fixup_int(&pgtable_l5_enabled, physaddr) = 1; +} +#else +static void __head check_la57_support(unsigned long physaddr) {} +#endif + unsigned long __head __startup_64(unsigned long physaddr, struct boot_params *bp) { @@ -76,6 +96,8 @@ unsigned long __head __startup_64(unsigned long physaddr, int i; unsigned int *next_pgt_ptr; + check_la57_support(physaddr); + /* Is the address too large? */ if (physaddr >> MAX_PHYSMEM_BITS) for (;;); -- cgit v1.2.1 From b16e770bfa5344f1cd4f7b4ecd7bbae25001e120 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:35 +0300 Subject: x86/mm: Initialize 'pgdir_shift' and 'ptrs_per_p4d' at boot-time Switching between paging modes requires the folding of the p4d page table level when we only have 4 paging levels, which means we need to adjust 'pgdir_shift' and 'ptrs_per_p4d' during early boot according to paging mode. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/kaslr.c | 6 ++++-- arch/x86/kernel/head64.c | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c index d02a838c0ce4..66e42a098d70 100644 --- a/arch/x86/boot/compressed/kaslr.c +++ b/arch/x86/boot/compressed/kaslr.c @@ -48,8 +48,8 @@ #ifdef CONFIG_X86_5LEVEL unsigned int pgtable_l5_enabled __ro_after_init; -unsigned int pgdir_shift __ro_after_init = 48; -unsigned int ptrs_per_p4d __ro_after_init = 512; +unsigned int pgdir_shift __ro_after_init = 39; +unsigned int ptrs_per_p4d __ro_after_init = 1; #endif extern unsigned long get_cmd_line_ptr(void); @@ -732,6 +732,8 @@ void choose_random_location(unsigned long input, #ifdef CONFIG_X86_5LEVEL if (__read_cr4() & X86_CR4_LA57) { pgtable_l5_enabled = 1; + pgdir_shift = 48; + ptrs_per_p4d = 512; } #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index ffb31c50d515..8a0a485524da 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -42,9 +42,9 @@ pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL unsigned int pgtable_l5_enabled __ro_after_init; EXPORT_SYMBOL(pgtable_l5_enabled); -unsigned int pgdir_shift __ro_after_init = 48; +unsigned int pgdir_shift __ro_after_init = 39; EXPORT_SYMBOL(pgdir_shift); -unsigned int ptrs_per_p4d __ro_after_init = 512; +unsigned int ptrs_per_p4d __ro_after_init = 1; EXPORT_SYMBOL(ptrs_per_p4d); #endif @@ -79,6 +79,8 @@ static void __head check_la57_support(unsigned long physaddr) return; *fixup_int(&pgtable_l5_enabled, physaddr) = 1; + *fixup_int(&pgdir_shift, physaddr) = 48; + *fixup_int(&ptrs_per_p4d, physaddr) = 512; } #else static void __head check_la57_support(unsigned long physaddr) {} -- cgit v1.2.1 From 4fa5662b6b49611f11856db8be346710217473ef Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:36 +0300 Subject: x86/mm: Initialize 'page_offset_base' at boot-time For 4- and 5-level paging we have different 'page_offset_base'. Let's initialize it at boot-time accordingly to machine capability. We also have to split __PAGE_OFFSET_BASE into two constants -- for 4- and 5-level paging. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/page_64_types.h | 9 +++------ arch/x86/kernel/head64.c | 13 +++++++++---- arch/x86/kernel/head_64.S | 2 +- arch/x86/mm/kaslr.c | 8 ++++---- 4 files changed, 17 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h index fa7dc7cd8c19..2c5a966dc222 100644 --- a/arch/x86/include/asm/page_64_types.h +++ b/arch/x86/include/asm/page_64_types.h @@ -37,16 +37,13 @@ * hypervisor to fit. Choosing 16 slots here is arbitrary, but it's * what Xen requires. 
*/ -#ifdef CONFIG_X86_5LEVEL -#define __PAGE_OFFSET_BASE _AC(0xff10000000000000, UL) -#else -#define __PAGE_OFFSET_BASE _AC(0xffff880000000000, UL) -#endif +#define __PAGE_OFFSET_BASE_L5 _AC(0xff10000000000000, UL) +#define __PAGE_OFFSET_BASE_L4 _AC(0xffff880000000000, UL) #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT #define __PAGE_OFFSET page_offset_base #else -#define __PAGE_OFFSET __PAGE_OFFSET_BASE +#define __PAGE_OFFSET __PAGE_OFFSET_BASE_L4 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #define __START_KERNEL_map _AC(0xffffffff80000000, UL) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8a0a485524da..876d3bf2b23a 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -49,7 +49,7 @@ EXPORT_SYMBOL(ptrs_per_p4d); #endif #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT -unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE; +unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; EXPORT_SYMBOL(page_offset_base); unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE; EXPORT_SYMBOL(vmalloc_base); @@ -64,6 +64,11 @@ static void __head *fixup_pointer(void *ptr, unsigned long physaddr) return ptr - (void *)_text + (void *)physaddr; } +static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr) +{ + return fixup_pointer(ptr, physaddr); +} + #ifdef CONFIG_X86_5LEVEL static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr) { @@ -81,6 +86,7 @@ static void __head check_la57_support(unsigned long physaddr) *fixup_int(&pgtable_l5_enabled, physaddr) = 1; *fixup_int(&pgdir_shift, physaddr) = 48; *fixup_int(&ptrs_per_p4d, physaddr) = 512; + *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5; } #else static void __head check_la57_support(unsigned long physaddr) {} @@ -89,7 +95,7 @@ static void __head check_la57_support(unsigned long physaddr) {} unsigned long __head __startup_64(unsigned long physaddr, struct boot_params *bp) { - unsigned long load_delta, *p; + unsigned long load_delta; unsigned long pgtable_flags; pgdval_t *pgd; p4dval_t *p4d; @@ -196,8 +202,7 @@ unsigned long __head __startup_64(unsigned long physaddr, * Fixup phys_base - remove the memory encryption mask to obtain * the true physical address. */ - p = fixup_pointer(&phys_base, physaddr); - *p += load_delta - sme_get_me_mask(); + *fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask(); /* Encrypt the kernel and related (if SME is active) */ sme_encrypt_kernel(bp); diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 04a625f0fcda..d3f8b43d541a 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -41,7 +41,7 @@ #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) -PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE) +PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE_L4) PGD_START_KERNEL = pgd_index(__START_KERNEL_map) #endif L3_START_KERNEL = pud_index(__START_KERNEL_map) diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index d079878c6cbc..7828a7ca3bba 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -34,13 +34,10 @@ #define TB_SHIFT 40 /* - * Virtual address start and end range for randomization. - * * The end address could depend on more configuration options to make the * highest amount of space for randomization available, but that's too hard * to keep straight and caused issues already. 
*/ -static const unsigned long vaddr_start = __PAGE_OFFSET_BASE; static const unsigned long vaddr_end = CPU_ENTRY_AREA_BASE; /* @@ -76,11 +73,14 @@ static inline bool kaslr_memory_enabled(void) void __init kernel_randomize_memory(void) { size_t i; - unsigned long vaddr = vaddr_start; + unsigned long vaddr_start, vaddr; unsigned long rand, memory_tb; struct rnd_state rand_state; unsigned long remain_entropy; + vaddr_start = pgtable_l5_enabled ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4; + vaddr = vaddr_start; + /* * These BUILD_BUG_ON checks ensure the memory layout is consistent * with the vaddr_start/vaddr_end variables. These checks are very -- cgit v1.2.1 From a7412546d8cb5ad578805060b4006f2a021b5868 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:37 +0300 Subject: x86/mm: Adjust vmalloc base and size at boot-time vmalloc area has different placement and size depending on paging mode. Let's adjust it during early boot accodring to machine capability. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_64_types.h | 16 ++++++++++------ arch/x86/kernel/head64.c | 3 ++- arch/x86/mm/kaslr.c | 3 ++- 3 files changed, 14 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 59d971c85de5..686329994ade 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -102,25 +102,29 @@ extern unsigned int ptrs_per_p4d; #define LDT_PGD_ENTRY (pgtable_l5_enabled ? LDT_PGD_ENTRY_L5 : LDT_PGD_ENTRY_L4) #define LDT_BASE_ADDR (LDT_PGD_ENTRY << PGDIR_SHIFT) +#define __VMALLOC_BASE_L4 0xffffc90000000000 +#define __VMALLOC_BASE_L5 0xffa0000000000000 + +#define VMALLOC_SIZE_TB_L4 32UL +#define VMALLOC_SIZE_TB_L5 12800UL + #ifdef CONFIG_X86_5LEVEL -# define VMALLOC_SIZE_TB _AC(12800, UL) -# define __VMALLOC_BASE _AC(0xffa0000000000000, UL) # define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) #else -# define VMALLOC_SIZE_TB _AC(32, UL) -# define __VMALLOC_BASE _AC(0xffffc90000000000, UL) # define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) #endif #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base +# define VMALLOC_SIZE_TB (pgtable_l5_enabled ? 
VMALLOC_SIZE_TB_L5 : VMALLOC_SIZE_TB_L4) # define VMEMMAP_START vmemmap_base #else -# define VMALLOC_START __VMALLOC_BASE +# define VMALLOC_START __VMALLOC_BASE_L4 +# define VMALLOC_SIZE_TB VMALLOC_SIZE_TB_L4 # define VMEMMAP_START __VMEMMAP_BASE #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ -#define VMALLOC_END (VMALLOC_START + _AC((VMALLOC_SIZE_TB << 40) - 1, UL)) +#define VMALLOC_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1) #define MODULES_VADDR (__START_KERNEL_map + KERNEL_IMAGE_SIZE) /* The module sections ends with the start of the fixmap */ diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 876d3bf2b23a..22bf2015254c 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -51,7 +51,7 @@ EXPORT_SYMBOL(ptrs_per_p4d); #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; EXPORT_SYMBOL(page_offset_base); -unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE; +unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4; EXPORT_SYMBOL(vmalloc_base); unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE; EXPORT_SYMBOL(vmemmap_base); @@ -87,6 +87,7 @@ static void __head check_la57_support(unsigned long physaddr) *fixup_int(&pgdir_shift, physaddr) = 48; *fixup_int(&ptrs_per_p4d, physaddr) = 512; *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5; + *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5; } #else static void __head check_la57_support(unsigned long physaddr) {} diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 7828a7ca3bba..641169d38184 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -50,7 +50,7 @@ static __initdata struct kaslr_memory_region { unsigned long size_tb; } kaslr_regions[] = { { &page_offset_base, 0 }, - { &vmalloc_base, VMALLOC_SIZE_TB }, + { &vmalloc_base, 0 }, { &vmemmap_base, 1 }, }; @@ -94,6 +94,7 @@ void __init kernel_randomize_memory(void) return; kaslr_regions[0].size_tb = 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT); + kaslr_regions[1].size_tb = VMALLOC_SIZE_TB; /* * Update Physical memory mapping to available and -- cgit v1.2.1 From 9b46a051e43461a9afda2bdd50e0e0ae349341df Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:38 +0300 Subject: x86/mm: Initialize vmemmap_base at boot-time vmemmap area has different placement depending on paging mode. Let's adjust it during early boot accodring to machine capability. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-6-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_64_types.h | 9 +++------ arch/x86/kernel/head64.c | 3 ++- 2 files changed, 5 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 686329994ade..68909a68e5b9 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -108,11 +108,8 @@ extern unsigned int ptrs_per_p4d; #define VMALLOC_SIZE_TB_L4 32UL #define VMALLOC_SIZE_TB_L5 12800UL -#ifdef CONFIG_X86_5LEVEL -# define __VMEMMAP_BASE _AC(0xffd4000000000000, UL) -#else -# define __VMEMMAP_BASE _AC(0xffffea0000000000, UL) -#endif +#define __VMEMMAP_BASE_L4 0xffffea0000000000 +#define __VMEMMAP_BASE_L5 0xffd4000000000000 #ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT # define VMALLOC_START vmalloc_base @@ -121,7 +118,7 @@ extern unsigned int ptrs_per_p4d; #else # define VMALLOC_START __VMALLOC_BASE_L4 # define VMALLOC_SIZE_TB VMALLOC_SIZE_TB_L4 -# define VMEMMAP_START __VMEMMAP_BASE +# define VMEMMAP_START __VMEMMAP_BASE_L4 #endif /* CONFIG_DYNAMIC_MEMORY_LAYOUT */ #define VMALLOC_END (VMALLOC_START + (VMALLOC_SIZE_TB << 40) - 1) diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 22bf2015254c..795e762f3c66 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -53,7 +53,7 @@ unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4; EXPORT_SYMBOL(page_offset_base); unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4; EXPORT_SYMBOL(vmalloc_base); -unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE; +unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4; EXPORT_SYMBOL(vmemmap_base); #endif @@ -88,6 +88,7 @@ static void __head check_la57_support(unsigned long physaddr) *fixup_int(&ptrs_per_p4d, physaddr) = 512; *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5; *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5; + *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5; } #else static void __head check_la57_support(unsigned long physaddr) {} -- cgit v1.2.1 From 6f9dd329717f696f578347c0781a0247db957596 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:39 +0300 Subject: x86/mm: Support boot-time switching of paging modes in the early boot code Early boot code should be able to initialize page tables for both 4- and 5-level paging modes. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-7-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/head64.c | 33 ++++++++++++++++++++++----------- arch/x86/kernel/head_64.S | 10 ++++------ 2 files changed, 26 insertions(+), 17 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 795e762f3c66..8161e719a20f 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -75,13 +75,13 @@ static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr) return fixup_pointer(ptr, physaddr); } -static void __head check_la57_support(unsigned long physaddr) +static bool __head check_la57_support(unsigned long physaddr) { if (native_cpuid_eax(0) < 7) - return; + return false; if (!(native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) - return; + return false; *fixup_int(&pgtable_l5_enabled, physaddr) = 1; *fixup_int(&pgdir_shift, physaddr) = 48; @@ -89,24 +89,30 @@ static void __head check_la57_support(unsigned long physaddr) *fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5; *fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5; *fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5; + + return true; } #else -static void __head check_la57_support(unsigned long physaddr) {} +static bool __head check_la57_support(unsigned long physaddr) +{ + return false; +} #endif unsigned long __head __startup_64(unsigned long physaddr, struct boot_params *bp) { - unsigned long load_delta; + unsigned long load_delta, *p; unsigned long pgtable_flags; pgdval_t *pgd; p4dval_t *p4d; pudval_t *pud; pmdval_t *pmd, pmd_entry; + bool la57; int i; unsigned int *next_pgt_ptr; - check_la57_support(physaddr); + la57 = check_la57_support(physaddr); /* Is the address too large? */ if (physaddr >> MAX_PHYSMEM_BITS) @@ -131,9 +137,14 @@ unsigned long __head __startup_64(unsigned long physaddr, /* Fixup the physical addresses in the page table */ pgd = fixup_pointer(&early_top_pgt, physaddr); - pgd[pgd_index(__START_KERNEL_map)] += load_delta; - - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + p = pgd + pgd_index(__START_KERNEL_map); + if (la57) + *p = (unsigned long)level4_kernel_pgt; + else + *p = (unsigned long)level3_kernel_pgt; + *p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta; + + if (la57) { p4d = fixup_pointer(&level4_kernel_pgt, physaddr); p4d[511] += load_delta; } @@ -158,7 +169,7 @@ unsigned long __head __startup_64(unsigned long physaddr, pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask(); - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + if (la57) { p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr); i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD; @@ -255,7 +266,7 @@ again: * critical -- __PAGE_OFFSET would point us back into the dynamic * range and we might end up looping forever... 
*/ - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + if (!pgtable_l5_enabled) p4d_p = pgd_p; else if (pgd) p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base); diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index d3f8b43d541a..145d7b95ae29 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -124,7 +124,10 @@ ENTRY(secondary_startup_64) /* Enable PAE mode, PGE and LA57 */ movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx #ifdef CONFIG_X86_5LEVEL + testl $1, pgtable_l5_enabled(%rip) + jz 1f orl $X86_CR4_LA57, %ecx +1: #endif movq %rcx, %cr4 @@ -372,12 +375,7 @@ GLOBAL(name) __INITDATA NEXT_PGD_PAGE(early_top_pgt) - .fill 511,8,0 -#ifdef CONFIG_X86_5LEVEL - .quad level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC -#else - .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC -#endif + .fill 512,8,0 .fill PTI_USER_PGD_FILL,8,0 NEXT_PAGE(early_dynamic_pgts) -- cgit v1.2.1 From 98219dda2ab56ce2a967fdebf81e838d676d9ddc Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:40 +0300 Subject: x86/mm: Fold p4d page table layer at runtime Change page table helpers to fold p4d at runtime. The logic is the same as in . Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-8-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt.h | 10 ++++++---- arch/x86/include/asm/pgalloc.h | 5 ++++- arch/x86/include/asm/pgtable.h | 11 ++++++++++- 3 files changed, 20 insertions(+), 6 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 892df375b615..3fbaad238a94 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -569,14 +569,16 @@ static inline p4dval_t p4d_val(p4d_t p4d) static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { - pgdval_t val = native_pgd_val(pgd); - - PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, val); + if (pgtable_l5_enabled) + PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd)); + else + set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd }); } static inline void pgd_clear(pgd_t *pgdp) { - set_pgd(pgdp, __pgd(0)); + if (pgtable_l5_enabled) + set_pgd(pgdp, __pgd(0)); } #endif /* CONFIG_PGTABLE_LEVELS == 5 */ diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h index aff42e1da6ee..263c142a6a6c 100644 --- a/arch/x86/include/asm/pgalloc.h +++ b/arch/x86/include/asm/pgalloc.h @@ -167,6 +167,8 @@ static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, #if CONFIG_PGTABLE_LEVELS > 4 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) { + if (!pgtable_l5_enabled) + return; paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT); set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d))); } @@ -191,7 +193,8 @@ extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d); static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d, unsigned long address) { - ___p4d_free_tlb(tlb, p4d); + if (pgtable_l5_enabled) + ___p4d_free_tlb(tlb, p4d); } #endif /* CONFIG_PGTABLE_LEVELS > 4 */ diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 63c2552b6b65..c8baa7f12d1b 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -65,7 +65,7 @@ extern 
pmdval_t early_pmd_flags; #ifndef __PAGETABLE_P4D_FOLDED #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd) -#define pgd_clear(pgd) native_pgd_clear(pgd) +#define pgd_clear(pgd) (pgtable_l5_enabled ? native_pgd_clear(pgd) : 0) #endif #ifndef set_p4d @@ -859,6 +859,8 @@ static inline unsigned long p4d_index(unsigned long address) #if CONFIG_PGTABLE_LEVELS > 4 static inline int pgd_present(pgd_t pgd) { + if (!pgtable_l5_enabled) + return 1; return pgd_flags(pgd) & _PAGE_PRESENT; } @@ -876,6 +878,8 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd) /* to find an entry in a page-table-directory. */ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) { + if (!pgtable_l5_enabled) + return (p4d_t *)pgd; return (p4d_t *)pgd_page_vaddr(*pgd) + p4d_index(address); } @@ -883,6 +887,9 @@ static inline int pgd_bad(pgd_t pgd) { unsigned long ignore_flags = _PAGE_USER; + if (!pgtable_l5_enabled) + return 0; + if (IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) ignore_flags |= _PAGE_NX; @@ -891,6 +898,8 @@ static inline int pgd_bad(pgd_t pgd) static inline int pgd_none(pgd_t pgd) { + if (!pgtable_l5_enabled) + return 0; /* * There is no need to do a workaround for the KNL stray * A/D bit erratum here. PGDs only point to page tables -- cgit v1.2.1 From 91f606a8fa68264224cbc76888fa8649cdbe9990 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:41 +0300 Subject: x86/mm: Replace compile-time checks for 5-level paging with runtime-time checks This patch converts the of CONFIG_X86_5LEVEL check to runtime checks for p4d folding. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-9-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_64.h | 23 ++++++++++------------- arch/x86/mm/dump_pagetables.c | 4 +--- arch/x86/mm/fault.c | 4 ++-- arch/x86/mm/ident_map.c | 2 +- arch/x86/mm/init_64.c | 30 ++++++++++++++++++------------ arch/x86/mm/kasan_init_64.c | 12 ++++++------ arch/x86/mm/kaslr.c | 6 +++--- arch/x86/mm/tlb.c | 2 +- arch/x86/platform/efi/efi_64.c | 2 +- arch/x86/power/hibernate_64.c | 6 +++--- 10 files changed, 46 insertions(+), 45 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 81462e9a34f6..81dda8d1d0bd 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -217,29 +217,26 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd) static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) { -#if defined(CONFIG_PAGE_TABLE_ISOLATION) && !defined(CONFIG_X86_5LEVEL) - p4dp->pgd = pti_set_user_pgd(&p4dp->pgd, p4d.pgd); -#else - *p4dp = p4d; -#endif + pgd_t pgd; + + if (pgtable_l5_enabled || !IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION)) { + *p4dp = p4d; + return; + } + + pgd = native_make_pgd(p4d_val(p4d)); + pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd); + *p4dp = native_make_p4d(pgd_val(pgd)); } static inline void native_p4d_clear(p4d_t *p4d) { -#ifdef CONFIG_X86_5LEVEL native_set_p4d(p4d, native_make_p4d(0)); -#else - native_set_p4d(p4d, (p4d_t) { .pgd = native_make_pgd(0)}); -#endif } static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) { -#ifdef CONFIG_PAGE_TABLE_ISOLATION *pgdp = pti_set_user_pgd(pgdp, pgd); -#else - *pgdp = pgd; -#endif } static inline void 
native_pgd_clear(pgd_t *pgd) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 9efee6f464ab..0d6d67d18ad6 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -348,9 +348,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, void *pt) { if (__pa(pt) == __pa(kasan_zero_pmd) || -#ifdef CONFIG_X86_5LEVEL - __pa(pt) == __pa(kasan_zero_p4d) || -#endif + (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) || __pa(pt) == __pa(kasan_zero_pud)) { pgprotval_t prot = pte_flags(kasan_zero_pte[0]); note_page(m, st, __pgprot(prot), 5); diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 800de815519c..321b78060e93 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -439,7 +439,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (CONFIG_PGTABLE_LEVELS > 4) { + if (pgtable_l5_enabled) { if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); arch_flush_lazy_mmu_mode(); @@ -454,7 +454,7 @@ static noinline int vmalloc_fault(unsigned long address) if (p4d_none(*p4d_ref)) return -1; - if (p4d_none(*p4d) && CONFIG_PGTABLE_LEVELS == 4) { + if (p4d_none(*p4d) && !pgtable_l5_enabled) { set_p4d(p4d, *p4d_ref); arch_flush_lazy_mmu_mode(); } else { diff --git a/arch/x86/mm/ident_map.c b/arch/x86/mm/ident_map.c index ab33a32df2a8..9aa22be8331e 100644 --- a/arch/x86/mm/ident_map.c +++ b/arch/x86/mm/ident_map.c @@ -120,7 +120,7 @@ int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page, result = ident_p4d_init(info, p4d, addr, next); if (result) return result; - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + if (pgtable_l5_enabled) { set_pgd(pgd, __pgd(__pa(p4d) | info->kernpg_flag)); } else { /* diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 6a4b20bc7527..3186e6836036 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -88,12 +88,7 @@ static int __init nonx32_setup(char *str) } __setup("noexec32=", nonx32_setup); -/* - * When memory was added make sure all the processes MM have - * suitable PGD entries in the local PGD level page. - */ -#ifdef CONFIG_X86_5LEVEL -void sync_global_pgds(unsigned long start, unsigned long end) +static void sync_global_pgds_l5(unsigned long start, unsigned long end) { unsigned long addr; @@ -129,8 +124,8 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_unlock(&pgd_lock); } } -#else -void sync_global_pgds(unsigned long start, unsigned long end) + +static void sync_global_pgds_l4(unsigned long start, unsigned long end) { unsigned long addr; @@ -173,7 +168,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) spin_unlock(&pgd_lock); } } -#endif + +/* + * When memory was added make sure all the processes MM have + * suitable PGD entries in the local PGD level page. 
+ */ +void sync_global_pgds(unsigned long start, unsigned long end) +{ + if (pgtable_l5_enabled) + sync_global_pgds_l5(start, end); + else + sync_global_pgds_l4(start, end); +} /* * NOTE: This function is marked __ref because it calls __init function @@ -632,7 +638,7 @@ phys_p4d_init(p4d_t *p4d_page, unsigned long paddr, unsigned long paddr_end, unsigned long vaddr = (unsigned long)__va(paddr); int i = p4d_index(vaddr); - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + if (!pgtable_l5_enabled) return phys_pud_init((pud_t *) p4d_page, paddr, paddr_end, page_size_mask); for (; i < PTRS_PER_P4D; i++, paddr = paddr_next) { @@ -712,7 +718,7 @@ kernel_physical_mapping_init(unsigned long paddr_start, page_size_mask); spin_lock(&init_mm.page_table_lock); - if (IS_ENABLED(CONFIG_X86_5LEVEL)) + if (pgtable_l5_enabled) pgd_populate(&init_mm, pgd, p4d); else p4d_populate(&init_mm, p4d_offset(pgd, vaddr), (pud_t *) p4d); @@ -1093,7 +1099,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, * 5-level case we should free them. This code will have to change * to adapt for boot-time switching between 4 and 5 level page tables. */ - if (CONFIG_PGTABLE_LEVELS == 5) + if (pgtable_l5_enabled) free_pud_table(pud_base, p4d, altmap); } diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 12ec90f62457..0df0dd13a71d 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -176,10 +176,10 @@ static void __init clear_pgds(unsigned long start, * With folded p4d, pgd_clear() is nop, use p4d_clear() * instead. */ - if (CONFIG_PGTABLE_LEVELS < 5) - p4d_clear(p4d_offset(pgd, start)); - else + if (pgtable_l5_enabled) pgd_clear(pgd); + else + p4d_clear(p4d_offset(pgd, start)); } pgd = pgd_offset_k(start); @@ -191,7 +191,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr) { unsigned long p4d; - if (!IS_ENABLED(CONFIG_X86_5LEVEL)) + if (!pgtable_l5_enabled) return (p4d_t *)pgd; p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK; @@ -272,7 +272,7 @@ void __init kasan_early_init(void) for (i = 0; i < PTRS_PER_PUD; i++) kasan_zero_pud[i] = __pud(pud_val); - for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++) + for (i = 0; pgtable_l5_enabled && i < PTRS_PER_P4D; i++) kasan_zero_p4d[i] = __p4d(p4d_val); kasan_map_early_shadow(early_top_pgt); @@ -303,7 +303,7 @@ void __init kasan_init(void) * bunch of things like kernel code, modules, EFI mapping, etc. * We need to take extra steps to not overwrite them. */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + if (pgtable_l5_enabled) { void *ptr; ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END)); diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c index 641169d38184..615cc03ced84 100644 --- a/arch/x86/mm/kaslr.c +++ b/arch/x86/mm/kaslr.c @@ -124,7 +124,7 @@ void __init kernel_randomize_memory(void) */ entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i); prandom_bytes_state(&rand_state, &rand, sizeof(rand)); - if (IS_ENABLED(CONFIG_X86_5LEVEL)) + if (pgtable_l5_enabled) entropy = (rand % (entropy + 1)) & P4D_MASK; else entropy = (rand % (entropy + 1)) & PUD_MASK; @@ -136,7 +136,7 @@ void __init kernel_randomize_memory(void) * randomization alignment. 
*/ vaddr += get_padding(&kaslr_regions[i]); - if (IS_ENABLED(CONFIG_X86_5LEVEL)) + if (pgtable_l5_enabled) vaddr = round_up(vaddr + 1, P4D_SIZE); else vaddr = round_up(vaddr + 1, PUD_SIZE); @@ -212,7 +212,7 @@ void __meminit init_trampoline(void) return; } - if (IS_ENABLED(CONFIG_X86_5LEVEL)) + if (pgtable_l5_enabled) init_trampoline_p4d(); else init_trampoline_pud(); diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c index 6550d37d0f06..92cb8a901c36 100644 --- a/arch/x86/mm/tlb.c +++ b/arch/x86/mm/tlb.c @@ -157,7 +157,7 @@ static void sync_current_stack_to_mm(struct mm_struct *mm) unsigned long sp = current_stack_pointer; pgd_t *pgd = pgd_offset(mm, sp); - if (CONFIG_PGTABLE_LEVELS > 4) { + if (pgtable_l5_enabled) { if (unlikely(pgd_none(*pgd))) { pgd_t *pgd_ref = pgd_offset_k(sp); diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index d52aaa7dc088..4845871a2006 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -227,7 +227,7 @@ int __init efi_alloc_page_tables(void) pud = pud_alloc(&init_mm, p4d, EFI_VA_END); if (!pud) { - if (CONFIG_PGTABLE_LEVELS > 4) + if (pgtable_l5_enabled) free_page((unsigned long) pgd_page_vaddr(*pgd)); free_page((unsigned long)efi_pgd); return -ENOMEM; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 0ef5e5204968..74a532989308 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -50,7 +50,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) { pmd_t *pmd; pud_t *pud; - p4d_t *p4d; + p4d_t *p4d = NULL; /* * The new mapping only has to cover the page containing the image @@ -66,7 +66,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) * tables used by the image kernel. */ - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + if (pgtable_l5_enabled) { p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); if (!p4d) return -ENOMEM; @@ -84,7 +84,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) __pmd((jump_address_phys & PMD_MASK) | __PAGE_KERNEL_LARGE_EXEC)); set_pud(pud + pud_index(restore_jump_address), __pud(__pa(pmd) | _KERNPG_TABLE)); - if (IS_ENABLED(CONFIG_X86_5LEVEL)) { + if (p4d) { set_p4d(p4d + p4d_index(restore_jump_address), __p4d(__pa(pud) | _KERNPG_TABLE)); set_pgd(pgd + pgd_index(restore_jump_address), __pgd(__pa(p4d) | _KERNPG_TABLE)); } else { -- cgit v1.2.1 From 6657fca06e3ffab8d0b3f9d8b397f5ee498952d7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 14 Feb 2018 21:25:42 +0300 Subject: x86/mm: Allow to boot without LA57 if CONFIG_X86_5LEVEL=y All pieces of the puzzle are in place and we can now allow to boot with CONFIG_X86_5LEVEL=y on a machine without LA57 support. Kernel will detect that LA57 is missing and fold p4d at runtime. Update the documentation and the Kconfig option description to reflect the change. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180214182542.69302-10-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ++-- arch/x86/boot/compressed/misc.c | 16 ---------------- arch/x86/include/asm/required-features.h | 8 +------- 3 files changed, 3 insertions(+), 25 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index fcc3f88996b3..1c4f7b6a94f4 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1440,8 +1440,8 @@ config X86_5LEVEL It will be supported by future Intel CPUs. - Note: a kernel with this option enabled can only be booted - on machines that support the feature. + A kernel with the option enabled can be booted on machines that + support 4- or 5-level paging. See Documentation/x86/x86_64/5level-paging.txt for more information. diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index 98761a1576ce..b50c42455e25 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -169,16 +169,6 @@ void __puthex(unsigned long value) } } -static bool l5_supported(void) -{ - /* Check if leaf 7 is supported. */ - if (native_cpuid_eax(0) < 7) - return 0; - - /* Check if la57 is supported. */ - return native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)); -} - #if CONFIG_X86_NEED_RELOCS static void handle_relocations(void *output, unsigned long output_len, unsigned long virt_addr) @@ -372,12 +362,6 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, console_init(); debug_putstr("early console in extract_kernel\n"); - if (IS_ENABLED(CONFIG_X86_5LEVEL) && !l5_supported()) { - error("This linux kernel as configured requires 5-level paging\n" - "This CPU does not support the required 'cr4.la57' feature\n" - "Unable to boot - please use a kernel appropriate for your CPU\n"); - } - free_mem_ptr = heap; /* Heap */ free_mem_end_ptr = heap + BOOT_HEAP_SIZE; diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index fb3a6de7440b..6847d85400a8 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -53,12 +53,6 @@ # define NEED_MOVBE 0 #endif -#ifdef CONFIG_X86_5LEVEL -# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) -#else -# define NEED_LA57 0 -#endif - #ifdef CONFIG_X86_64 #ifdef CONFIG_PARAVIRT /* Paravirtualized systems may not have PSE or PGE available */ @@ -104,7 +98,7 @@ #define REQUIRED_MASK13 0 #define REQUIRED_MASK14 0 #define REQUIRED_MASK15 0 -#define REQUIRED_MASK16 (NEED_LA57) +#define REQUIRED_MASK16 0 #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) -- cgit v1.2.1 From 63e708f826bb21470155d37b103a75d8a9e25b18 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Wed, 7 Feb 2018 18:49:23 -0500 Subject: x86/xen: Calculate __max_logical_packages on PV domains The kernel panics on PV domains because native_smp_cpus_done() is only called for HVM domains. Calculate __max_logical_packages for PV domains. Fixes: b4c0a7326f5d ("x86/smpboot: Fix __max_logical_packages estimate") Signed-off-by: Prarit Bhargava Tested-and-reported-by: Simon Gaiser Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: x86@kernel.org Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Dou Liyang Cc: Prarit Bhargava Cc: Kate Stewart Cc: Greg Kroah-Hartman Cc: Andy Lutomirski Cc: Andi Kleen Cc: Vitaly Kuznetsov Cc: xen-devel@lists.xenproject.org Reviewed-by: Boris Ostrovsky Signed-off-by: Juergen Gross --- arch/x86/include/asm/smp.h | 1 + arch/x86/kernel/smpboot.c | 10 ++++++++-- arch/x86/xen/smp.c | 2 ++ 3 files changed, 11 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 461f53d27708..a4189762b266 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask) void cpu_disable_common(void); void native_smp_prepare_boot_cpu(void); void native_smp_prepare_cpus(unsigned int max_cpus); +void calculate_max_logical_packages(void); void native_smp_cpus_done(unsigned int max_cpus); void common_cpu_up(unsigned int cpunum, struct task_struct *tidle); int native_cpu_up(unsigned int cpunum, struct task_struct *tidle); diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index cfc61e1d45e2..9eee25d07586 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void) cpu_set_state_online(me); } -void __init native_smp_cpus_done(unsigned int max_cpus) +void __init calculate_max_logical_packages(void) { int ncpus; - pr_debug("Boot done\n"); /* * Today neither Intel nor AMD support heterogenous systems so * extrapolate the boot cpu's data to all packages. @@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus) ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); __max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus); pr_info("Max logical packages: %u\n", __max_logical_packages); +} + +void __init native_smp_cpus_done(unsigned int max_cpus) +{ + pr_debug("Boot done\n"); + + calculate_max_logical_packages(); if (x86_has_numa_in_package) set_sched_topology(x86_numa_in_package_topology); diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index 77c959cf81e7..7a43b2ae19f1 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c @@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus) if (xen_hvm_domain()) native_smp_cpus_done(max_cpus); + else + calculate_max_logical_packages(); if (xen_have_vcpu_info_placement) return; -- cgit v1.2.1 From ce279cdc04aafd5c41ae49f941ee2c3342e35e3e Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:51 +0800 Subject: x86/apic: Split out restore_boot_irq_mode() from disable_IO_APIC() This is a preparation patch. Split out the code which restores boot irq mode from disable_IO_APIC() into the new restore_boot_irq_mode() function. No functional changes. Signed-off-by: Baoquan He Reviewed-by: Eric W. Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: uobergfe@redhat.com Link: http://lkml.kernel.org/r/20180214054656.3780-2-bhe@redhat.com [ Build fix for !CONFIG_IO_APIC and rewrote the changelog. 
] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 2 ++ arch/x86/kernel/apic/io_apic.c | 5 +++++ 2 files changed, 7 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index a8834dd546cd..4e3bb13e02a6 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -193,6 +193,7 @@ static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) extern void setup_IO_APIC(void); extern void enable_IO_APIC(void); extern void disable_IO_APIC(void); +extern void restore_boot_irq_mode(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin); extern void print_IO_APICs(void); #else /* !CONFIG_X86_IO_APIC */ @@ -232,6 +233,7 @@ static inline void io_apic_init_mappings(void) { } static inline void setup_IO_APIC(void) { } static inline void enable_IO_APIC(void) { } +static inline void restore_boot_irq_mode(void) { } #endif diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 8ad2e410974f..7b73b6b9b4b6 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1448,6 +1448,11 @@ void disable_IO_APIC(void) */ clear_IO_APIC(); + restore_boot_irq_mode(); +} + +void restore_boot_irq_mode(void) +{ if (!nr_legacy_irqs()) return; -- cgit v1.2.1 From 3c9e76dbea004b2c7c3ce872022ceaf5ff0dae79 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:52 +0800 Subject: x86/apic: Split disable_IO_APIC() into two functions to fix CONFIG_KEXEC_JUMP=y In the following patches, disable_IO_APIC() will be broken up into clear_IO_APIC() and restore_boot_irq_mode(). These two functions will be called separately where they are needed to fix a regression introduced by: 522e66464467 ("x86/apic: Disable I/O APIC before shutdown of the local APIC"). The CONFIG_KEXEC_JUMP=y code, however, doesn't call lapic_shutdown() before the jump the way kexec/kdump does, so it is not impacted by commit 522e66464467. Hence, make clear_IO_APIC() public and replace disable_IO_APIC() with clear_IO_APIC() plus restore_boot_irq_mode(), keeping the CONFIG_KEXEC_JUMP=y code unchanged in essence. No functional change. Signed-off-by: Baoquan He Reviewed-by: Eric W. Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: uobergfe@redhat.com Link: http://lkml.kernel.org/r/20180214054656.3780-3-bhe@redhat.com [ Rewrote the changelog. 
] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 1 + arch/x86/kernel/apic/io_apic.c | 2 +- arch/x86/kernel/machine_kexec_32.c | 3 ++- arch/x86/kernel/machine_kexec_64.c | 3 ++- 4 files changed, 6 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 4e3bb13e02a6..2ae1b424c1bd 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -193,6 +193,7 @@ static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) extern void setup_IO_APIC(void); extern void enable_IO_APIC(void); extern void disable_IO_APIC(void); +extern void clear_IO_APIC(void); extern void restore_boot_irq_mode(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin); extern void print_IO_APICs(void); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 7b73b6b9b4b6..2d7cd2db77f5 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -587,7 +587,7 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin) mpc_ioapic_id(apic), pin); } -static void clear_IO_APIC (void) +void clear_IO_APIC (void) { int apic, pin; diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index edfede768688..4cd79d88a4ac 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -199,7 +199,8 @@ void machine_kexec(struct kimage *image) * one form or other. kexec jump path also need * one. */ - disable_IO_APIC(); + clear_IO_APIC(); + restore_boot_irq_mode(); #endif } diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 1f790cf9d38f..2ab14b9c1a89 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -297,7 +297,8 @@ void machine_kexec(struct kimage *image) * one form or other. kexec jump path also need * one. */ - disable_IO_APIC(); + clear_IO_APIC(); + restore_boot_irq_mode(); #endif } -- cgit v1.2.1 From 339b2ae0cd5d4a58f9efe06e4ee36adbeca59228 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:53 +0800 Subject: x86/apic: Fix restoring boot IRQ mode in reboot and kexec/kdump This is a regression fix. Before, to fix erratum AVR31, the following commit: 522e66464467 ("x86/apic: Disable I/O APIC before shutdown of the local APIC") ... moved the lapic_shutdown() call to after disable_IO_APIC() in the reboot and kexec/kdump code paths. This introduced the following regression: disable_IO_APIC() not only clears the IO-APIC, but it also restores boot IRQ mode by setting the LAPIC/APIC/IMCR, calling lapic_shutdown() after disable_IO_APIC() will disable LAPIC and ruin the possible virtual wire mode setting which the code has been trying to do all along. The consequence is that a KVM guest kernel always prints the warning below during kexec/kdump as the kernel boots up: [ 0.001000] WARNING: CPU: 0 PID: 0 at arch/x86/kernel/apic/apic.c:1467 setup_local_APIC+0x228/0x330 [ ........] [ 0.001000] Call Trace: [ 0.001000] apic_bsp_setup+0x56/0x74 [ 0.001000] x86_late_time_init+0x11/0x16 [ 0.001000] start_kernel+0x3c9/0x486 [ 0.001000] secondary_startup_64+0xa5/0xb0 [ ........] [ 0.001000] masked ExtINT on CPU#0 To fix this, just call clear_IO_APIC() to stop the IO-APIC where disable_IO_APIC() was called, and call restore_boot_irq_mode() to restore boot IRQ mode before a reboot or a kexec/kdump jump. Signed-off-by: Baoquan He Reviewed-by: Eric W. 
Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: stable@vger.kernel.org Cc: uobergfe@redhat.com Fixes: commit 522e66464467 ("x86/apic: Disable I/O APIC before shutdown of the local APIC") Link: http://lkml.kernel.org/r/20180214054656.3780-4-bhe@redhat.com [ Rewrote the changelog. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/crash.c | 3 ++- arch/x86/kernel/reboot.c | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 10e74d4778a1..1f6680427ff0 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -199,9 +199,10 @@ void native_machine_crash_shutdown(struct pt_regs *regs) #ifdef CONFIG_X86_IO_APIC /* Prevent crash_kexec() from deadlocking on ioapic_lock. */ ioapic_zap_locks(); - disable_IO_APIC(); + clear_IO_APIC(); #endif lapic_shutdown(); + restore_boot_irq_mode(); #ifdef CONFIG_HPET_TIMER hpet_disable(); #endif diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 2126b9d27c34..725624b6c0c0 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -666,7 +666,7 @@ void native_machine_shutdown(void) * Even without the erratum, it still makes sense to quiet IO APIC * before disabling Local APIC. */ - disable_IO_APIC(); + clear_IO_APIC(); #endif #ifdef CONFIG_SMP @@ -680,6 +680,7 @@ void native_machine_shutdown(void) #endif lapic_shutdown(); + restore_boot_irq_mode(); #ifdef CONFIG_HPET_TIMER hpet_disable(); -- cgit v1.2.1 From 50374b96d2d30c03c8d42b3f8846d8938748d454 Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:54 +0800 Subject: x86/apic: Remove the (now) unused disable_IO_APIC() function No one uses it anymore. Signed-off-by: Baoquan He Reviewed-by: Eric W. Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: uobergfe@redhat.com Link: http://lkml.kernel.org/r/20180214054656.3780-5-bhe@redhat.com [ Rewrote the changelog. 
] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 1 - arch/x86/kernel/apic/io_apic.c | 13 ------------- arch/x86/kernel/machine_kexec_32.c | 5 ++--- arch/x86/kernel/machine_kexec_64.c | 5 ++--- 4 files changed, 4 insertions(+), 20 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 2ae1b424c1bd..8018fc48f18d 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -192,7 +192,6 @@ static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) extern void setup_IO_APIC(void); extern void enable_IO_APIC(void); -extern void disable_IO_APIC(void); extern void clear_IO_APIC(void); extern void restore_boot_irq_mode(void); extern int IO_APIC_get_PCI_irq_vector(int bus, int devfn, int pin); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 2d7cd2db77f5..9d86b10c2121 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1438,19 +1438,6 @@ void native_disable_io_apic(void) disconnect_bsp_APIC(ioapic_i8259.pin != -1); } -/* - * Not an __init, needed by the reboot code - */ -void disable_IO_APIC(void) -{ - /* - * Clear the IO-APIC before rebooting: - */ - clear_IO_APIC(); - - restore_boot_irq_mode(); -} - void restore_boot_irq_mode(void) { if (!nr_legacy_irqs()) diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c index 4cd79d88a4ac..60cdec6628b0 100644 --- a/arch/x86/kernel/machine_kexec_32.c +++ b/arch/x86/kernel/machine_kexec_32.c @@ -195,9 +195,8 @@ void machine_kexec(struct kimage *image) /* * We need to put APICs in legacy mode so that we can * get timer interrupts in second kernel. kexec/kdump - * paths already have calls to disable_IO_APIC() in - * one form or other. kexec jump path also need - * one. + * paths already have calls to restore_boot_irq_mode() + * in one form or other. kexec jump path also need one. */ clear_IO_APIC(); restore_boot_irq_mode(); diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 2ab14b9c1a89..5ffbc55ea80f 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -293,9 +293,8 @@ void machine_kexec(struct kimage *image) /* * We need to put APICs in legacy mode so that we can * get timer interrupts in second kernel. kexec/kdump - * paths already have calls to disable_IO_APIC() in - * one form or other. kexec jump path also need - * one. + * paths already have calls to restore_boot_irq_mode() + * in one form or other. kexec jump path also need one. */ clear_IO_APIC(); restore_boot_irq_mode(); -- cgit v1.2.1 From 51b146c572201e3c368e0baa3e565760aefcf25f Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:55 +0800 Subject: x86/apic: Rename variables and functions related to x86_io_apic_ops The names of x86_io_apic_ops and its two member variables are misleading: The ->read() member is to read IO_APIC reg, while ->disable() which is called by native_disable_io_apic()/irq_remapping_disable_io_apic() is actually used to restore boot IRQ mode, not to disable the IO-APIC. So rename x86_io_apic_ops to 'x86_apic_ops' since it doesn't only handle the IO-APIC, but also the local APIC. Also rename its member variables and the related callbacks. Signed-off-by: Baoquan He Reviewed-by: Eric W. 
Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: uobergfe@redhat.com Link: http://lkml.kernel.org/r/20180214054656.3780-6-bhe@redhat.com [ Rewrote the changelog. ] Signed-off-by: Ingo Molnar --- arch/x86/include/asm/io_apic.h | 6 +++--- arch/x86/include/asm/x86_init.h | 8 ++++---- arch/x86/kernel/apic/io_apic.c | 4 ++-- arch/x86/kernel/x86_init.c | 6 +++--- arch/x86/xen/apic.c | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 8018fc48f18d..fd20a2334885 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -183,11 +183,11 @@ extern void disable_ioapic_support(void); extern void __init io_apic_init_mappings(void); extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg); -extern void native_disable_io_apic(void); +extern void native_restore_boot_irq_mode(void); static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg) { - return x86_io_apic_ops.read(apic, reg); + return x86_apic_ops.io_apic_read(apic, reg); } extern void setup_IO_APIC(void); @@ -229,7 +229,7 @@ static inline void mp_save_irq(struct mpc_intsrc *m) { } static inline void disable_ioapic_support(void) { } static inline void io_apic_init_mappings(void) { } #define native_io_apic_read NULL -#define native_disable_io_apic NULL +#define native_restore_boot_irq_mode NULL static inline void setup_IO_APIC(void) { } static inline void enable_IO_APIC(void) { } diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index fc2f082ac635..88306054bd98 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -274,16 +274,16 @@ struct x86_msi_ops { void (*restore_msi_irqs)(struct pci_dev *dev); }; -struct x86_io_apic_ops { - unsigned int (*read) (unsigned int apic, unsigned int reg); - void (*disable)(void); +struct x86_apic_ops { + unsigned int (*io_apic_read) (unsigned int apic, unsigned int reg); + void (*restore)(void); }; extern struct x86_init_ops x86_init; extern struct x86_cpuinit_ops x86_cpuinit; extern struct x86_platform_ops x86_platform; extern struct x86_msi_ops x86_msi; -extern struct x86_io_apic_ops x86_io_apic_ops; +extern struct x86_apic_ops x86_apic_ops; extern void x86_early_init_platform_quirks(void); extern void x86_init_noop(void); diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 9d86b10c2121..68129f11e7db 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1410,7 +1410,7 @@ void __init enable_IO_APIC(void) clear_IO_APIC(); } -void native_disable_io_apic(void) +void native_restore_boot_irq_mode(void) { /* * If the i8259 is routed through an IOAPIC @@ -1443,7 +1443,7 @@ void restore_boot_irq_mode(void) if (!nr_legacy_irqs()) return; - x86_io_apic_ops.disable(); + x86_apic_ops.restore(); } #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 1151ccd72ce9..2bccd03bd654 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -146,7 +146,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev) } #endif -struct x86_io_apic_ops x86_io_apic_ops __ro_after_init = { - .read = native_io_apic_read, - .disable = native_disable_io_apic, +struct x86_apic_ops x86_apic_ops __ro_after_init = { + .io_apic_read = native_io_apic_read, + .restore = native_restore_boot_irq_mode, }; diff --git 
a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c index de58533d3664..2163888497d3 100644 --- a/arch/x86/xen/apic.c +++ b/arch/x86/xen/apic.c @@ -215,7 +215,7 @@ static void __init xen_apic_check(void) } void __init xen_init_apic(void) { - x86_io_apic_ops.read = xen_io_apic_read; + x86_apic_ops.io_apic_read = xen_io_apic_read; /* On PV guests the APIC CPUID bit is disabled so none of the * routines end up executing. */ if (!xen_initial_domain()) -- cgit v1.2.1 From bee3204ec3c49f6f53add9c3962c9012a5c036fa Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Wed, 14 Feb 2018 13:46:56 +0800 Subject: x86/apic: Set up through-local-APIC mode on the boot CPU if 'noapic' specified Currently the kdump kernel becomes very slow if 'noapic' is specified. Normal kernel doesn't have this bug. Kernel parameter 'noapic' is used to disable IO-APIC in system for testing or special purpose. Here the root cause is that in kdump kernel LAPIC is disabled since commit: 522e664644 ("x86/apic: Disable I/O APIC before shutdown of the local APIC") In this case we need set up through-local-APIC on boot CPU in setup_local_APIC(). In normal kernel the legacy irq mode is enabled by the BIOS. If it is virtual wire mode, the local-APIC has been enabled and set as through-local-APIC. Though we fixed the regression introduced by commit 522e664644, to further improve robustness set up the through-local-APIC mode explicitly, do not rely on the default boot IRQ mode. Signed-off-by: Baoquan He Reviewed-by: Eric W. Biederman Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: joro@8bytes.org Cc: prarit@redhat.com Cc: uobergfe@redhat.com Link: http://lkml.kernel.org/r/20180214054656.3780-7-bhe@redhat.com [ Rewrote the changelog. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/apic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 871018dd49a3..2ceac9f6c5d7 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1570,7 +1570,7 @@ static void setup_local_APIC(void) * TODO: set up through-local-APIC from through-I/O-APIC? --macro */ value = apic_read(APIC_LVT0) & APIC_LVT_MASKED; - if (!cpu && (pic_mode || !value)) { + if (!cpu && (pic_mode || !value || skip_ioapic_setup)) { value = APIC_DM_EXTINT; apic_printk(APIC_VERBOSE, "enabled ExtINT on CPU#%d\n", cpu); } else { -- cgit v1.2.1 From 894266466aa74a226e58e23975118ff6231dd2e4 Mon Sep 17 00:00:00 2001 From: KarimAllah Ahmed Date: Tue, 20 Feb 2018 08:39:51 +0100 Subject: x86/headers/UAPI: Use __u64 instead of u64 in ... since u64 has a hidden header dependency that was not there before using it (i.e. it breaks our VMM build). Also, __u64 is the right way to expose data types through UAPI. Signed-off-by: KarimAllah Ahmed Acked-by: Thomas Gleixner Cc: Haiyang Zhang Cc: K. Y. 
Srinivasan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephen Hemminger Cc: devel@linuxdriverproject.org Fixes: 93286261 ("x86/hyperv: Reenlightenment notifications support") Link: http://lkml.kernel.org/r/1519112391-23773-1-git-send-email-karahmed@amazon.de Signed-off-by: Ingo Molnar --- arch/x86/include/uapi/asm/hyperv.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h index 197c2e6c7376..099414345865 100644 --- a/arch/x86/include/uapi/asm/hyperv.h +++ b/arch/x86/include/uapi/asm/hyperv.h @@ -241,24 +241,24 @@ #define HV_X64_MSR_REENLIGHTENMENT_CONTROL 0x40000106 struct hv_reenlightenment_control { - u64 vector:8; - u64 reserved1:8; - u64 enabled:1; - u64 reserved2:15; - u64 target_vp:32; + __u64 vector:8; + __u64 reserved1:8; + __u64 enabled:1; + __u64 reserved2:15; + __u64 target_vp:32; }; #define HV_X64_MSR_TSC_EMULATION_CONTROL 0x40000107 #define HV_X64_MSR_TSC_EMULATION_STATUS 0x40000108 struct hv_tsc_emulation_control { - u64 enabled:1; - u64 reserved:63; + __u64 enabled:1; + __u64 reserved:63; }; struct hv_tsc_emulation_status { - u64 inprogress:1; - u64 reserved:63; + __u64 inprogress:1; + __u64 reserved:63; }; #define HV_X64_MSR_HYPERCALL_ENABLE 0x00000001 -- cgit v1.2.1 From 85c615eb52222bc5fab6c7190d146bc59fac289e Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 20 Feb 2018 21:58:21 +0100 Subject: x86/oprofile: Fix bogus GCC-8 warning in nmi_setup() GCC-8 shows a warning for the x86 oprofile code that copies per-CPU data from CPU 0 to all other CPUs, which when building a non-SMP kernel turns into a memcpy() with identical source and destination pointers: arch/x86/oprofile/nmi_int.c: In function 'mux_clone': arch/x86/oprofile/nmi_int.c:285:2: error: 'memcpy' source argument is the same as destination [-Werror=restrict] memcpy(per_cpu(cpu_msrs, cpu).multiplex, ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ per_cpu(cpu_msrs, 0).multiplex, ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ sizeof(struct op_msr) * model->num_virt_counters); ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ arch/x86/oprofile/nmi_int.c: In function 'nmi_setup': arch/x86/oprofile/nmi_int.c:466:3: error: 'memcpy' source argument is the same as destination [-Werror=restrict] arch/x86/oprofile/nmi_int.c:470:3: error: 'memcpy' source argument is the same as destination [-Werror=restrict] I have analyzed a number of such warnings now: some are valid and the GCC warning is welcome. Others turned out to be false-positives, and GCC was changed to not warn about those any more. This is a corner case that is a false-positive but the GCC developers feel it's better to keep warning about it. In this case, it seems best to work around it by telling GCC a little more clearly that this code path is never hit with an IS_ENABLED() configuration check. Cc:stable as we also want old kernels to build cleanly with GCC-8. 
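A minimal stand-alone sketch (illustration only, not kernel code) of the warning class involved: GCC-8's -Wrestrict fires when the compiler can prove that memcpy()'s source and destination are the same object, which is what the per-CPU copy degenerates into on a non-SMP build; the IS_ENABLED(CONFIG_SMP) check added in the hunk below makes that call obviously unreachable instead.

/* sketch.c - build with e.g. gcc-8 -O2 -Wrestrict -c sketch.c (assumed flags) */
#include <string.h>

struct op_regs { unsigned long counters[4]; };	/* illustrative type only */
static struct op_regs per_cpu_regs[1];		/* a single "CPU", as on !SMP */

void clone_cpu0(void)
{
	/* source and destination provably alias, so -Wrestrict warns here */
	memcpy(&per_cpu_regs[0], &per_cpu_regs[0], sizeof(per_cpu_regs[0]));
}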
Signed-off-by: Arnd Bergmann Cc: Jessica Yu Cc: Kees Cook Cc: Linus Torvalds Cc: Martin Sebor Cc: Peter Zijlstra Cc: Robert Richter Cc: Thomas Gleixner Cc: oprofile-list@lists.sf.net Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20180220205826.2008875-1-arnd@arndb.de Link: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=84095 Signed-off-by: Ingo Molnar --- arch/x86/oprofile/nmi_int.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 174c59774cc9..a7a7677265b6 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -460,7 +460,7 @@ static int nmi_setup(void) goto fail; for_each_possible_cpu(cpu) { - if (!cpu) + if (!IS_ENABLED(CONFIG_SMP) || !cpu) continue; memcpy(per_cpu(cpu_msrs, cpu).counters, -- cgit v1.2.1 From b9952ec78778aa7ae5b8df672668aece6fc93d2a Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 16 Feb 2018 14:49:46 +0300 Subject: x86/xen: Allow XEN_PV and XEN_PVH to be enabled with X86_5LEVEL With boot-time switching between paging modes, XEN_PV and XEN_PVH can be boot into 4-level paging mode. Tested-by: Juergen Gross Signed-off-by: Kirill A. Shutemov Reviewed-by: Juergen Gross Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180216114948.68868-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/head_64.S | 12 ++++++------ arch/x86/xen/Kconfig | 5 ----- arch/x86/xen/mmu_pv.c | 21 +++++++++++++++++++++ 3 files changed, 27 insertions(+), 11 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 145d7b95ae29..3e9de0fc97de 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -38,12 +38,12 @@ * */ +#define l4_index(x) (((x) >> 39) & 511) #define pud_index(x) (((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1)) -#if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) -PGD_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE_L4) -PGD_START_KERNEL = pgd_index(__START_KERNEL_map) -#endif +L4_PAGE_OFFSET = l4_index(__PAGE_OFFSET_BASE_L4) +L4_START_KERNEL = l4_index(__START_KERNEL_map) + L3_START_KERNEL = pud_index(__START_KERNEL_map) .text @@ -386,9 +386,9 @@ NEXT_PAGE(early_dynamic_pgts) #if defined(CONFIG_XEN_PV) || defined(CONFIG_XEN_PVH) NEXT_PGD_PAGE(init_top_pgt) .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC - .org init_top_pgt + PGD_PAGE_OFFSET*8, 0 + .org init_top_pgt + L4_PAGE_OFFSET*8, 0 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC - .org init_top_pgt + PGD_START_KERNEL*8, 0 + .org init_top_pgt + L4_START_KERNEL*8, 0 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC .fill PTI_USER_PGD_FILL,8,0 diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index f605825a04ab..c1f98f32c45f 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig @@ -18,9 +18,6 @@ config XEN_PV bool "Xen PV guest support" default y depends on XEN - # XEN_PV is not ready to work with 5-level paging. - # Changes to hypervisor are also required. 
- depends on !X86_5LEVEL select XEN_HAVE_PVMMU select XEN_HAVE_VPMU help @@ -79,6 +76,4 @@ config XEN_DEBUG_FS config XEN_PVH bool "Support for running as a PVH guest" depends on XEN && XEN_PVHVM && ACPI - # Pre-built page tables are not ready to handle 5-level paging. - depends on !X86_5LEVEL def_bool n diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c index d85076223a69..3f4fec59af09 100644 --- a/arch/x86/xen/mmu_pv.c +++ b/arch/x86/xen/mmu_pv.c @@ -538,6 +538,22 @@ static void xen_set_p4d(p4d_t *ptr, p4d_t val) xen_mc_issue(PARAVIRT_LAZY_MMU); } + +#if CONFIG_PGTABLE_LEVELS >= 5 +__visible p4dval_t xen_p4d_val(p4d_t p4d) +{ + return pte_mfn_to_pfn(p4d.p4d); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val); + +__visible p4d_t xen_make_p4d(p4dval_t p4d) +{ + p4d = pte_pfn_to_mfn(p4d); + + return native_make_p4d(p4d); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d); +#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ #endif /* CONFIG_X86_64 */ static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, @@ -2411,6 +2427,11 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .alloc_pud = xen_alloc_pmd_init, .release_pud = xen_release_pmd_init, + +#if CONFIG_PGTABLE_LEVELS >= 5 + .p4d_val = PV_CALLEE_SAVE(xen_p4d_val), + .make_p4d = PV_CALLEE_SAVE(xen_make_p4d), +#endif #endif /* CONFIG_X86_64 */ .activate_mm = xen_activate_mm, -- cgit v1.2.1 From 92e1c5b3f7bf5407cfdbf13613e7101831216dc5 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 16 Feb 2018 14:49:47 +0300 Subject: x86/mm: Redefine some of page table helpers as macros This is preparation for the next patch, which would change pgtable_l5_enabled to be cpu_feature_enabled(X86_FEATURE_LA57). The change makes few helpers in paravirt.h dependent on cpu_feature_enabled() definition from cpufeature.h. And cpufeature.h is dependent on paravirt.h. Let's re-define some of helpers as macros to break this dependency loop. Signed-off-by: Kirill A. 
Shutemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180216114948.68868-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/paravirt.h | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index 3fbaad238a94..2c0c8c9e9516 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -567,19 +567,22 @@ static inline p4dval_t p4d_val(p4d_t p4d) return PVOP_CALLEE1(p4dval_t, pv_mmu_ops.p4d_val, p4d.p4d); } -static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) +static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd) { - if (pgtable_l5_enabled) - PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd)); - else - set_p4d((p4d_t *)(pgdp), (p4d_t) { pgd.pgd }); + PVOP_VCALL2(pv_mmu_ops.set_pgd, pgdp, native_pgd_val(pgd)); } -static inline void pgd_clear(pgd_t *pgdp) -{ - if (pgtable_l5_enabled) - set_pgd(pgdp, __pgd(0)); -} +#define set_pgd(pgdp, pgdval) do { \ + if (pgtable_l5_enabled) \ + __set_pgd(pgdp, pgdval); \ + else \ + set_p4d((p4d_t *)(pgdp), (p4d_t) { (pgdval).pgd }); \ +} while (0) + +#define pgd_clear(pgdp) do { \ + if (pgtable_l5_enabled) \ + set_pgd(pgdp, __pgd(0)); \ +} while (0) #endif /* CONFIG_PGTABLE_LEVELS == 5 */ -- cgit v1.2.1 From 39b9552281abfcdfc54162897018890dafe7ffef Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Fri, 16 Feb 2018 14:49:48 +0300 Subject: x86/mm: Optimize boot-time paging mode switching cost By this point we have functioning boot-time switching between 4- and 5-level paging mode. But naive approach comes with cost. Numbers below are for kernel build, allmodconfig, 5 times. CONFIG_X86_5LEVEL=n: Performance counter stats for 'sh -c make -j100 -B -k >/dev/null' (5 runs): 17308719.892691 task-clock:u (msec) # 26.772 CPUs utilized ( +- 0.11% ) 0 context-switches:u # 0.000 K/sec 0 cpu-migrations:u # 0.000 K/sec 331,993,164 page-faults:u # 0.019 M/sec ( +- 0.01% ) 43,614,978,867,455 cycles:u # 2.520 GHz ( +- 0.01% ) 39,371,534,575,126 stalled-cycles-frontend:u # 90.27% frontend cycles idle ( +- 0.09% ) 28,363,350,152,428 instructions:u # 0.65 insn per cycle # 1.39 stalled cycles per insn ( +- 0.00% ) 6,316,784,066,413 branches:u # 364.948 M/sec ( +- 0.00% ) 250,808,144,781 branch-misses:u # 3.97% of all branches ( +- 0.01% ) 646.531974142 seconds time elapsed ( +- 1.15% ) CONFIG_X86_5LEVEL=y: Performance counter stats for 'sh -c make -j100 -B -k >/dev/null' (5 runs): 17411536.780625 task-clock:u (msec) # 26.426 CPUs utilized ( +- 0.10% ) 0 context-switches:u # 0.000 K/sec 0 cpu-migrations:u # 0.000 K/sec 331,868,663 page-faults:u # 0.019 M/sec ( +- 0.01% ) 43,865,909,056,301 cycles:u # 2.519 GHz ( +- 0.01% ) 39,740,130,365,581 stalled-cycles-frontend:u # 90.59% frontend cycles idle ( +- 0.05% ) 28,363,358,997,959 instructions:u # 0.65 insn per cycle # 1.40 stalled cycles per insn ( +- 0.00% ) 6,316,784,937,460 branches:u # 362.793 M/sec ( +- 0.00% ) 251,531,919,485 branch-misses:u # 3.98% of all branches ( +- 0.00% ) 658.886307752 seconds time elapsed ( +- 0.92% ) The patch tries to fix the performance regression by using cpu_feature_enabled(X86_FEATURE_LA57) instead of pgtable_l5_enabled in all hot code paths. 
These will statically patch the target code for additional performance. CONFIG_X86_5LEVEL=y + the patch: Performance counter stats for 'sh -c make -j100 -B -k >/dev/null' (5 runs): 17381990.268506 task-clock:u (msec) # 26.907 CPUs utilized ( +- 0.19% ) 0 context-switches:u # 0.000 K/sec 0 cpu-migrations:u # 0.000 K/sec 331,862,625 page-faults:u # 0.019 M/sec ( +- 0.01% ) 43,697,726,320,051 cycles:u # 2.514 GHz ( +- 0.03% ) 39,480,408,690,401 stalled-cycles-frontend:u # 90.35% frontend cycles idle ( +- 0.05% ) 28,363,394,221,388 instructions:u # 0.65 insn per cycle # 1.39 stalled cycles per insn ( +- 0.00% ) 6,316,794,985,573 branches:u # 363.410 M/sec ( +- 0.00% ) 251,013,232,547 branch-misses:u # 3.97% of all branches ( +- 0.01% ) 645.991174661 seconds time elapsed ( +- 1.19% ) Unfortunately, this approach doesn't help with text size: vmlinux.before .text size: 8190319 vmlinux.after .text size: 8200623 The .text section is increased by about 4k. Not sure if we can do anything about this. Signed-off-by: Kirill A. Shuemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Arjan van de Ven Cc: Borislav Petkov Cc: Borislav Petkov Cc: Dan Williams Cc: Dave Hansen Cc: David Woodhouse Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180216114948.68868-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.h | 5 +++++ arch/x86/entry/entry_64.S | 11 ++--------- arch/x86/include/asm/pgtable_64_types.h | 5 ++++- arch/x86/kernel/head64.c | 9 +++++++-- arch/x86/kernel/head_64.S | 2 +- arch/x86/mm/kasan_init_64.c | 6 ++++++ 6 files changed, 25 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 9d323dc6b159..4d369c308ed7 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -12,6 +12,11 @@ #undef CONFIG_PARAVIRT_SPINLOCKS #undef CONFIG_KASAN +#ifdef CONFIG_X86_5LEVEL +/* cpu_feature_enabled() cannot be used that early */ +#define pgtable_l5_enabled __pgtable_l5_enabled +#endif + #include #include #include diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 2c06348b7807..b18acdff9c3f 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -275,15 +275,8 @@ GLOBAL(entry_SYSCALL_64_after_hwframe) * depending on paging mode) in the address. 
*/ #ifdef CONFIG_X86_5LEVEL - testl $1, pgtable_l5_enabled(%rip) - jz 1f - shl $(64 - 57), %rcx - sar $(64 - 57), %rcx - jmp 2f -1: - shl $(64 - 48), %rcx - sar $(64 - 48), %rcx -2: + ALTERNATIVE "shl $(64 - 48), %rcx; sar $(64 - 48), %rcx", \ + "shl $(64 - 57), %rcx; sar $(64 - 57), %rcx", X86_FEATURE_LA57 #else shl $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx sar $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h index 68909a68e5b9..d5c21a382475 100644 --- a/arch/x86/include/asm/pgtable_64_types.h +++ b/arch/x86/include/asm/pgtable_64_types.h @@ -21,7 +21,10 @@ typedef unsigned long pgprotval_t; typedef struct { pteval_t pte; } pte_t; #ifdef CONFIG_X86_5LEVEL -extern unsigned int pgtable_l5_enabled; +extern unsigned int __pgtable_l5_enabled; +#ifndef pgtable_l5_enabled +#define pgtable_l5_enabled cpu_feature_enabled(X86_FEATURE_LA57) +#endif #else #define pgtable_l5_enabled 0 #endif diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 8161e719a20f..0c855deee165 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -32,6 +32,11 @@ #include #include +#ifdef CONFIG_X86_5LEVEL +#undef pgtable_l5_enabled +#define pgtable_l5_enabled __pgtable_l5_enabled +#endif + /* * Manage page tables very early on. */ @@ -40,8 +45,8 @@ static unsigned int __initdata next_early_pgt; pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX); #ifdef CONFIG_X86_5LEVEL -unsigned int pgtable_l5_enabled __ro_after_init; -EXPORT_SYMBOL(pgtable_l5_enabled); +unsigned int __pgtable_l5_enabled __ro_after_init; +EXPORT_SYMBOL(__pgtable_l5_enabled); unsigned int pgdir_shift __ro_after_init = 39; EXPORT_SYMBOL(pgdir_shift); unsigned int ptrs_per_p4d __ro_after_init = 1; diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S index 3e9de0fc97de..326c63129417 100644 --- a/arch/x86/kernel/head_64.S +++ b/arch/x86/kernel/head_64.S @@ -124,7 +124,7 @@ ENTRY(secondary_startup_64) /* Enable PAE mode, PGE and LA57 */ movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx #ifdef CONFIG_X86_5LEVEL - testl $1, pgtable_l5_enabled(%rip) + testl $1, __pgtable_l5_enabled(%rip) jz 1f orl $X86_CR4_LA57, %ecx 1: diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 0df0dd13a71d..d8ff013ea9d0 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -1,6 +1,12 @@ // SPDX-License-Identifier: GPL-2.0 #define DISABLE_BRANCH_PROFILING #define pr_fmt(fmt) "kasan: " fmt + +#ifdef CONFIG_X86_5LEVEL +/* Too early to use cpu_feature_enabled() */ +#define pgtable_l5_enabled __pgtable_l5_enabled +#endif + #include #include #include -- cgit v1.2.1 From a189c03235639a31343215f82b83b49985c55336 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 21 Feb 2018 11:18:53 +0100 Subject: x86/mce: Put private structures and definitions into the internal header ... because they don't need to be exported outside of MCE. 
Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-2-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 52 ---------------------------- arch/x86/kernel/cpu/mcheck/mce-internal.h | 56 +++++++++++++++++++++++++++++-- 2 files changed, 54 insertions(+), 54 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index 96ea4b5ba658..c3fb9a792e13 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -138,58 +138,6 @@ struct mce_log_buffer { struct mce entry[MCE_LOG_LEN]; }; -struct mca_config { - bool dont_log_ce; - bool cmci_disabled; - bool lmce_disabled; - bool ignore_ce; - bool disabled; - bool ser; - bool recovery; - bool bios_cmci_threshold; - u8 banks; - s8 bootlog; - int tolerant; - int monarch_timeout; - int panic_timeout; - u32 rip_msr; -}; - -struct mce_vendor_flags { - /* - * Indicates that overflow conditions are not fatal, when set. - */ - __u64 overflow_recov : 1, - - /* - * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and - * Recovery. It indicates support for data poisoning in HW and deferred - * error interrupts. - */ - succor : 1, - - /* - * (AMD) SMCA: This bit indicates support for Scalable MCA which expands - * the register space for each MCA bank and also increases number of - * banks. Also, to accommodate the new banks and registers, the MCA - * register space is moved to a new MSR range. - */ - smca : 1, - - __reserved_0 : 61; -}; - -struct mca_msr_regs { - u32 (*ctl) (int bank); - u32 (*status) (int bank); - u32 (*addr) (int bank); - u32 (*misc) (int bank); -}; - -extern struct mce_vendor_flags mce_flags; - -extern struct mca_msr_regs msr_ops; - enum mce_notifier_prios { MCE_PRIO_FIRST = INT_MAX, MCE_PRIO_SRAO = INT_MAX - 1, diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index e956eb267061..ed3c5aee22cf 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -113,8 +113,6 @@ static inline void mce_register_injector_chain(struct notifier_block *nb) { } static inline void mce_unregister_injector_chain(struct notifier_block *nb) { } #endif -extern struct mca_config mca_cfg; - #ifndef CONFIG_X86_64 /* * On 32-bit systems it would be difficult to safely unmap a poison page @@ -130,4 +128,58 @@ static inline void mce_unmap_kpfn(unsigned long pfn) {} #define mce_unmap_kpfn mce_unmap_kpfn #endif +struct mca_config { + bool dont_log_ce; + bool cmci_disabled; + bool lmce_disabled; + bool ignore_ce; + bool disabled; + bool ser; + bool recovery; + bool bios_cmci_threshold; + u8 banks; + s8 bootlog; + int tolerant; + int monarch_timeout; + int panic_timeout; + u32 rip_msr; +}; + +extern struct mca_config mca_cfg; + +struct mce_vendor_flags { + /* + * Indicates that overflow conditions are not fatal, when set. + */ + __u64 overflow_recov : 1, + + /* + * (AMD) SUCCOR stands for S/W UnCorrectable error COntainment and + * Recovery. It indicates support for data poisoning in HW and deferred + * error interrupts. + */ + succor : 1, + + /* + * (AMD) SMCA: This bit indicates support for Scalable MCA which expands + * the register space for each MCA bank and also increases number of + * banks. Also, to accommodate the new banks and registers, the MCA + * register space is moved to a new MSR range. 
+ */ + smca : 1, + + __reserved_0 : 61; +}; + +extern struct mce_vendor_flags mce_flags; + +struct mca_msr_regs { + u32 (*ctl) (int bank); + u32 (*status) (int bank); + u32 (*addr) (int bank); + u32 (*misc) (int bank); +}; + +extern struct mca_msr_regs msr_ops; + #endif /* __X86_MCE_INTERNAL_H__ */ -- cgit v1.2.1 From 09933946643bcc8e0a9bd4ede192ed854e22db8f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 21 Feb 2018 11:18:54 +0100 Subject: x86/mce: Convert 'struct mca_config' bools to a bitfield ... to save space when future flags are added. No functionality change. Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-3-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce-internal.h | 13 ++++++++----- arch/x86/kernel/cpu/mcheck/mce.c | 16 ++++++++-------- 2 files changed, 16 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce-internal.h b/arch/x86/kernel/cpu/mcheck/mce-internal.h index ed3c5aee22cf..374d1aa66952 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-internal.h +++ b/arch/x86/kernel/cpu/mcheck/mce-internal.h @@ -131,12 +131,15 @@ static inline void mce_unmap_kpfn(unsigned long pfn) {} struct mca_config { bool dont_log_ce; bool cmci_disabled; - bool lmce_disabled; bool ignore_ce; - bool disabled; - bool ser; - bool recovery; - bool bios_cmci_threshold; + + __u64 lmce_disabled : 1, + disabled : 1, + ser : 1, + recovery : 1, + bios_cmci_threshold : 1, + __reserved : 59; + u8 banks; s8 bootlog; int tolerant; diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8ff94d1e2dce..db5b1e4ebfd8 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -1511,7 +1511,7 @@ static int __mcheck_cpu_cap_init(void) mca_cfg.rip_msr = MSR_IA32_MCG_EIP; if (cap & MCG_SER_P) - mca_cfg.ser = true; + mca_cfg.ser = 1; return 0; } @@ -1819,12 +1819,12 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c) return; if (__mcheck_cpu_cap_init() < 0 || __mcheck_cpu_apply_quirks(c) < 0) { - mca_cfg.disabled = true; + mca_cfg.disabled = 1; return; } if (mce_gen_pool_init()) { - mca_cfg.disabled = true; + mca_cfg.disabled = 1; pr_emerg("Couldn't allocate MCE records pool!\n"); return; } @@ -1902,11 +1902,11 @@ static int __init mcheck_enable(char *str) if (*str == '=') str++; if (!strcmp(str, "off")) - cfg->disabled = true; + cfg->disabled = 1; else if (!strcmp(str, "no_cmci")) cfg->cmci_disabled = true; else if (!strcmp(str, "no_lmce")) - cfg->lmce_disabled = true; + cfg->lmce_disabled = 1; else if (!strcmp(str, "dont_log_ce")) cfg->dont_log_ce = true; else if (!strcmp(str, "ignore_ce")) @@ -1914,9 +1914,9 @@ static int __init mcheck_enable(char *str) else if (!strcmp(str, "bootlog") || !strcmp(str, "nobootlog")) cfg->bootlog = (str[0] == 'b'); else if (!strcmp(str, "bios_cmci_threshold")) - cfg->bios_cmci_threshold = true; + cfg->bios_cmci_threshold = 1; else if (!strcmp(str, "recovery")) - cfg->recovery = true; + cfg->recovery = 1; else if (isdigit(str[0])) { if (get_option(&str, &cfg->tolerant) == 2) get_option(&str, &(cfg->monarch_timeout)); @@ -2381,7 +2381,7 @@ device_initcall_sync(mcheck_init_device); */ static int __init mcheck_disable(char *str) { - mca_cfg.disabled = true; + mca_cfg.disabled = 1; return 1; } __setup("nomce", mcheck_disable); -- cgit v1.2.1 From b2fbf6f282147b42d669a4bd4a7b1de2e2d6a792 Mon Sep 17 00:00:00 2001 From: Borislav 
Petkov Date: Wed, 21 Feb 2018 11:18:55 +0100 Subject: x86/mce: Issue the 'mcelog --ascii' message only on !AMD mcelog cannot decode AMD MCEs. Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-4-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index db5b1e4ebfd8..d7dff234dd28 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -268,7 +268,9 @@ static void __print_mce(struct mce *m) static void print_mce(struct mce *m) { __print_mce(m); - pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); + + if (m->cpuvendor != X86_VENDOR_AMD) + pr_emerg_ratelimited(HW_ERR "Run the above through 'mcelog --ascii'\n"); } #define PANIC_TIMEOUT 5 /* 5 seconds */ -- cgit v1.2.1 From 4b1e84276a6172980c5bf39aa091ba13e90d6dad Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Wed, 21 Feb 2018 11:18:56 +0100 Subject: x86/mce/AMD: Collect error info even if valid bits are not set The MCA banks log error info into MCA_ADDR, MCA_MISC0, and MCA_SYND even if the corresponding valid bits are not set: "Error handlers should save the values in MCA_ADDR, MCA_MISC0, and MCA_SYND even if MCA_STATUS[AddrV], MCA_STATUS[MiscV], and MCA_STATUS[SyndV] are zero." Do so by setting those bits so that code down the MCE processing path doesn't need to be changed. Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-5-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index d7dff234dd28..3c9a25b93538 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -441,6 +441,20 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) if (mca_cfg.rip_msr) m->ip = mce_rdmsrl(mca_cfg.rip_msr); } + + /* + * Error handlers should save the values in MCA_ADDR, MCA_MISC0, and + * MCA_SYND even if MCA_STATUS[AddrV], MCA_STATUS[MiscV], and + * MCA_STATUS[SyndV] are zero. + */ + if (m->cpuvendor == X86_VENDOR_AMD) { + u64 status = MCI_STATUS_ADDRV | MCI_STATUS_MISCV; + + if (mce_flags.smca) + status |= MCI_STATUS_SYNDV; + + m->status |= status; + } } int mce_available(struct cpuinfo_x86 *c) -- cgit v1.2.1 From e5d6a126d4c473499f354254a15ca0c2d8c84ca3 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Wed, 21 Feb 2018 11:18:57 +0100 Subject: x86/mce/AMD: Pass the bank number to smca_get_bank_type() Pass the bank number to smca_get_bank_type() since that's all we need. Also, we should compare the bank number to MAX_NR_BANKS (size of the smca_banks array) not the number of bank types. Bank types are reused for multiple banks, so the number of types can be different from the number of banks in a system and thus we could return an invalid bank type. 
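As a rough illustration of the bounds check (a standalone userspace sketch, not the kernel code -- the type names, table size and contents below are made up): with only a few bank types shared by many banks, comparing the bank number against the number of types would reject most valid banks, so the check has to be against the size of the per-bank table.

#include <stdio.h>

/* Illustrative only: few bank types, reused across many banks. */
enum bank_type { TYPE_LS, TYPE_IF, TYPE_UMC, N_BANK_TYPES };

#define MAX_NR_BANKS 32                 /* size of the per-bank table */

static enum bank_type bank_table[MAX_NR_BANKS] = {
        [0] = TYPE_LS, [1] = TYPE_IF, [15] = TYPE_UMC,
};

static enum bank_type get_bank_type(unsigned int bank)
{
        /* Check against the table size; checking against N_BANK_TYPES
         * (3 here) would reject perfectly valid banks 3..31. */
        if (bank >= MAX_NR_BANKS)
                return N_BANK_TYPES;    /* sentinel: "no such bank" */

        return bank_table[bank];
}

int main(void)
{
        printf("bank 15 -> type %d\n", get_bank_type(15));
        printf("bank 40 -> type %d\n", get_bank_type(40));
        return 0;
}

The kernel version additionally returns the sentinel when the bank has no hwid attached, which this toy omits.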
Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov Cc: # 4.14.x Cc: # 4.14.x: 11cf887728a3 x86/MCE/AMD: Define a function to get SMCA bank type Cc: # 4.14.x: c6708d50f166 x86/MCE: Report only DRAM ECC as memory errors on AMD systems Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-6-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_amd.c | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 0f32ad242324..7fbb19cb1859 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -110,14 +110,14 @@ const char *smca_get_long_name(enum smca_bank_types t) } EXPORT_SYMBOL_GPL(smca_get_long_name); -static enum smca_bank_types smca_get_bank_type(struct mce *m) +static enum smca_bank_types smca_get_bank_type(unsigned int bank) { struct smca_bank *b; - if (m->bank >= N_SMCA_BANK_TYPES) + if (bank >= MAX_NR_BANKS) return N_SMCA_BANK_TYPES; - b = &smca_banks[m->bank]; + b = &smca_banks[bank]; if (!b->hwid) return N_SMCA_BANK_TYPES; @@ -760,7 +760,7 @@ bool amd_mce_is_memory_error(struct mce *m) u8 xec = (m->status >> 16) & 0x1f; if (mce_flags.smca) - return smca_get_bank_type(m) == SMCA_UMC && xec == 0x0; + return smca_get_bank_type(m->bank) == SMCA_UMC && xec == 0x0; return m->bank == 4 && xec == 0x8; } @@ -1063,7 +1063,7 @@ static struct kobj_type threshold_ktype = { static const char *get_name(unsigned int bank, struct threshold_block *b) { - unsigned int bank_type; + enum smca_bank_types bank_type; if (!mce_flags.smca) { if (b && bank == 4) @@ -1072,11 +1072,10 @@ static const char *get_name(unsigned int bank, struct threshold_block *b) return th_names[bank]; } - if (!smca_banks[bank].hwid) + bank_type = smca_get_bank_type(bank); + if (bank_type >= N_SMCA_BANK_TYPES) return NULL; - bank_type = smca_banks[bank].hwid->bank_type; - if (b && bank_type == SMCA_UMC) { if (b->block < ARRAY_SIZE(smca_umc_block_names)) return smca_umc_block_names[b->block]; -- cgit v1.2.1 From 68627a697c195937672ce07683094c72b1174786 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Wed, 21 Feb 2018 11:18:58 +0100 Subject: x86/mce/AMD, EDAC/mce_amd: Enumerate Reserved SMCA bank type Currently, bank 4 is reserved on Fam17h, so we chose not to initialize bank 4 in the smca_banks array. This means that when we check if a bank is initialized, like during boot or resume, we will see that bank 4 is not initialized and try to initialize it. This will cause a call trace, when resuming from suspend, due to rdmsr_*on_cpu() calls in the init path. The rdmsr_*on_cpu() calls issue an IPI but we're running with interrupts disabled. This triggers: WARNING: CPU: 0 PID: 11523 at kernel/smp.c:291 smp_call_function_single+0xdc/0xe0 ... Reserved banks will be read-as-zero, so their MCA_IPID register will be zero. So, like the smca_banks array, the threshold_banks array will not have an entry for a reserved bank since all its MCA_MISC* registers will be zero. Enumerate a "Reserved" bank type that matches on a HWID_MCATYPE of 0,0. Use the "Reserved" type when checking if a bank is reserved. It's possible that other bank numbers may be reserved on future systems. Don't try to find the block address on reserved banks. 
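The idea, as a standalone sketch (illustrative values and names, not the kernel tables): a reserved bank reads as zero, so its IPID matches a {hwid 0, mcatype 0} entry in the lookup table and callers can bail out before touching any further registers for it.

#include <stdint.h>
#include <stdio.h>

enum bank_type { BT_RESERVED, BT_LS, BT_IF, N_BANK_TYPES };

/* Rough stand-in for HWID_MCATYPE(): pack hwid and mcatype into one key. */
#define HWID_MCATYPE(hwid, mcatype) (((uint32_t)(hwid) << 16) | (mcatype))

static const struct { enum bank_type type; uint32_t key; } hwid_map[] = {
        { BT_RESERVED, HWID_MCATYPE(0x00, 0x0) },   /* read-as-zero bank */
        { BT_LS,       HWID_MCATYPE(0xB0, 0x0) },
        { BT_IF,       HWID_MCATYPE(0xB0, 0x1) },
};

static enum bank_type classify(uint32_t ipid_key)
{
        for (unsigned int i = 0; i < sizeof(hwid_map) / sizeof(hwid_map[0]); i++)
                if (hwid_map[i].key == ipid_key)
                        return hwid_map[i].type;
        return N_BANK_TYPES;
}

int main(void)
{
        /* A reserved bank's registers are all zero, so it lands on the
         * {0,0} entry and no further MSR accesses are attempted for it. */
        printf("ipid 0x000000 -> %d (reserved)\n", classify(0));
        printf("ipid 0xB00001 -> %d\n", classify(HWID_MCATYPE(0xB0, 0x1)));
        return 0;
}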
Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov Cc: # 4.14.x Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-7-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mce.h | 1 + arch/x86/kernel/cpu/mcheck/mce_amd.c | 7 +++++++ 2 files changed, 8 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mce.h b/arch/x86/include/asm/mce.h index c3fb9a792e13..8c7b3e5a2d01 100644 --- a/arch/x86/include/asm/mce.h +++ b/arch/x86/include/asm/mce.h @@ -294,6 +294,7 @@ enum smca_bank_types { SMCA_IF, /* Instruction Fetch */ SMCA_L2_CACHE, /* L2 Cache */ SMCA_DE, /* Decoder Unit */ + SMCA_RESERVED, /* Reserved */ SMCA_EX, /* Execution Unit */ SMCA_FP, /* Floating Point */ SMCA_L3_CACHE, /* L3 Cache */ diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 7fbb19cb1859..d8ba9d0c3f01 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -82,6 +82,7 @@ static struct smca_bank_name smca_names[] = { [SMCA_IF] = { "insn_fetch", "Instruction Fetch Unit" }, [SMCA_L2_CACHE] = { "l2_cache", "L2 Cache" }, [SMCA_DE] = { "decode_unit", "Decode Unit" }, + [SMCA_RESERVED] = { "reserved", "Reserved" }, [SMCA_EX] = { "execution_unit", "Execution Unit" }, [SMCA_FP] = { "floating_point", "Floating Point Unit" }, [SMCA_L3_CACHE] = { "l3_cache", "L3 Cache" }, @@ -127,6 +128,9 @@ static enum smca_bank_types smca_get_bank_type(unsigned int bank) static struct smca_hwid smca_hwid_mcatypes[] = { /* { bank_type, hwid_mcatype, xec_bitmap } */ + /* Reserved type */ + { SMCA_RESERVED, HWID_MCATYPE(0x00, 0x0), 0x0 }, + /* ZN Core (HWID=0xB0) MCA types */ { SMCA_LS, HWID_MCATYPE(0xB0, 0x0), 0x1FFFEF }, { SMCA_IF, HWID_MCATYPE(0xB0, 0x1), 0x3FFF }, @@ -433,6 +437,9 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi u32 addr = 0, offset = 0; if (mce_flags.smca) { + if (smca_get_bank_type(bank) == SMCA_RESERVED) + return addr; + if (!block) { addr = MSR_AMD64_SMCA_MCx_MISC(bank); } else { -- cgit v1.2.1 From 27bd59502702fe51d9eb00450a75b727ec6bfcb4 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Wed, 21 Feb 2018 11:18:59 +0100 Subject: x86/mce/AMD: Get address from already initialized block The block address is saved after the block is initialized when threshold_init_device() is called. Use the saved block address, if available, rather than trying to rediscover it. This will avoid a call trace, when resuming from suspend, due to the rdmsr_safe_on_cpu() call in get_block_address(). The rdmsr_safe_on_cpu() call issues an IPI but we're running with interrupts disabled. 
This triggers: WARNING: CPU: 0 PID: 11523 at kernel/smp.c:291 smp_call_function_single+0xdc/0xe0 Signed-off-by: Yazen Ghannam Signed-off-by: Borislav Petkov Cc: # 4.14.x Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180221101900.10326-8-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_amd.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index d8ba9d0c3f01..12bc2863a4d6 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -436,6 +436,21 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi { u32 addr = 0, offset = 0; + if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS)) + return addr; + + /* Get address from already initialized block. */ + if (per_cpu(threshold_banks, cpu)) { + struct threshold_bank *bankp = per_cpu(threshold_banks, cpu)[bank]; + + if (bankp && bankp->blocks) { + struct threshold_block *blockp = &bankp->blocks[block]; + + if (blockp) + return blockp->address; + } + } + if (mce_flags.smca) { if (smca_get_bank_type(bank) == SMCA_RESERVED) return addr; -- cgit v1.2.1 From 8a331f4a0863bea758561c921b94b4d28f7c4029 Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Wed, 21 Feb 2018 11:19:00 +0100 Subject: x86/mce/AMD: Carve out SMCA get_block_address() code Carve out the SMCA code in get_block_address() into a separate helper function. No functional change. Signed-off-by: Yazen Ghannam [ Save an indentation level. ] Signed-off-by: Borislav Petkov Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-edac Link: http://lkml.kernel.org/r/20180215210943.11530-4-Yazen.Ghannam@amd.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/mce_amd.c | 57 ++++++++++++++++++++---------------- 1 file changed, 31 insertions(+), 26 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 12bc2863a4d6..f7666eef4a87 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c @@ -431,6 +431,35 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) wrmsr(MSR_CU_DEF_ERR, low, high); } +static u32 smca_get_block_address(unsigned int cpu, unsigned int bank, + unsigned int block) +{ + u32 low, high; + u32 addr = 0; + + if (smca_get_bank_type(bank) == SMCA_RESERVED) + return addr; + + if (!block) + return MSR_AMD64_SMCA_MCx_MISC(bank); + + /* + * For SMCA enabled processors, BLKPTR field of the first MISC register + * (MCx_MISC0) indicates presence of additional MISC regs set (MISC1-4). 
+ */ + if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) + return addr; + + if (!(low & MCI_CONFIG_MCAX)) + return addr; + + if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && + (low & MASK_BLKPTR_LO)) + return MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); + + return addr; +} + static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, unsigned int bank, unsigned int block) { @@ -451,32 +480,8 @@ static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 hi } } - if (mce_flags.smca) { - if (smca_get_bank_type(bank) == SMCA_RESERVED) - return addr; - - if (!block) { - addr = MSR_AMD64_SMCA_MCx_MISC(bank); - } else { - /* - * For SMCA enabled processors, BLKPTR field of the - * first MISC register (MCx_MISC0) indicates presence of - * additional MISC register set (MISC1-4). - */ - u32 low, high; - - if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) - return addr; - - if (!(low & MCI_CONFIG_MCAX)) - return addr; - - if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && - (low & MASK_BLKPTR_LO)) - addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); - } - return addr; - } + if (mce_flags.smca) + return smca_get_block_address(cpu, bank, block); /* Fall back to method we used for older processors: */ switch (block) { -- cgit v1.2.1 From ed7158bae41044ff696e9aafd5ada46d391a5a2e Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Thu, 22 Feb 2018 10:54:55 +0100 Subject: treewide/trivial: Remove ';;$' typo noise On lkml suggestions were made to split up such trivial typo fixes into per subsystem patches: --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) struct efi_uga_draw_protocol *uga = NULL, *first_uga; efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; unsigned long nr_ugas; - u32 *handles = (u32 *)uga_handle;; + u32 *handles = (u32 *)uga_handle; efi_status_t status = EFI_INVALID_PARAMETER; int i; This patch is the result of the following script: $ sed -i 's/;;$/;/g' $(git grep -E ';;$' | grep "\.[ch]:" | grep -vwE 'for|ia64' | cut -d: -f1 | sort | uniq) ... followed by manual review to make sure it's all good. Splitting this up is just crazy talk, let's get over with this and just do it. 
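For illustration only (a throwaway userspace snippet, not code from the tree): the doubled semicolon the script targets is an empty statement at the end of a line, so removing it cannot change behaviour, while intentional uses of ';;' such as for (;;) loops are kept out of the sweep by the grep exclusions and the manual review.

#include <stdio.h>

int main(void)
{
        int handles = 0;;       /* stray ';' is just an empty statement */

        for (;;) {              /* legitimate ';;', not at end of line,
                                 * so s/;;$/;/ leaves it alone */
                handles++;
                break;
        }

        printf("%d\n", handles);        /* prints 1 before and after the
                                         * substitution */
        return 0;
}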
Reported-by: Pavel Machek Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/eboot.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 353e20c3f114..886a9115af62 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -439,7 +439,7 @@ setup_uga32(void **uga_handle, unsigned long size, u32 *width, u32 *height) struct efi_uga_draw_protocol *uga = NULL, *first_uga; efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; unsigned long nr_ugas; - u32 *handles = (u32 *)uga_handle;; + u32 *handles = (u32 *)uga_handle; efi_status_t status = EFI_INVALID_PARAMETER; int i; @@ -484,7 +484,7 @@ setup_uga64(void **uga_handle, unsigned long size, u32 *width, u32 *height) struct efi_uga_draw_protocol *uga = NULL, *first_uga; efi_guid_t uga_proto = EFI_UGA_PROTOCOL_GUID; unsigned long nr_ugas; - u64 *handles = (u64 *)uga_handle;; + u64 *handles = (u64 *)uga_handle; efi_status_t status = EFI_INVALID_PARAMETER; int i; -- cgit v1.2.1 From b21ebf2fb4cde1618915a97cc773e287ff49173e Mon Sep 17 00:00:00 2001 From: "H.J. Lu" Date: Wed, 7 Feb 2018 14:20:09 -0800 Subject: x86: Treat R_X86_64_PLT32 as R_X86_64_PC32 On i386, there are 2 types of PLTs, PIC and non-PIC. PIE and shared objects must use PIC PLT. To use PIC PLT, you need to load _GLOBAL_OFFSET_TABLE_ into EBX first. There is no need for that on x86-64 since x86-64 uses PC-relative PLT. On x86-64, for 32-bit PC-relative branches, we can generate PLT32 relocation, instead of PC32 relocation, which can also be used as a marker for 32-bit PC-relative branches. Linker can always reduce PLT32 relocation to PC32 if function is defined locally. Local functions should use PC32 relocation. As far as Linux kernel is concerned, R_X86_64_PLT32 can be treated the same as R_X86_64_PC32 since Linux kernel doesn't use PLT. R_X86_64_PLT32 for 32-bit PC-relative branches has been enabled in binutils master branch which will become binutils 2.31. [ hjl is working on having better documentation on this all, but a few more notes from him: "PLT32 relocation is used as marker for PC-relative branches. Because of EBX, it looks odd to generate PLT32 relocation on i386 when EBX doesn't have GOT. As for symbol resolution, PLT32 and PC32 relocations are almost interchangeable. But when linker sees PLT32 relocation against a protected symbol, it can resolved locally at link-time since it is used on a branch instruction. Linker can't do that for PC32 relocation" but for the kernel use, the two are basically the same, and this commit gets things building and working with the current binutils master - Linus ] Signed-off-by: H.J. 
Lu Signed-off-by: Linus Torvalds --- arch/x86/kernel/machine_kexec_64.c | 1 + arch/x86/kernel/module.c | 1 + arch/x86/tools/relocs.c | 3 +++ 3 files changed, 5 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 1f790cf9d38f..3b7427aa7d85 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -542,6 +542,7 @@ int arch_kexec_apply_relocations_add(const Elf64_Ehdr *ehdr, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: value -= (u64)address; *(u32 *)location = value; break; diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c index da0c160e5589..f58336af095c 100644 --- a/arch/x86/kernel/module.c +++ b/arch/x86/kernel/module.c @@ -191,6 +191,7 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, goto overflow; break; case R_X86_64_PC32: + case R_X86_64_PLT32: if (*(u32 *)loc != 0) goto invalid_relocation; val -= (u64)loc; diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c index 5d73c443e778..220e97841e49 100644 --- a/arch/x86/tools/relocs.c +++ b/arch/x86/tools/relocs.c @@ -770,9 +770,12 @@ static int do_reloc64(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym, break; case R_X86_64_PC32: + case R_X86_64_PLT32: /* * PC relative relocations don't need to be adjusted unless * referencing a percpu symbol. + * + * NB: R_X86_64_PLT32 can be treated as R_X86_64_PC32. */ if (is_percpu_sym(sym, symname)) add_reloc(&relocs32neg, offset); -- cgit v1.2.1 From a493a87f38cfa48caaa95c9347be2d914c6fdf29 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 22 Feb 2018 15:12:53 +0100 Subject: bpf, x64: implement retpoline for tail call Implement a retpoline [0] for the BPF tail call JIT'ing that converts the indirect jump via jmp %rax that is used to make the long jump into another JITed BPF image. Since this is subject to speculative execution, we need to control the transient instruction sequence here as well when CONFIG_RETPOLINE is set, and direct it into a pause + lfence loop. The latter aligns also with what gcc / clang emits (e.g. [1]). JIT dump after patch: # bpftool p d x i 1 0: (18) r2 = map[id:1] 2: (b7) r3 = 0 3: (85) call bpf_tail_call#12 4: (b7) r0 = 2 5: (95) exit With CONFIG_RETPOLINE: # bpftool p d j i 1 [...] 33: cmp %edx,0x24(%rsi) 36: jbe 0x0000000000000072 |* 38: mov 0x24(%rbp),%eax 3e: cmp $0x20,%eax 41: ja 0x0000000000000072 | 43: add $0x1,%eax 46: mov %eax,0x24(%rbp) 4c: mov 0x90(%rsi,%rdx,8),%rax 54: test %rax,%rax 57: je 0x0000000000000072 | 59: mov 0x28(%rax),%rax 5d: add $0x25,%rax 61: callq 0x000000000000006d |+ 66: pause | 68: lfence | 6b: jmp 0x0000000000000066 | 6d: mov %rax,(%rsp) | 71: retq | 72: mov $0x2,%eax [...] * relative fall-through jumps in error case + retpoline for indirect jump Without CONFIG_RETPOLINE: # bpftool p d j i 1 [...] 33: cmp %edx,0x24(%rsi) 36: jbe 0x0000000000000063 |* 38: mov 0x24(%rbp),%eax 3e: cmp $0x20,%eax 41: ja 0x0000000000000063 | 43: add $0x1,%eax 46: mov %eax,0x24(%rbp) 4c: mov 0x90(%rsi,%rdx,8),%rax 54: test %rax,%rax 57: je 0x0000000000000063 | 59: mov 0x28(%rax),%rax 5d: add $0x25,%rax 61: jmpq *%rax |- 63: mov $0x2,%eax [...] 
* relative fall-through jumps in error case - plain indirect jump as before [0] https://support.google.com/faqs/answer/7625886 [1] https://github.com/gcc-mirror/gcc/commit/a31e654fa107be968b802786d747e962c2fcdb2b Signed-off-by: Daniel Borkmann Signed-off-by: Alexei Starovoitov --- arch/x86/include/asm/nospec-branch.h | 37 ++++++++++++++++++++++++++++++++++++ arch/x86/net/bpf_jit_comp.c | 9 +++++---- 2 files changed, 42 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 76b058533e47..81a1be326571 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -177,4 +177,41 @@ static inline void indirect_branch_prediction_barrier(void) } #endif /* __ASSEMBLY__ */ + +/* + * Below is used in the eBPF JIT compiler and emits the byte sequence + * for the following assembly: + * + * With retpolines configured: + * + * callq do_rop + * spec_trap: + * pause + * lfence + * jmp spec_trap + * do_rop: + * mov %rax,(%rsp) + * retq + * + * Without retpolines configured: + * + * jmp *%rax + */ +#ifdef CONFIG_RETPOLINE +# define RETPOLINE_RAX_BPF_JIT_SIZE 17 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT1_off32(0xE8, 7); /* callq do_rop */ \ + /* spec_trap: */ \ + EMIT2(0xF3, 0x90); /* pause */ \ + EMIT3(0x0F, 0xAE, 0xE8); /* lfence */ \ + EMIT2(0xEB, 0xF9); /* jmp spec_trap */ \ + /* do_rop: */ \ + EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */ \ + EMIT1(0xC3); /* retq */ +#else +# define RETPOLINE_RAX_BPF_JIT_SIZE 2 +# define RETPOLINE_RAX_BPF_JIT() \ + EMIT2(0xFF, 0xE0); /* jmp *%rax */ +#endif + #endif /* _ASM_X86_NOSPEC_BRANCH_H_ */ diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 4923d92f918d..45e4eb5bcbb2 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -13,6 +13,7 @@ #include #include #include +#include #include /* @@ -290,7 +291,7 @@ static void emit_bpf_tail_call(u8 **pprog) EMIT2(0x89, 0xD2); /* mov edx, edx */ EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */ offsetof(struct bpf_array, map.max_entries)); -#define OFFSET1 43 /* number of bytes to jump */ +#define OFFSET1 (41 + RETPOLINE_RAX_BPF_JIT_SIZE) /* number of bytes to jump */ EMIT2(X86_JBE, OFFSET1); /* jbe out */ label1 = cnt; @@ -299,7 +300,7 @@ static void emit_bpf_tail_call(u8 **pprog) */ EMIT2_off32(0x8B, 0x85, 36); /* mov eax, dword ptr [rbp + 36] */ EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ -#define OFFSET2 32 +#define OFFSET2 (30 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JA, OFFSET2); /* ja out */ label2 = cnt; EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ @@ -313,7 +314,7 @@ static void emit_bpf_tail_call(u8 **pprog) * goto out; */ EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */ -#define OFFSET3 10 +#define OFFSET3 (8 + RETPOLINE_RAX_BPF_JIT_SIZE) EMIT2(X86_JE, OFFSET3); /* je out */ label3 = cnt; @@ -326,7 +327,7 @@ static void emit_bpf_tail_call(u8 **pprog) * rdi == ctx (1st arg) * rax == prog->bpf_func + prologue_size */ - EMIT2(0xFF, 0xE0); /* jmp rax */ + RETPOLINE_RAX_BPF_JIT(); /* out: */ BUILD_BUG_ON(cnt - label1 != OFFSET1); -- cgit v1.2.1 From e84cf6aa501c58bf4bf451f1e425192ec090aed2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 22 Feb 2018 12:08:06 +0100 Subject: x86/apic/vector: Handle vector release on CPU unplug correctly When a irq vector is replaced, then the previous vector is normally released when the first interrupt happens on the new vector. 
If the target CPU of the previous vector is already offline when the new vector is installed, then the previous vector is silently discarded, which leads to accounting issues causing suspend failures and other problems. Adjust the logic so that the previous vector is freed in the underlying matrix allocator to ensure that the accounting stays correct. Fixes: 69cde0004a4b ("x86/vector: Use matrix allocator for vector assignment") Reported-by: Yuriy Vostrikov Signed-off-by: Thomas Gleixner Tested-by: Yuriy Vostrikov Cc: Peter Zijlstra Cc: Randy Dunlap Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180222112316.930791749@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/vector.c | 25 ++++++++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c index 3cc471beb50b..bb6f7a2148d7 100644 --- a/arch/x86/kernel/apic/vector.c +++ b/arch/x86/kernel/apic/vector.c @@ -134,21 +134,40 @@ static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, { struct apic_chip_data *apicd = apic_chip_data(irqd); struct irq_desc *desc = irq_data_to_desc(irqd); + bool managed = irqd_affinity_is_managed(irqd); lockdep_assert_held(&vector_lock); trace_vector_update(irqd->irq, newvec, newcpu, apicd->vector, apicd->cpu); - /* Setup the vector move, if required */ - if (apicd->vector && cpu_online(apicd->cpu)) { + /* + * If there is no vector associated or if the associated vector is + * the shutdown vector, which is associated to make PCI/MSI + * shutdown mode work, then there is nothing to release. Clear out + * prev_vector for this and the offlined target case. + */ + apicd->prev_vector = 0; + if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) + goto setnew; + /* + * If the target CPU of the previous vector is online, then mark + * the vector as move in progress and store it for cleanup when the + * first interrupt on the new vector arrives. If the target CPU is + * offline then the regular release mechanism via the cleanup + * vector is not possible and the vector can be immediately freed + * in the underlying matrix allocator. + */ + if (cpu_online(apicd->cpu)) { apicd->move_in_progress = true; apicd->prev_vector = apicd->vector; apicd->prev_cpu = apicd->cpu; } else { - apicd->prev_vector = 0; + irq_matrix_free(vector_matrix, apicd->cpu, apicd->vector, + managed); } +setnew: apicd->vector = newvec; apicd->cpu = newcpu; BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); -- cgit v1.2.1 From 36e74d355297dde6e69a39c838d24710e442babe Mon Sep 17 00:00:00 2001 From: Wang Hui Date: Thu, 22 Feb 2018 19:26:03 -0800 Subject: x86/intel_rdt: Fix incorrect returned value when creating rdgroup sub-directory in resctrl file system MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If no monitoring feature is detected because all monitoring features are disabled during boot time or there is no monitoring feature in hardware, creating rdtgroup sub-directory by "mkdir" command reports error: mkdir: cannot create directory ‘/sys/fs/resctrl/p1’: No such file or directory But the sub-directory actually is generated and content is correct: cpus cpus_list schemata tasks The error is because rdtgroup_mkdir_ctrl_mon() returns non zero value after the sub-directory is created and the returned value is reported as an error to user. 
Clear the returned value to report to user that the sub-directory is actually created successfully. Signed-off-by: Wang Hui Signed-off-by: Zhang Yanfei Signed-off-by: Fenghua Yu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Ravi V Shankar Cc: Thomas Gleixner Cc: Tony Luck Cc: Vikas Cc: Xiaochen Shen Link: http://lkml.kernel.org/r/1519356363-133085-1-git-send-email-fenghua.yu@intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index bdab7d2f51af..fca759d272a1 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -1804,6 +1804,7 @@ static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn, goto out_common_fail; } closid = ret; + ret = 0; rdtgrp->closid = closid; list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups); -- cgit v1.2.1 From 69c42d493db452ea87c1ac56e83c978512f4e6ec Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 22 Feb 2018 14:59:21 +0200 Subject: x86/pci: Simplify code by using the new dmi_get_bios_year() helper ...instead of open coding its functionality. No changes in functionality. Signed-off-by: Andy Shevchenko Cc: Bjorn Helgaas Cc: Jean Delvare Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J. Wysocki Cc: Thomas Gleixner Cc: linux-acpi@vger.kernel.org Cc: linux-pci@vger.kernel.org Link: http://lkml.kernel.org/r/20180222125923.57385-2-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/pci/acpi.c | 8 ++------ arch/x86/pci/direct.c | 5 ++--- arch/x86/pci/mmconfig-shared.c | 9 ++------- 3 files changed, 6 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 7df49c40665e..00e60de30328 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -140,12 +140,8 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { void __init pci_acpi_crs_quirks(void) { - int year; - - if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008) { - if (iomem_resource.end <= 0xffffffff) - pci_use_crs = false; - } + if ((dmi_get_bios_year() < 2008) && (iomem_resource.end <= 0xffffffff)) + pci_use_crs = false; dmi_check_system(pci_crs_quirks); diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c index 2d9503323d10..a51074c55982 100644 --- a/arch/x86/pci/direct.c +++ b/arch/x86/pci/direct.c @@ -195,14 +195,13 @@ static const struct pci_raw_ops pci_direct_conf2 = { static int __init pci_sanity_check(const struct pci_raw_ops *o) { u32 x = 0; - int year, devfn; + int devfn; if (pci_probe & PCI_NO_CHECKS) return 1; /* Assume Type 1 works for newer systems. This handles machines that don't have anything on PCI Bus 0. 
*/ - dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL); - if (year >= 2001) + if (dmi_get_bios_year() >= 2001) return 1; for (devfn = 0; devfn < 0x100; devfn++) { diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 96684d0adcf9..0b40482578b8 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -547,19 +547,14 @@ static void __init pci_mmcfg_reject_broken(int early) static int __init acpi_mcfg_check_entry(struct acpi_table_mcfg *mcfg, struct acpi_mcfg_allocation *cfg) { - int year; - if (cfg->address < 0xFFFFFFFF) return 0; if (!strncmp(mcfg->header.oem_id, "SGI", 3)) return 0; - if (mcfg->header.revision >= 1) { - if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && - year >= 2010) - return 0; - } + if ((mcfg->header.revision >= 1) && (dmi_get_bios_year() >= 2010)) + return 0; pr_err(PREFIX "MCFG region for %04x [bus %02x-%02x] at %#llx " "is above 4GB, ignored\n", cfg->pci_segment, -- cgit v1.2.1 From 4596749339e06dc7a424fc08a15eded850ed78b7 Mon Sep 17 00:00:00 2001 From: Samuel Neves Date: Wed, 21 Feb 2018 20:50:36 +0000 Subject: x86/topology: Update the 'cpu cores' field in /proc/cpuinfo correctly across CPU hotplug operations Without this fix, /proc/cpuinfo will display an incorrect amount of CPU cores, after bringing them offline and online again, as exemplified below: $ cat /proc/cpuinfo | grep cores cpu cores : 4 cpu cores : 8 cpu cores : 8 cpu cores : 20 cpu cores : 4 cpu cores : 3 cpu cores : 2 cpu cores : 2 This patch fixes this by always zeroing the booted_cores variable upon turning off a logical CPU. Tested-by: Dou Liyang Signed-off-by: Samuel Neves Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: jgross@suse.com Cc: luto@kernel.org Cc: prarit@redhat.com Cc: vkuznets@redhat.com Link: http://lkml.kernel.org/r/20180221205036.5244-1-sneves@dei.uc.pt Signed-off-by: Ingo Molnar --- arch/x86/kernel/smpboot.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index 9eee25d07586..ff99e2b6fc54 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -1437,6 +1437,7 @@ static void remove_siblinginfo(int cpu) cpumask_clear(topology_sibling_cpumask(cpu)); cpumask_clear(topology_core_cpumask(cpu)); c->cpu_core_id = 0; + c->booted_cores = 0; cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); recompute_smt_state(); } -- cgit v1.2.1 From 0b2e9904c15963e715d33e5f3f1387f17d19333a Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Fri, 23 Feb 2018 23:29:32 +0100 Subject: KVM: x86: move LAPIC initialization after VMCS creation The initial reset of the local APIC is performed before the VMCS has been created, but it tries to do a vmwrite: vmwrite error: reg 810 value 4a00 (err 18944) CPU: 54 PID: 38652 Comm: qemu-kvm Tainted: G W I 4.16.0-0.rc2.git0.1.fc28.x86_64 #1 Hardware name: Intel Corporation S2600CW/S2600CW, BIOS SE5C610.86B.01.01.0003.090520141303 09/05/2014 Call Trace: vmx_set_rvi [kvm_intel] vmx_hwapic_irr_update [kvm_intel] kvm_lapic_reset [kvm] kvm_create_lapic [kvm] kvm_arch_vcpu_init [kvm] kvm_vcpu_init [kvm] vmx_create_vcpu [kvm_intel] kvm_vm_ioctl [kvm] Move it later, after the VMCS has been created. 
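A rough standalone model of the ordering problem (toy code, not KVM internals; the struct and function names are made up): the APIC reset writes through state that only exists once the VMCS has been created and loaded, so it must run after that point.

#include <stdio.h>
#include <stdlib.h>

struct vcpu { unsigned int *vmcs_field; };      /* stands in for the VMCS */

static int vmcs_write(struct vcpu *v, unsigned int val)
{
        if (!v->vmcs_field) {
                fprintf(stderr, "vmwrite error: no VMCS loaded\n");
                return -1;
        }
        *v->vmcs_field = val;
        return 0;
}

static void lapic_reset(struct vcpu *v)
{
        vmcs_write(v, 0x4a00);
}

static void create_vmcs(struct vcpu *v)
{
        v->vmcs_field = calloc(1, sizeof(*v->vmcs_field));
}

int main(void)
{
        struct vcpu v = { 0 };

        lapic_reset(&v);        /* old order: fails, like the splat above */
        create_vmcs(&v);
        lapic_reset(&v);        /* new order: the write has a target */
        free(v.vmcs_field);
        return 0;
}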
Fixes: 4191db26b714 ("KVM: x86: Update APICv on APIC reset") Cc: stable@vger.kernel.org Cc: Liran Alon Signed-off-by: Paolo Bonzini --- arch/x86/kvm/lapic.c | 1 - arch/x86/kvm/x86.c | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 924ac8ce9d50..cc5fe7a50dde 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2165,7 +2165,6 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) */ vcpu->arch.apic_base = MSR_IA32_APICBASE_ENABLE; static_key_slow_inc(&apic_sw_disabled.key); /* sw disabled at reset */ - kvm_lapic_reset(vcpu, false); kvm_iodevice_init(&apic->dev, &apic_mmio_ops); return 0; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c8a0b545ac20..ca90d9515137 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -7975,6 +7975,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); + kvm_lapic_reset(vcpu, false); kvm_mmu_setup(vcpu); vcpu_put(vcpu); return 0; -- cgit v1.2.1 From 99158246208b82c0700d09a40d719bb56b32c607 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Wed, 31 Jan 2018 18:12:50 +0100 Subject: KVM: nVMX: preserve SECONDARY_EXEC_DESC without UMIP MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit L1 might want to use SECONDARY_EXEC_DESC, so we must not clear the VMCS bit if UMIP is not being emulated. We must still set the bit when emulating UMIP as the feature can be passed to L2 where L0 will do the emulation and because L2 can change CR4 without a VM exit, we should clear the bit if UMIP is disabled. Fixes: 0367f205a3b7 ("KVM: vmx: add support for emulating UMIP") Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index f427723dc7db..2d2cf8c1f0f4 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4485,7 +4485,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_DESC); hw_cr4 &= ~X86_CR4_UMIP; - } else + } else if (!is_guest_mode(vcpu) || + !nested_cpu_has2(get_vmcs12(vcpu), SECONDARY_EXEC_DESC)) vmcs_clear_bits(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_DESC); -- cgit v1.2.1 From 103c763c72dd2df3e8c91f2d7ec88f98ed391111 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 31 Jan 2018 17:30:21 -0800 Subject: KVM/x86: remove WARN_ON() for when vm_munmap() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On x86, special KVM memslots such as the TSS region have anonymous memory mappings created on behalf of userspace, and these mappings are removed when the VM is destroyed. It is however possible for removing these mappings via vm_munmap() to fail. This can most easily happen if the thread receives SIGKILL while it's waiting to acquire ->mmap_sem. This triggers the 'WARN_ON(r < 0)' in __x86_set_memory_region(). syzkaller was able to hit this, using 'exit()' to send the SIGKILL. Note that while the vm_munmap() failure results in the mapping not being removed immediately, it is not leaked forever but rather will be freed when the process exits. It's not really possible to handle this failure properly, so almost every other caller of vm_munmap() doesn't check the return value. 
It's a limitation of having the kernel manage these mappings rather than userspace. So just remove the WARN_ON() so that users can't spam the kernel log with this warning. Fixes: f0d648bdf0a5 ("KVM: x86: map/unmap private slots in __x86_set_memory_region") Reported-by: syzbot Signed-off-by: Eric Biggers Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index ca90d9515137..96edda878dbf 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8461,10 +8461,8 @@ int __x86_set_memory_region(struct kvm *kvm, int id, gpa_t gpa, u32 size) return r; } - if (!size) { - r = vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); - WARN_ON(r < 0); - } + if (!size) + vm_munmap(old.userspace_addr, old.npages * PAGE_SIZE); return 0; } -- cgit v1.2.1 From 135a06c3a515bbd17729eb04f4f26316d48363d7 Mon Sep 17 00:00:00 2001 From: Chao Gao Date: Sun, 11 Feb 2018 10:06:30 +0800 Subject: KVM: nVMX: Don't halt vcpu when L1 is injecting events to L2 Although L2 is in halt state, it will be in the active state after VM entry if the VM entry is vectoring according to SDM 26.6.2 Activity State. Halting the vcpu here means the event won't be injected to L2 and this decision isn't reported to L1. Thus L0 drops an event that should be injected to L2. Cc: Liran Alon Reviewed-by: Liran Alon Signed-off-by: Chao Gao Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2d2cf8c1f0f4..67b028d8e726 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11197,7 +11197,12 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch) if (ret) return ret; - if (vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) + /* + * If we're entering a halted L2 vcpu and the L2 vcpu won't be woken + * by event injection, halt vcpu. + */ + if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) && + !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK)) return kvm_vcpu_halt(vcpu); vmx->nested.nested_run_pending = 1; -- cgit v1.2.1 From 95e057e25892eaa48cad1e2d637b80d0f1a4fac5 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 8 Feb 2018 15:32:45 +0800 Subject: KVM: X86: Fix SMRAM accessing even if VM is shutdown MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by syzkaller: WARNING: CPU: 6 PID: 2434 at arch/x86/kvm/vmx.c:6660 handle_ept_misconfig+0x54/0x1e0 [kvm_intel] CPU: 6 PID: 2434 Comm: repro_test Not tainted 4.15.0+ #4 RIP: 0010:handle_ept_misconfig+0x54/0x1e0 [kvm_intel] Call Trace: vmx_handle_exit+0xbd/0xe20 [kvm_intel] kvm_arch_vcpu_ioctl_run+0xdaf/0x1d50 [kvm] kvm_vcpu_ioctl+0x3e9/0x720 [kvm] do_vfs_ioctl+0xa4/0x6a0 SyS_ioctl+0x79/0x90 entry_SYSCALL_64_fastpath+0x25/0x9c The testcase creates a first thread to issue KVM_SMI ioctl, and then creates a second thread to mmap and operate on the same vCPU. This triggers a race condition when running the testcase with multiple threads. Sometimes one thread exits with a triple fault while another thread mmaps and operates on the same vCPU. Because CS=0x3000/IP=0x8000 is not mapped, accessing the SMI handler results in an EPT misconfig. This patch fixes it by returning RET_PF_EMULATE in kvm_handle_bad_page(), which will go on to cause an emulation failure and an exit with KVM_EXIT_INTERNAL_ERROR. 
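A minimal sketch of the control-flow change (a standalone toy, not the KVM code): instead of bubbling a raw -EFAULT up to a path that only expects the RET_PF_* dispositions, the bad-page case is folded into the emulate disposition, which then fails cleanly as an internal error reported to userspace.

#include <stdio.h>

enum pf_ret { RET_PF_RETRY, RET_PF_EMULATE, RET_PF_INVALID };

static enum pf_ret handle_bad_page(int hwpoison)
{
        if (hwpoison)
                return RET_PF_RETRY;    /* poisoned page handled elsewhere */

        /* Old code returned -EFAULT here, a value the fault-handling
         * path above does not treat as a valid disposition. */
        return RET_PF_EMULATE;
}

static const char *run_vcpu(int hwpoison)
{
        switch (handle_bad_page(hwpoison)) {
        case RET_PF_RETRY:      return "retry the guest access";
        case RET_PF_EMULATE:    return "emulate -> KVM_EXIT_INTERNAL_ERROR";
        default:                return "unexpected";
        }
}

int main(void)
{
        printf("%s\n", run_vcpu(0));
        return 0;
}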
Reported-by: syzbot+c1d9517cab094dae65e446c0c5b4de6c40f4dc58@syzkaller.appspotmail.com Cc: Paolo Bonzini Cc: Radim Krčmář Cc: stable@vger.kernel.org Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 8eca1d04aeb8..6c5a82c74750 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -3029,7 +3029,7 @@ static int kvm_handle_bad_page(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn) return RET_PF_RETRY; } - return -EFAULT; + return RET_PF_EMULATE; } static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, -- cgit v1.2.1 From f75e4924f0152be747bf04c9d16bb23fd8baf5f9 Mon Sep 17 00:00:00 2001 From: Sebastian Ott Date: Thu, 22 Feb 2018 13:04:39 +0100 Subject: kvm: fix warning for non-x86 builds Fix the following sparse warning by moving the prototype of kvm_arch_mmu_notifier_invalidate_range() to linux/kvm_host.h . CHECK arch/s390/kvm/../../../virt/kvm/kvm_main.c arch/s390/kvm/../../../virt/kvm/kvm_main.c:138:13: warning: symbol 'kvm_arch_mmu_notifier_invalidate_range' was not declared. Should it be static? Signed-off-by: Sebastian Ott Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_host.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index dd6f57a54a26..0a9e330b34f0 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1464,7 +1464,4 @@ static inline int kvm_cpu_get_apicid(int mps_cpu) #define put_smstate(type, buf, offset, val) \ *(type *)((buf) + (offset) - 0x7e00) = val -void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm, - unsigned long start, unsigned long end); - #endif /* _ASM_X86_KVM_HOST_H */ -- cgit v1.2.1 From fe2a3027e74e40a3ece3a4c1e4e51403090a907a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 1 Feb 2018 22:16:21 +0100 Subject: KVM: x86: fix backward migration with async_PF MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Guests on new hypersiors might set KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT bit when enabling async_PF, but this bit is reserved on old hypervisors, which results in a failure upon migration. To avoid breaking different cases, we are checking for CPUID feature bit before enabling the feature and nothing else. Fixes: 52a5c155cf79 ("KVM: async_pf: Let guest support delivery of async_pf from guest mode") Cc: Reviewed-by: Wanpeng Li Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář Signed-off-by: Paolo Bonzini --- arch/x86/include/uapi/asm/kvm_para.h | 1 + arch/x86/kernel/kvm.c | 8 ++++---- arch/x86/kvm/cpuid.c | 3 ++- 3 files changed, 7 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/uapi/asm/kvm_para.h b/arch/x86/include/uapi/asm/kvm_para.h index 7a2ade4aa235..6cfa9c8cb7d6 100644 --- a/arch/x86/include/uapi/asm/kvm_para.h +++ b/arch/x86/include/uapi/asm/kvm_para.h @@ -26,6 +26,7 @@ #define KVM_FEATURE_PV_EOI 6 #define KVM_FEATURE_PV_UNHALT 7 #define KVM_FEATURE_PV_TLB_FLUSH 9 +#define KVM_FEATURE_ASYNC_PF_VMEXIT 10 /* The last 8 bits are used to indicate how to interpret the flags field * in pvclock structure. If no bits are set, all flags are ignored. 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 4e37d1a851a6..971babe964d2 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -341,10 +341,10 @@ static void kvm_guest_cpu_init(void) #endif pa |= KVM_ASYNC_PF_ENABLED; - /* Async page fault support for L1 hypervisor is optional */ - if (wrmsr_safe(MSR_KVM_ASYNC_PF_EN, - (pa | KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT) & 0xffffffff, pa >> 32) < 0) - wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_VMEXIT)) + pa |= KVM_ASYNC_PF_DELIVERY_AS_PF_VMEXIT; + + wrmsrl(MSR_KVM_ASYNC_PF_EN, pa); __this_cpu_write(apf_reason.enabled, 1); printk(KERN_INFO"KVM setup async PF for cpu %d\n", smp_processor_id()); diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index a0c5a69bc7c4..b671fc2d0422 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -607,7 +607,8 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function, (1 << KVM_FEATURE_PV_EOI) | (1 << KVM_FEATURE_CLOCKSOURCE_STABLE_BIT) | (1 << KVM_FEATURE_PV_UNHALT) | - (1 << KVM_FEATURE_PV_TLB_FLUSH); + (1 << KVM_FEATURE_PV_TLB_FLUSH) | + (1 << KVM_FEATURE_ASYNC_PF_VMEXIT); if (sched_info_on()) entry->eax |= (1 << KVM_FEATURE_STEAL_TIME); -- cgit v1.2.1 From afdc3f588850a6fbc996205ee2d472eb4426afb3 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Wed, 17 Jan 2018 11:46:54 +0800 Subject: x86/kvm: Make parse_no_xxx __init for kvm The early_param() is only called during kernel initialization, So Linux marks the functions of it with __init macro to save memory. But it forgot to mark the parse_no_kvmapf/stealacc/kvmclock_vsyscall, So, Make them __init as well. Cc: Paolo Bonzini Cc: rkrcmar@redhat.com Cc: kvm@vger.kernel.org Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Juergen Gross Cc: x86@kernel.org Signed-off-by: Dou Liyang Signed-off-by: Paolo Bonzini --- arch/x86/kernel/kvm.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 971babe964d2..ee7d5c951864 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -49,7 +49,7 @@ static int kvmapf = 1; -static int parse_no_kvmapf(char *arg) +static int __init parse_no_kvmapf(char *arg) { kvmapf = 0; return 0; @@ -58,7 +58,7 @@ static int parse_no_kvmapf(char *arg) early_param("no-kvmapf", parse_no_kvmapf); static int steal_acc = 1; -static int parse_no_stealacc(char *arg) +static int __init parse_no_stealacc(char *arg) { steal_acc = 0; return 0; @@ -67,7 +67,7 @@ static int parse_no_stealacc(char *arg) early_param("no-steal-acc", parse_no_stealacc); static int kvmclock_vsyscall = 1; -static int parse_no_kvmclock_vsyscall(char *arg) +static int __init parse_no_kvmclock_vsyscall(char *arg) { kvmclock_vsyscall = 0; return 0; -- cgit v1.2.1 From 4f2f61fc507176edd65826fbedc8987dea29b9d5 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Sun, 4 Feb 2018 22:57:58 -0800 Subject: KVM: X86: Avoid traversing all the cpus for pv tlb flush when steal time is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Avoid traversing all the cpus for pv tlb flush when steal time is disabled since pv tlb flush depends on the field in steal time for shared data. 
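The gist of the check as a standalone sketch (the feature-bit values here are illustrative stand-ins for the KVM_FEATURE_* constants): the PV flush path is only wired up when both bits are present, because the shared data it consults lives in the steal-time structure.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative feature bits -- stand-ins for the KVM_FEATURE_* values. */
#define FEAT_STEAL_TIME         (1u << 5)
#define FEAT_PV_TLB_FLUSH       (1u << 9)

static bool has(unsigned int feats, unsigned int f)
{
        return (feats & f) == f;
}

static const char *pick_flush_tlb_others(unsigned int feats)
{
        /* Only use the PV-assisted flush when the steal-time area it
         * relies on is also available; otherwise keep the native path. */
        if (has(feats, FEAT_PV_TLB_FLUSH) && has(feats, FEAT_STEAL_TIME))
                return "kvm_flush_tlb_others";

        return "native_flush_tlb_others";
}

int main(void)
{
        printf("%s\n", pick_flush_tlb_others(FEAT_PV_TLB_FLUSH));
        printf("%s\n", pick_flush_tlb_others(FEAT_PV_TLB_FLUSH | FEAT_STEAL_TIME));
        return 0;
}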
Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kernel/kvm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index ee7d5c951864..bc1a27280c4b 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -545,7 +545,8 @@ static void __init kvm_guest_init(void) pv_time_ops.steal_clock = kvm_steal_clock; } - if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) + if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && + !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -633,7 +634,8 @@ static __init int kvm_setup_pv_tlb_flush(void) { int cpu; - if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH)) { + if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && + !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { for_each_possible_cpu(cpu) { zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), GFP_KERNEL, cpu_to_node(cpu)); -- cgit v1.2.1 From 3e233385ef4a217a2812115ed84d4be36eb16817 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Fri, 23 Feb 2018 12:36:50 -0600 Subject: KVM: SVM: no need to call access_ok() in LAUNCH_MEASURE command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Using the access_ok() to validate the input before issuing the SEV command does not buy us anything in this case. If userland is giving us a garbage pointer then copy_to_user() will catch it when we try to return the measurement. Suggested-by: Al Viro Fixes: 0d0736f76347 (KVM: SVM: Add support for KVM_SEV_LAUNCH_MEASURE ...) Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Borislav Petkov Cc: Tom Lendacky Cc: linux-kernel@vger.kernel.org Cc: Joerg Roedel Signed-off-by: Brijesh Singh Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index b3e488a74828..ca69d53d7e6d 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6236,16 +6236,18 @@ e_free: static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) { + void __user *measure = (void __user *)(uintptr_t)argp->data; struct kvm_sev_info *sev = &kvm->arch.sev_info; struct sev_data_launch_measure *data; struct kvm_sev_launch_measure params; + void __user *p = NULL; void *blob = NULL; int ret; if (!sev_guest(kvm)) return -ENOTTY; - if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params))) + if (copy_from_user(&params, measure, sizeof(params))) return -EFAULT; data = kzalloc(sizeof(*data), GFP_KERNEL); @@ -6256,17 +6258,13 @@ static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp) if (!params.len) goto cmd; - if (params.uaddr) { + p = (void __user *)(uintptr_t)params.uaddr; + if (p) { if (params.len > SEV_FW_BLOB_MAX_SIZE) { ret = -EINVAL; goto e_free; } - if (!access_ok(VERIFY_WRITE, params.uaddr, params.len)) { - ret = -EFAULT; - goto e_free; - } - ret = -ENOMEM; blob = kmalloc(params.len, GFP_KERNEL); if (!blob) @@ -6290,13 +6288,13 @@ cmd: goto e_free_blob; if (blob) { - if (copy_to_user((void __user *)(uintptr_t)params.uaddr, blob, params.len)) + if (copy_to_user(p, blob, params.len)) ret = -EFAULT; } done: params.len = data->len; - if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) + if (copy_to_user(measure, &params, sizeof(params))) ret = -EFAULT; e_free_blob: 
kfree(blob); -- cgit v1.2.1 From 7607b7174405aec7441ff6c970833c463114040a Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Mon, 19 Feb 2018 10:14:44 -0600 Subject: KVM: SVM: install RSM intercept MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RSM instruction is used by the SMM handler to return from SMM mode. Currently, rsm causes a #UD - which results in instruction fetch, decode, and emulate. By installing the RSM intercept we can avoid the instruction fetch since we know that #VMEXIT was due to rsm. The patch is required for the SEV guest, because in case of SEV guest memory is encrypted with guest-specific key and hypervisor will not able to fetch the instruction bytes from the guest memory. Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Joerg Roedel Cc: Borislav Petkov Cc: Tom Lendacky Signed-off-by: Brijesh Singh Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index ca69d53d7e6d..4aeb665ffbb0 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -300,6 +300,8 @@ module_param(vgif, int, 0444); static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT); module_param(sev, int, 0444); +static u8 rsm_ins_bytes[] = "\x0f\xaa"; + static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa); static void svm_complete_interrupts(struct vcpu_svm *svm); @@ -1383,6 +1385,7 @@ static void init_vmcb(struct vcpu_svm *svm) set_intercept(svm, INTERCEPT_SKINIT); set_intercept(svm, INTERCEPT_WBINVD); set_intercept(svm, INTERCEPT_XSETBV); + set_intercept(svm, INTERCEPT_RSM); if (!kvm_mwait_in_guest()) { set_intercept(svm, INTERCEPT_MONITOR); @@ -3699,6 +3702,12 @@ static int emulate_on_interception(struct vcpu_svm *svm) return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE; } +static int rsm_interception(struct vcpu_svm *svm) +{ + return x86_emulate_instruction(&svm->vcpu, 0, 0, + rsm_ins_bytes, 2) == EMULATE_DONE; +} + static int rdpmc_interception(struct vcpu_svm *svm) { int err; @@ -4541,7 +4550,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_MWAIT] = mwait_interception, [SVM_EXIT_XSETBV] = xsetbv_interception, [SVM_EXIT_NPF] = npf_interception, - [SVM_EXIT_RSM] = emulate_on_interception, + [SVM_EXIT_RSM] = rsm_interception, [SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception, [SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception, }; -- cgit v1.2.1 From 9c5e0afaf15788bcbd1c3469da701ac3da826886 Mon Sep 17 00:00:00 2001 From: Brijesh Singh Date: Mon, 19 Feb 2018 10:13:25 -0600 Subject: KVM: SVM: Fix SEV LAUNCH_SECRET command MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The SEV LAUNCH_SECRET command fails with error code 'invalid param' because we missed filling the guest and header system physical address while issuing the command. 
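A standalone sketch of the shape of the fix (toy structures and error codes, not the real SEV firmware interface): the command buffer carries three address/length pairs, and leaving the guest and header pairs zeroed is what the firmware reports back as an invalid parameter.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct launch_secret_cmd {              /* toy firmware command buffer */
        uint64_t hdr_address,   hdr_len;
        uint64_t guest_address, guest_len;
        uint64_t trans_address, trans_len;
        uint32_t handle;
};

static int fw_launch_update_secret(const struct launch_secret_cmd *c)
{
        if (!c->guest_address || !c->hdr_address || !c->trans_address)
                return -22;             /* "invalid param" */
        return 0;
}

int main(void)
{
        struct launch_secret_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.trans_address = 0x1000;     /* the old code filled only this pair */
        cmd.trans_len     = 64;
        printf("old: %d\n", fw_launch_update_secret(&cmd));

        cmd.guest_address = 0x2000 + (0x2345 & 0xfff);  /* page PA + offset */
        cmd.guest_len     = 16;
        cmd.hdr_address   = 0x3000;
        cmd.hdr_len       = 52;
        printf("new: %d\n", fw_launch_update_secret(&cmd));
        return 0;
}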
Fixes: 9f5b5b950aa9 (KVM: SVM: Add support for SEV LAUNCH_SECRET command) Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Borislav Petkov Cc: Tom Lendacky Cc: linux-kernel@vger.kernel.org Cc: Joerg Roedel Signed-off-by: Brijesh Singh Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 4aeb665ffbb0..3d8377f75eda 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -6604,7 +6604,7 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) struct page **pages; void *blob, *hdr; unsigned long n; - int ret; + int ret, offset; if (!sev_guest(kvm)) return -ENOTTY; @@ -6630,6 +6630,10 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) if (!data) goto e_unpin_memory; + offset = params.guest_uaddr & (PAGE_SIZE - 1); + data->guest_address = __sme_page_pa(pages[0]) + offset; + data->guest_len = params.guest_len; + blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len); if (IS_ERR(blob)) { ret = PTR_ERR(blob); @@ -6644,8 +6648,8 @@ static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp) ret = PTR_ERR(hdr); goto e_free_blob; } - data->trans_address = __psp_pa(blob); - data->trans_len = params.trans_len; + data->hdr_address = __psp_pa(hdr); + data->hdr_len = params.hdr_len; data->handle = sev->handle; ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error); -- cgit v1.2.1 From 038bac2b02989acf1fc938cedcb7944c02672b9f Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 19 Feb 2018 11:09:05 +0100 Subject: x86/acpi: Add a new x86_init_acpi structure to x86_init_ops Add a new struct x86_init_acpi to x86_init_ops. For now it contains only one init function to get the RSDP table address. Signed-off-by: Juergen Gross Reviewed-by: Andy Shevchenko Acked-by: Thomas Gleixner Acked-by: Rafael J. Wysocki Cc: Borislav Petkov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Kees Cook Cc: Kirill A. 
Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: boris.ostrovsky@oracle.com Cc: lenb@kernel.org Cc: linux-acpi@vger.kernel.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20180219100906.14265-3-jgross@suse.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/acpi.h | 7 +++++++ arch/x86/include/asm/x86_init.h | 9 +++++++++ arch/x86/kernel/x86_init.c | 5 +++++ 3 files changed, 21 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 11881726ed37..6609dd7289b5 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -31,6 +31,7 @@ #include #include #include +#include #ifdef CONFIG_ACPI_APEI # include @@ -133,6 +134,12 @@ static inline bool acpi_has_cpu_in_madt(void) return !!acpi_lapic; } +#define ACPI_HAVE_ARCH_GET_ROOT_POINTER +static inline u64 acpi_arch_get_root_pointer(void) +{ + return x86_init.acpi.get_root_pointer(); +} + #else /* !CONFIG_ACPI */ #define acpi_lapic 0 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index fc2f082ac635..2e2c34d2bb00 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -130,6 +130,14 @@ struct x86_hyper_init { void (*init_mem_mapping)(void); }; +/** + * struct x86_init_acpi - x86 ACPI init functions + * @get_root_pointer: get RSDP address + */ +struct x86_init_acpi { + u64 (*get_root_pointer)(void); +}; + /** * struct x86_init_ops - functions for platform specific setup * @@ -144,6 +152,7 @@ struct x86_init_ops { struct x86_init_iommu iommu; struct x86_init_pci pci; struct x86_hyper_init hyper; + struct x86_init_acpi acpi; }; /** diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 1151ccd72ce9..9e4e994a4836 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -30,6 +30,7 @@ int __init iommu_init_noop(void) { return 0; } void iommu_shutdown_noop(void) { } bool __init bool_x86_init_noop(void) { return false; } void x86_op_int_noop(int cpu) { } +u64 u64_x86_init_noop(void) { return 0; } /* * The platform setup functions are preset with the default functions @@ -91,6 +92,10 @@ struct x86_init_ops x86_init __initdata = { .x2apic_available = bool_x86_init_noop, .init_mem_mapping = x86_init_noop, }, + + .acpi = { + .get_root_pointer = u64_x86_init_noop, + }, }; struct x86_cpuinit_ops x86_cpuinit = { -- cgit v1.2.1 From b17d9d1df3c33a4f1d2bf397e2257aecf9dc56d4 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 19 Feb 2018 11:09:06 +0100 Subject: x86/xen: Add pvh specific rsdp address retrieval function Add pvh_get_root_pointer() for Xen PVH guests to communicate the address of the RSDP table given to the kernel via Xen start info. This makes the kernel boot again in PVH mode after on recent Xen the RSDP was moved to higher addresses. So up to that change it was pure luck that the legacy method to locate the RSDP was working when running as PVH mode. Signed-off-by: Juergen Gross Reviewed-by: Andy Shevchenko Acked-by: Thomas Gleixner Acked-by: Rafael J. Wysocki Cc: Borislav Petkov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Kees Cook Cc: Kirill A. 
Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: boris.ostrovsky@oracle.com Cc: lenb@kernel.org Cc: linux-acpi@vger.kernel.org Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20180219100906.14265-4-jgross@suse.com Signed-off-by: Ingo Molnar --- arch/x86/xen/enlighten_pvh.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index 436c4f003e17..aa1c6a6831a9 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -6,6 +6,7 @@ #include #include #include +#include #include #include @@ -16,15 +17,20 @@ /* * PVH variables. * - * xen_pvh and pvh_bootparams need to live in data segment since they - * are used after startup_{32|64}, which clear .bss, are invoked. + * xen_pvh pvh_bootparams and pvh_start_info need to live in data segment + * since they are used after startup_{32|64}, which clear .bss, are invoked. */ bool xen_pvh __attribute__((section(".data"))) = 0; struct boot_params pvh_bootparams __attribute__((section(".data"))); +struct hvm_start_info pvh_start_info __attribute__((section(".data"))); -struct hvm_start_info pvh_start_info; unsigned int pvh_start_info_sz = sizeof(pvh_start_info); +static u64 pvh_get_root_pointer(void) +{ + return pvh_start_info.rsdp_paddr; +} + static void __init init_pvh_bootparams(void) { struct xen_memory_map memmap; @@ -71,6 +77,8 @@ static void __init init_pvh_bootparams(void) */ pvh_bootparams.hdr.version = 0x212; pvh_bootparams.hdr.type_of_loader = (9 << 4) | 0; /* Xen loader */ + + x86_init.acpi.get_root_pointer = pvh_get_root_pointer; } /* -- cgit v1.2.1 From c46dacb75cd59a50a2380dcba5e7edf4fde86845 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Wed, 21 Feb 2018 10:42:32 +0100 Subject: x86/boot: Make the x86_init noop functions static Make the noop functions in x86_init.c static in case they are used locally only. Reported-by: kbuild test robot Signed-off-by: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180221094232.23462-1-jgross@suse.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/x86_init.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 9e4e994a4836..b8cff22a8785 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -26,11 +26,11 @@ void x86_init_noop(void) { } void __init x86_init_uint_noop(unsigned int unused) { } -int __init iommu_init_noop(void) { return 0; } -void iommu_shutdown_noop(void) { } -bool __init bool_x86_init_noop(void) { return false; } -void x86_op_int_noop(int cpu) { } -u64 u64_x86_init_noop(void) { return 0; } +static int __init iommu_init_noop(void) { return 0; } +static void iommu_shutdown_noop(void) { } +static bool __init bool_x86_init_noop(void) { return false; } +static void x86_op_int_noop(int cpu) { } +static u64 u64_x86_init_noop(void) { return 0; } /* * The platform setup functions are preset with the default functions -- cgit v1.2.1 From 672c0ae09b33a11d8f31fc61526632e96301164c Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Fri, 23 Feb 2018 01:27:37 -0700 Subject: x86/mm: Consider effective protection attributes in W+X check Using just the leaf page table entry flags would cause a false warning in case _PAGE_RW is clear or _PAGE_NX is set in a higher level entry. 
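As a rough illustration of the rule this change introduces (a minimal sketch, not part of the patch; the flag values are made up for the example and the expression mirrors the effective_prot() helper added below):

	/*
	 * A mapping only counts as W+X if it is writable and executable at
	 * *every* level, so restrictions in higher-level entries have to be
	 * accumulated instead of looking at the leaf flags alone.
	 */
	pgprotval_t l2  = _PAGE_PRESENT | _PAGE_RW | _PAGE_NX;	/* NX set in L2   */
	pgprotval_t l1  = _PAGE_PRESENT;			/* RW clear in L1 */
	pgprotval_t eff = (l1 & l2 & (_PAGE_USER | _PAGE_RW)) |	/* USER/RW: AND   */
			  ((l1 | l2) & _PAGE_NX);		/* NX: OR         */
	bool wx = (eff & _PAGE_RW) && !(eff & _PAGE_NX);	/* false: no warning */
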
Hand through both the current entry's flags as well as the accumulated effective value (the latter as pgprotval_t instead of pgprot_t, as it's not an actual entry's value). This in particular eliminates the false W+X warning when running under Xen, as commit: 2cc42bac1c ("x86-64/Xen: eliminate W+X mappings") had to make the necessary adjustment in L2 rather than L1 (the reason is explained there). I.e. _PAGE_RW is clear there in L1, but _PAGE_NX is set in L2. Signed-off-by: Jan Beulich Reviewed-by: Juergen Gross Cc: Alexander Potapenko Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Boris Ostrovsky Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: Dmitry Vyukov Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/5A8FDE8902000078001AABBB@prv-mh.provo.novell.com Signed-off-by: Ingo Molnar --- arch/x86/mm/dump_pagetables.c | 94 ++++++++++++++++++++++++++----------------- 1 file changed, 58 insertions(+), 36 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 0d6d67d18ad6..62a7e9f65dec 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -29,6 +29,7 @@ struct pg_state { int level; pgprot_t current_prot; + pgprotval_t effective_prot; unsigned long start_address; unsigned long current_address; const struct addr_marker *marker; @@ -235,9 +236,9 @@ static unsigned long normalize_addr(unsigned long u) * print what we collected so far. */ static void note_page(struct seq_file *m, struct pg_state *st, - pgprot_t new_prot, int level) + pgprot_t new_prot, pgprotval_t new_eff, int level) { - pgprotval_t prot, cur; + pgprotval_t prot, cur, eff; static const char units[] = "BKMGTPE"; /* @@ -247,23 +248,24 @@ static void note_page(struct seq_file *m, struct pg_state *st, */ prot = pgprot_val(new_prot); cur = pgprot_val(st->current_prot); + eff = st->effective_prot; if (!st->level) { /* First entry */ st->current_prot = new_prot; + st->effective_prot = new_eff; st->level = level; st->marker = address_markers; st->lines = 0; pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n", st->marker->name); - } else if (prot != cur || level != st->level || + } else if (prot != cur || new_eff != eff || level != st->level || st->current_address >= st->marker[1].start_address) { const char *unit = units; unsigned long delta; int width = sizeof(unsigned long) * 2; - pgprotval_t pr = pgprot_val(st->current_prot); - if (st->check_wx && (pr & _PAGE_RW) && !(pr & _PAGE_NX)) { + if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX)) { WARN_ONCE(1, "x86/mm: Found insecure W+X mapping at address %p/%pS\n", (void *)st->start_address, @@ -317,21 +319,30 @@ static void note_page(struct seq_file *m, struct pg_state *st, st->start_address = st->current_address; st->current_prot = new_prot; + st->effective_prot = new_eff; st->level = level; } } -static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, unsigned long P) +static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2) +{ + return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) | + ((prot1 | prot2) & _PAGE_NX); +} + +static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, + pgprotval_t eff_in, unsigned long P) { int i; pte_t *start; - pgprotval_t prot; + pgprotval_t prot, eff; start = (pte_t *)pmd_page_vaddr(addr); for (i = 0; i < PTRS_PER_PTE; i++) { prot = pte_flags(*start); + eff = effective_prot(eff_in, prot); st->current_address = 
normalize_addr(P + i * PTE_LEVEL_MULT); - note_page(m, st, __pgprot(prot), 5); + note_page(m, st, __pgprot(prot), eff, 5); start++; } } @@ -351,7 +362,7 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, (pgtable_l5_enabled && __pa(pt) == __pa(kasan_zero_p4d)) || __pa(pt) == __pa(kasan_zero_pud)) { pgprotval_t prot = pte_flags(kasan_zero_pte[0]); - note_page(m, st, __pgprot(prot), 5); + note_page(m, st, __pgprot(prot), 0, 5); return true; } return false; @@ -366,42 +377,45 @@ static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st, #if PTRS_PER_PMD > 1 -static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, unsigned long P) +static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr, + pgprotval_t eff_in, unsigned long P) { int i; pmd_t *start, *pmd_start; - pgprotval_t prot; + pgprotval_t prot, eff; pmd_start = start = (pmd_t *)pud_page_vaddr(addr); for (i = 0; i < PTRS_PER_PMD; i++) { st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT); if (!pmd_none(*start)) { + prot = pmd_flags(*start); + eff = effective_prot(eff_in, prot); if (pmd_large(*start) || !pmd_present(*start)) { - prot = pmd_flags(*start); - note_page(m, st, __pgprot(prot), 4); + note_page(m, st, __pgprot(prot), eff, 4); } else if (!kasan_page_table(m, st, pmd_start)) { - walk_pte_level(m, st, *start, + walk_pte_level(m, st, *start, eff, P + i * PMD_LEVEL_MULT); } } else - note_page(m, st, __pgprot(0), 4); + note_page(m, st, __pgprot(0), 0, 4); start++; } } #else -#define walk_pmd_level(m,s,a,p) walk_pte_level(m,s,__pmd(pud_val(a)),p) +#define walk_pmd_level(m,s,a,e,p) walk_pte_level(m,s,__pmd(pud_val(a)),e,p) #define pud_large(a) pmd_large(__pmd(pud_val(a))) #define pud_none(a) pmd_none(__pmd(pud_val(a))) #endif #if PTRS_PER_PUD > 1 -static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, unsigned long P) +static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, + pgprotval_t eff_in, unsigned long P) { int i; pud_t *start, *pud_start; - pgprotval_t prot; + pgprotval_t prot, eff; pud_t *prev_pud = NULL; pud_start = start = (pud_t *)p4d_page_vaddr(addr); @@ -409,15 +423,16 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, for (i = 0; i < PTRS_PER_PUD; i++) { st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT); if (!pud_none(*start)) { + prot = pud_flags(*start); + eff = effective_prot(eff_in, prot); if (pud_large(*start) || !pud_present(*start)) { - prot = pud_flags(*start); - note_page(m, st, __pgprot(prot), 3); + note_page(m, st, __pgprot(prot), eff, 3); } else if (!kasan_page_table(m, st, pud_start)) { - walk_pmd_level(m, st, *start, + walk_pmd_level(m, st, *start, eff, P + i * PUD_LEVEL_MULT); } } else - note_page(m, st, __pgprot(0), 3); + note_page(m, st, __pgprot(0), 0, 3); prev_pud = start; start++; @@ -425,34 +440,36 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr, } #else -#define walk_pud_level(m,s,a,p) walk_pmd_level(m,s,__pud(p4d_val(a)),p) +#define walk_pud_level(m,s,a,e,p) walk_pmd_level(m,s,__pud(p4d_val(a)),e,p) #define p4d_large(a) pud_large(__pud(p4d_val(a))) #define p4d_none(a) pud_none(__pud(p4d_val(a))) #endif -static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, unsigned long P) +static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr, + pgprotval_t eff_in, unsigned long P) { int i; p4d_t *start, *p4d_start; - pgprotval_t prot; + 
pgprotval_t prot, eff; if (PTRS_PER_P4D == 1) - return walk_pud_level(m, st, __p4d(pgd_val(addr)), P); + return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P); p4d_start = start = (p4d_t *)pgd_page_vaddr(addr); for (i = 0; i < PTRS_PER_P4D; i++) { st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT); if (!p4d_none(*start)) { + prot = p4d_flags(*start); + eff = effective_prot(eff_in, prot); if (p4d_large(*start) || !p4d_present(*start)) { - prot = p4d_flags(*start); - note_page(m, st, __pgprot(prot), 2); + note_page(m, st, __pgprot(prot), eff, 2); } else if (!kasan_page_table(m, st, p4d_start)) { - walk_pud_level(m, st, *start, + walk_pud_level(m, st, *start, eff, P + i * P4D_LEVEL_MULT); } } else - note_page(m, st, __pgprot(0), 2); + note_page(m, st, __pgprot(0), 0, 2); start++; } @@ -483,7 +500,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, #else pgd_t *start = swapper_pg_dir; #endif - pgprotval_t prot; + pgprotval_t prot, eff; int i; struct pg_state st = {}; @@ -499,15 +516,20 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, for (i = 0; i < PTRS_PER_PGD; i++) { st.current_address = normalize_addr(i * PGD_LEVEL_MULT); if (!pgd_none(*start) && !is_hypervisor_range(i)) { + prot = pgd_flags(*start); +#ifdef CONFIG_X86_PAE + eff = _PAGE_USER | _PAGE_RW; +#else + eff = prot; +#endif if (pgd_large(*start) || !pgd_present(*start)) { - prot = pgd_flags(*start); - note_page(m, &st, __pgprot(prot), 1); + note_page(m, &st, __pgprot(prot), eff, 1); } else { - walk_p4d_level(m, &st, *start, + walk_p4d_level(m, &st, *start, eff, i * PGD_LEVEL_MULT); } } else - note_page(m, &st, __pgprot(0), 1); + note_page(m, &st, __pgprot(0), 0, 1); cond_resched(); start++; @@ -515,7 +537,7 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd, /* Flush out the last page */ st.current_address = normalize_addr(PTRS_PER_PGD*PGD_LEVEL_MULT); - note_page(m, &st, __pgprot(0), 0); + note_page(m, &st, __pgprot(0), 0, 0); if (!checkwx) return; if (st.wx_pages) -- cgit v1.2.1 From ef61f8a340fd6d49df6b367785743febc47320c1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= Date: Sat, 3 Feb 2018 00:10:20 +0100 Subject: x86/boot/e820: Implement a range manipulation operator MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a more versatile memmap= operator, which -- in addition to all the things that were possible before -- allows you to: - redeclare existing ranges -- before, you were limited to adding ranges; - drop any range -- like a mem= for any location; - use any e820 memory type -- not just some predefined ones. The syntax is: memmap=%-+ Size and offset work as usual. The "-" and "+" are optional and their existence determine the behavior: The command works on the specified range of memory limited to type (if specified). This memory is then configured to show up as . If is not specified, the memory is removed from the e820 map. Signed-off-by: Jan H. 
Schönherr Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180202231020.15608-1-jschoenh@amazon.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/e820.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c index 71c11ad5643e..6a2cb1442e05 100644 --- a/arch/x86/kernel/e820.c +++ b/arch/x86/kernel/e820.c @@ -924,6 +924,24 @@ static int __init parse_memmap_one(char *p) } else if (*p == '!') { start_at = memparse(p+1, &p); e820__range_add(start_at, mem_size, E820_TYPE_PRAM); + } else if (*p == '%') { + enum e820_type from = 0, to = 0; + + start_at = memparse(p + 1, &p); + if (*p == '-') + from = simple_strtoull(p + 1, &p, 0); + if (*p == '+') + to = simple_strtoull(p + 1, &p, 0); + if (*p != '\0') + return -EINVAL; + if (from && to) + e820__range_update(start_at, mem_size, from, to); + else if (to) + e820__range_add(start_at, mem_size, to); + else if (from) + e820__range_remove(start_at, mem_size, from, 1); + else + e820__range_remove(start_at, mem_size, 0, 0); } else { e820__range_remove(mem_size, ULLONG_MAX - mem_size, E820_TYPE_RAM, 1); } -- cgit v1.2.1 From cb097be7036aa325adba33d8c41fe77b980b0e77 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Sun, 25 Feb 2018 08:50:56 -0800 Subject: x86/refcounts: Switch to UD2 for exceptions As done in commit 3b3a371cc9bc ("x86/debug: Use UD2 for WARN()"), this switches to UD2 from UD0 to keep disassembly readable. Signed-off-by: Kees Cook Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Link: https://lkml.kernel.org/r/20180225165056.GA11719@beast --- arch/x86/include/asm/refcount.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/refcount.h b/arch/x86/include/asm/refcount.h index d65171120e90..4cf11d88d3b3 100644 --- a/arch/x86/include/asm/refcount.h +++ b/arch/x86/include/asm/refcount.h @@ -17,7 +17,7 @@ #define _REFCOUNT_EXCEPTION \ ".pushsection .text..refcount\n" \ "111:\tlea %[counter], %%" _ASM_CX "\n" \ - "112:\t" ASM_UD0 "\n" \ + "112:\t" ASM_UD2 "\n" \ ASM_UNREACHABLE \ ".popsection\n" \ "113:\n" \ -- cgit v1.2.1 From a368d7fd2a3c6babb852fe974018dd97916bcd3b Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Feb 2018 04:11:21 -0700 Subject: x86/entry/64: Add instruction suffix Omitting suffixes from instructions in AT&T mode is bad practice when operand size cannot be determined by the assembler from register operands, and is likely going to be warned about by upstream gas in the future (mine does already). Add the single missing suffix here. Signed-off-by: Jan Beulich Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/5A93F96902000078001ABAC8@prv-mh.provo.novell.com --- arch/x86/entry/entry_64.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index d5c7f18f79ac..805f52703ee3 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -55,7 +55,7 @@ END(native_usergs_sysret64) .macro TRACE_IRQS_FLAGS flags:req #ifdef CONFIG_TRACE_IRQFLAGS - bt $9, \flags /* interrupts off? */ + btl $9, \flags /* interrupts off? 
*/ jnc 1f TRACE_IRQS_ON 1: -- cgit v1.2.1 From 22636f8c9511245cb3c8412039f1dd95afb3aa59 Mon Sep 17 00:00:00 2001 From: Jan Beulich Date: Mon, 26 Feb 2018 04:11:51 -0700 Subject: x86/asm: Add instruction suffixes to bitops Omitting suffixes from instructions in AT&T mode is bad practice when operand size cannot be determined by the assembler from register operands, and is likely going to be warned about by upstream gas in the future (mine does already). Add the missing suffixes here. Note that for 64-bit this means some operations change from being 32-bit to 64-bit. Signed-off-by: Jan Beulich Signed-off-by: Thomas Gleixner Link: https://lkml.kernel.org/r/5A93F98702000078001ABACC@prv-mh.provo.novell.com --- arch/x86/include/asm/bitops.h | 29 ++++++++++++++++------------- arch/x86/include/asm/percpu.h | 2 +- 2 files changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index 3fa039855b8f..9f645ba57dbb 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -78,7 +78,7 @@ set_bit(long nr, volatile unsigned long *addr) : "iq" ((u8)CONST_MASK(nr)) : "memory"); } else { - asm volatile(LOCK_PREFIX "bts %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(bts) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr) : "memory"); } } @@ -94,7 +94,7 @@ set_bit(long nr, volatile unsigned long *addr) */ static __always_inline void __set_bit(long nr, volatile unsigned long *addr) { - asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory"); + asm volatile(__ASM_SIZE(bts) " %1,%0" : ADDR : "Ir" (nr) : "memory"); } /** @@ -115,7 +115,7 @@ clear_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)~CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX "btr %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(btr) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -137,7 +137,7 @@ static __always_inline void clear_bit_unlock(long nr, volatile unsigned long *ad static __always_inline void __clear_bit(long nr, volatile unsigned long *addr) { - asm volatile("btr %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btr) " %1,%0" : ADDR : "Ir" (nr)); } static __always_inline bool clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr) @@ -182,7 +182,7 @@ static __always_inline void __clear_bit_unlock(long nr, volatile unsigned long * */ static __always_inline void __change_bit(long nr, volatile unsigned long *addr) { - asm volatile("btc %1,%0" : ADDR : "Ir" (nr)); + asm volatile(__ASM_SIZE(btc) " %1,%0" : ADDR : "Ir" (nr)); } /** @@ -201,7 +201,7 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) : CONST_MASK_ADDR(nr, addr) : "iq" ((u8)CONST_MASK(nr))); } else { - asm volatile(LOCK_PREFIX "btc %1,%0" + asm volatile(LOCK_PREFIX __ASM_SIZE(btc) " %1,%0" : BITOP_ADDR(addr) : "Ir" (nr)); } @@ -217,7 +217,8 @@ static __always_inline void change_bit(long nr, volatile unsigned long *addr) */ static __always_inline bool test_and_set_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "bts", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(bts), + *addr, "Ir", nr, "%0", c); } /** @@ -246,7 +247,7 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * { bool oldbit; - asm("bts %2,%1" + asm(__ASM_SIZE(bts) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -263,7 +264,8 @@ static __always_inline bool __test_and_set_bit(long nr, volatile unsigned long * */ static __always_inline bool 
test_and_clear_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btr", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btr), + *addr, "Ir", nr, "%0", c); } /** @@ -286,7 +288,7 @@ static __always_inline bool __test_and_clear_bit(long nr, volatile unsigned long { bool oldbit; - asm volatile("btr %2,%1" + asm volatile(__ASM_SIZE(btr) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr)); @@ -298,7 +300,7 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon { bool oldbit; - asm volatile("btc %2,%1" + asm volatile(__ASM_SIZE(btc) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit), ADDR : "Ir" (nr) : "memory"); @@ -316,7 +318,8 @@ static __always_inline bool __test_and_change_bit(long nr, volatile unsigned lon */ static __always_inline bool test_and_change_bit(long nr, volatile unsigned long *addr) { - GEN_BINARY_RMWcc(LOCK_PREFIX "btc", *addr, "Ir", nr, "%0", c); + GEN_BINARY_RMWcc(LOCK_PREFIX __ASM_SIZE(btc), + *addr, "Ir", nr, "%0", c); } static __always_inline bool constant_test_bit(long nr, const volatile unsigned long *addr) @@ -329,7 +332,7 @@ static __always_inline bool variable_test_bit(long nr, volatile const unsigned l { bool oldbit; - asm volatile("bt %2,%1" + asm volatile(__ASM_SIZE(bt) " %2,%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long *)addr), "Ir" (nr)); diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h index ba3c523aaf16..a06b07399d17 100644 --- a/arch/x86/include/asm/percpu.h +++ b/arch/x86/include/asm/percpu.h @@ -526,7 +526,7 @@ static inline bool x86_this_cpu_variable_test_bit(int nr, { bool oldbit; - asm volatile("bt "__percpu_arg(2)",%1" + asm volatile("btl "__percpu_arg(2)",%1" CC_SET(c) : CC_OUT(c) (oldbit) : "m" (*(unsigned long __percpu *)addr), "Ir" (nr)); -- cgit v1.2.1 From ae8d1d0061ad7996c2c5e769e809a593544fa145 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 26 Feb 2018 17:25:54 -0600 Subject: x86/mm/sme: Disable stack protection for mem_encrypt_identity.c Stack protection is not compatible with early boot code. All of the early SME boot code is now isolated in a separate file, mem_encrypt_identity.c, so arch/x86/mm/Makefile can be updated to turn off stack protection for the entire file. This eliminates the need to worry about other functions within the file being instrumented with stack protection (as was seen when a newer version of GCC instrumented sme_encrypt_kernel() where an older version hadn't). It also allows removal of the __nostackprotector attribute from individual functions. Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Acked-by: Kirill A. 
Shutemov Cc: Linus Torvalds Cc: Borislav Petkov Link: https://lkml.kernel.org/r/20180226232554.14108.16881.stgit@tlendack-t1.amdoffice.net --- arch/x86/mm/Makefile | 1 + arch/x86/mm/mem_encrypt_identity.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile index 03c6c8561623..4b101dd6e52f 100644 --- a/arch/x86/mm/Makefile +++ b/arch/x86/mm/Makefile @@ -19,6 +19,7 @@ obj-y := init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \ nostackp := $(call cc-option, -fno-stack-protector) CFLAGS_physaddr.o := $(nostackp) CFLAGS_setup_nx.o := $(nostackp) +CFLAGS_mem_encrypt_identity.o := $(nostackp) CFLAGS_fault.o := -I$(src)/../include/asm/trace diff --git a/arch/x86/mm/mem_encrypt_identity.c b/arch/x86/mm/mem_encrypt_identity.c index b4139c5ab972..1b2197d13832 100644 --- a/arch/x86/mm/mem_encrypt_identity.c +++ b/arch/x86/mm/mem_encrypt_identity.c @@ -266,7 +266,7 @@ static unsigned long __init sme_pgtable_calc(unsigned long len) return entries + tables; } -void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) +void __init sme_encrypt_kernel(struct boot_params *bp) { unsigned long workarea_start, workarea_end, workarea_len; unsigned long execute_start, execute_end, execute_len; @@ -468,7 +468,7 @@ void __init __nostackprotector sme_encrypt_kernel(struct boot_params *bp) native_write_cr3(__native_read_cr3()); } -void __init __nostackprotector sme_enable(struct boot_params *bp) +void __init sme_enable(struct boot_params *bp) { const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off; unsigned int eax, ebx, ecx, edx; -- cgit v1.2.1 From 028091f82eefd5e84f81cef81a7673016ecbe78b Mon Sep 17 00:00:00 2001 From: Sebastian Panceac Date: Wed, 28 Feb 2018 11:40:49 +0200 Subject: x86/platform/intel-mid: Handle Intel Edison reboot correctly When the Intel Edison module is powered with 3.3V, the reboot command makes the module stuck. If the module is powered at a greater voltage, like 4.4V (as the Edison Mini Breakout board does), reboot works OK. The official Intel Edison BSP sends the IPCMSG_COLD_RESET message to the SCU by default. The IPCMSG_COLD_BOOT which is used by the upstream kernel is only sent when explicitely selected on the kernel command line. Use IPCMSG_COLD_RESET unconditionally which makes reboot work independent of the power supply voltage. 
[ tglx: Massaged changelog ] Fixes: bda7b072de99 ("x86/platform/intel-mid: Implement power off sequence") Signed-off-by: Sebastian Panceac Signed-off-by: Thomas Gleixner Acked-by: Andy Shevchenko Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/1519810849-15131-1-git-send-email-sebastian@resin.io --- arch/x86/platform/intel-mid/intel-mid.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 2c67bae6bb53..fb1df9488e98 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -79,7 +79,7 @@ static void intel_mid_power_off(void) static void intel_mid_reboot(void) { - intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0); + intel_scu_ipc_simple_command(IPCMSG_COLD_RESET, 0); } static unsigned long __init intel_mid_calibrate_tsc(void) -- cgit v1.2.1 From 47b02f4c621c5ae9fd27248dfa9a194bc1387ecb Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Tue, 27 Feb 2018 11:19:22 +0100 Subject: x86/xen: add tty0 and hvc0 as preferred consoles for dom0 Today the tty0 and hvc0 consoles are added as a preferred consoles for pv domUs only. As this requires a boot parameter for getting dom0 messages per default, add them for dom0, too. Signed-off-by: Juergen Gross Reviewed-by: Boris Ostrovsky Signed-off-by: Juergen Gross --- arch/x86/xen/enlighten_pv.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index c047f42552e1..3c2c2530737e 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1376,8 +1376,6 @@ asmlinkage __visible void __init xen_start_kernel(void) if (!xen_initial_domain()) { add_preferred_console("xenboot", 0, NULL); - add_preferred_console("tty", 0, NULL); - add_preferred_console("hvc", 0, NULL); if (pci_xen) x86_init.pci.arch_init = pci_xen_init; } else { @@ -1410,6 +1408,10 @@ asmlinkage __visible void __init xen_start_kernel(void) xen_boot_params_init_edd(); } + + add_preferred_console("tty", 0, NULL); + add_preferred_console("hvc", 0, NULL); + #ifdef CONFIG_PCI /* PCI BIOS service won't work from a PV guest. */ pci_probe &= ~PCI_PROBE_BIOS; -- cgit v1.2.1 From 9b217f33017715903d0956dfc58f82d2a2d00e63 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Thu, 1 Mar 2018 13:59:28 +0800 Subject: x86/apic: Move pending interrupt check code into it's own function The pending interrupt check code is mixed with the local APIC setup code, that looks messy. Extract the related code, move it into a new function named apic_pending_intr_clear(). Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Cc: bhe@redhat.com Cc: ebiederm@xmission.com Link: https://lkml.kernel.org/r/20180301055930.2396-2-douly.fnst@cn.fujitsu.com --- arch/x86/kernel/apic/apic.c | 100 ++++++++++++++++++++++++-------------------- 1 file changed, 55 insertions(+), 45 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 2ceac9f6c5d7..69d2936e3154 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1408,6 +1408,56 @@ static void lapic_setup_esr(void) oldvalue, value); } +static void apic_pending_intr_clear(void) +{ + long long max_loops = cpu_khz ? 
cpu_khz : 1000000; + unsigned long long tsc = 0, ntsc; + unsigned int value, queued; + int i, j, acked = 0; + + if (boot_cpu_has(X86_FEATURE_TSC)) + tsc = rdtsc(); + /* + * After a crash, we no longer service the interrupts and a pending + * interrupt from previous kernel might still have ISR bit set. + * + * Most probably by now CPU has serviced that pending interrupt and + * it might not have done the ack_APIC_irq() because it thought, + * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it + * does not clear the ISR bit and cpu thinks it has already serivced + * the interrupt. Hence a vector might get locked. It was noticed + * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. + */ + do { + queued = 0; + for (i = APIC_ISR_NR - 1; i >= 0; i--) + queued |= apic_read(APIC_IRR + i*0x10); + + for (i = APIC_ISR_NR - 1; i >= 0; i--) { + value = apic_read(APIC_ISR + i*0x10); + for (j = 31; j >= 0; j--) { + if (value & (1< 256) { + printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", + acked); + break; + } + if (queued) { + if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { + ntsc = rdtsc(); + max_loops = (cpu_khz << 10) - (ntsc - tsc); + } else + max_loops--; + } + } while (queued && max_loops > 0); + WARN_ON(max_loops <= 0); +} + /** * setup_local_APIC - setup the local APIC * @@ -1417,13 +1467,11 @@ static void lapic_setup_esr(void) static void setup_local_APIC(void) { int cpu = smp_processor_id(); - unsigned int value, queued; - int i, j, acked = 0; - unsigned long long tsc = 0, ntsc; - long long max_loops = cpu_khz ? cpu_khz : 1000000; + unsigned int value; +#ifdef CONFIG_X86_32 + int i; +#endif - if (boot_cpu_has(X86_FEATURE_TSC)) - tsc = rdtsc(); if (disable_apic) { disable_ioapic_support(); @@ -1475,45 +1523,7 @@ static void setup_local_APIC(void) value &= ~APIC_TPRI_MASK; apic_write(APIC_TASKPRI, value); - /* - * After a crash, we no longer service the interrupts and a pending - * interrupt from previous kernel might still have ISR bit set. - * - * Most probably by now CPU has serviced that pending interrupt and - * it might not have done the ack_APIC_irq() because it thought, - * interrupt came from i8259 as ExtInt. LAPIC did not get EOI so it - * does not clear the ISR bit and cpu thinks it has already serivced - * the interrupt. Hence a vector might get locked. It was noticed - * for timer irq (vector 0x31). Issue an extra EOI to clear ISR. 
- */ - do { - queued = 0; - for (i = APIC_ISR_NR - 1; i >= 0; i--) - queued |= apic_read(APIC_IRR + i*0x10); - - for (i = APIC_ISR_NR - 1; i >= 0; i--) { - value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1< 256) { - printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", - acked); - break; - } - if (queued) { - if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { - ntsc = rdtsc(); - max_loops = (cpu_khz << 10) - (ntsc - tsc); - } else - max_loops--; - } - } while (queued && max_loops > 0); - WARN_ON(max_loops <= 0); + apic_pending_intr_clear(); /* * Now that we are all set up, enable the APIC -- cgit v1.2.1 From 3ea9e7ae1a2038b9fdff729861c9b4af0087024f Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Thu, 1 Mar 2018 13:59:29 +0800 Subject: x86/apic: Modernize the pending interrupt code The pending interrupt check code is old, update the following: - Use for_each_set_bit() instead of open coding it - Replace printk() with pr_err() - Get rid of printk line breaks - Make curly braces balanced Suggested-by: Andy Shevchenko Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Cc: bhe@redhat.com Cc: ebiederm@xmission.com Link: https://lkml.kernel.org/r/20180301055930.2396-3-douly.fnst@cn.fujitsu.com --- arch/x86/kernel/apic/apic.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 69d2936e3154..7a347d7450b6 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1412,7 +1412,8 @@ static void apic_pending_intr_clear(void) { long long max_loops = cpu_khz ? cpu_khz : 1000000; unsigned long long tsc = 0, ntsc; - unsigned int value, queued; + unsigned int queued; + unsigned long value; int i, j, acked = 0; if (boot_cpu_has(X86_FEATURE_TSC)) @@ -1435,24 +1436,22 @@ static void apic_pending_intr_clear(void) for (i = APIC_ISR_NR - 1; i >= 0; i--) { value = apic_read(APIC_ISR + i*0x10); - for (j = 31; j >= 0; j--) { - if (value & (1< 256) { - printk(KERN_ERR "LAPIC pending interrupts after %d EOI\n", - acked); + pr_err("LAPIC pending interrupts after %d EOI\n", acked); break; } if (queued) { if (boot_cpu_has(X86_FEATURE_TSC) && cpu_khz) { ntsc = rdtsc(); max_loops = (cpu_khz << 10) - (ntsc - tsc); - } else + } else { max_loops--; + } } } while (queued && max_loops > 0); WARN_ON(max_loops <= 0); -- cgit v1.2.1 From 8f1561680f42a5491b371b513f1ab8197f31fd62 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Thu, 1 Mar 2018 13:59:30 +0800 Subject: x86/apic: Drop logical_smp_processor_id() inline The logical_smp_processor_id() inline which is only called in setup_local_APIC() on x86_32 systems has no real value. 
Drop it and directly use GET_APIC_LOGICAL_ID() at the call site and use a more suitable variable name for readability Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: andy.shevchenko@gmail.com Cc: bhe@redhat.com Cc: ebiederm@xmission.com Link: https://lkml.kernel.org/r/20180301055930.2396-4-douly.fnst@cn.fujitsu.com --- arch/x86/include/asm/smp.h | 10 ---------- arch/x86/kernel/apic/apic.c | 10 +++++----- 2 files changed, 5 insertions(+), 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index 461f53d27708..e2057780d67f 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h @@ -176,16 +176,6 @@ static inline int wbinvd_on_all_cpus(void) extern unsigned disabled_cpus; #ifdef CONFIG_X86_LOCAL_APIC - -#ifndef CONFIG_X86_64 -static inline int logical_smp_processor_id(void) -{ - /* we don't want to mark this access volatile - bad code generation */ - return GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); -} - -#endif - extern int hard_smp_processor_id(void); #else /* CONFIG_X86_LOCAL_APIC */ diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 7a347d7450b6..ce2066373922 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1468,7 +1468,7 @@ static void setup_local_APIC(void) int cpu = smp_processor_id(); unsigned int value; #ifdef CONFIG_X86_32 - int i; + int logical_apicid, ldr_apicid; #endif @@ -1507,11 +1507,11 @@ static void setup_local_APIC(void) * initialized during get_smp_config(), make sure it matches the * actual value. */ - i = early_per_cpu(x86_cpu_to_logical_apicid, cpu); - WARN_ON(i != BAD_APICID && i != logical_smp_processor_id()); + logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu); + ldr_apicid = GET_APIC_LOGICAL_ID(apic_read(APIC_LDR)); + WARN_ON(logical_apicid != BAD_APICID && logical_apicid != ldr_apicid); /* always use the value from LDR */ - early_per_cpu(x86_cpu_to_logical_apicid, cpu) = - logical_smp_processor_id(); + early_per_cpu(x86_cpu_to_logical_apicid, cpu) = ldr_apicid; #endif /* -- cgit v1.2.1 From 801e459a6f3a63af9d447e6249088c76ae16efc4 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Wed, 21 Feb 2018 13:39:51 -0600 Subject: KVM: x86: Add a framework for supporting MSR-based features MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Provide a new KVM capability that allows bits within MSRs to be recognized as features. Two new ioctls are added to the /dev/kvm ioctl routine to retrieve the list of these MSRs and then retrieve their values. A kvm_x86_ops callback is used to determine support for the listed MSR-based features. Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini [Tweaked documentation. 
- Radim] Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 2 ++ arch/x86/kvm/svm.c | 6 ++++ arch/x86/kvm/vmx.c | 6 ++++ arch/x86/kvm/x86.c | 75 ++++++++++++++++++++++++++++++++++++++--- 4 files changed, 84 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0a9e330b34f0..bab0694b35c3 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1095,6 +1095,8 @@ struct kvm_x86_ops { int (*mem_enc_op)(struct kvm *kvm, void __user *argp); int (*mem_enc_reg_region)(struct kvm *kvm, struct kvm_enc_region *argp); int (*mem_enc_unreg_region)(struct kvm *kvm, struct kvm_enc_region *argp); + + int (*get_msr_feature)(struct kvm_msr_entry *entry); }; struct kvm_arch_async_pf { diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 3d8377f75eda..d8db947acf70 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -3869,6 +3869,11 @@ static int cr8_write_interception(struct vcpu_svm *svm) return 0; } +static int svm_get_msr_feature(struct kvm_msr_entry *msr) +{ + return 1; +} + static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) { struct vcpu_svm *svm = to_svm(vcpu); @@ -6832,6 +6837,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = { .vcpu_unblocking = svm_vcpu_unblocking, .update_bp_intercept = update_bp_intercept, + .get_msr_feature = svm_get_msr_feature, .get_msr = svm_get_msr, .set_msr = svm_set_msr, .get_segment_base = svm_get_segment_base, diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ec14f2319a87..fafc1f6d8987 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3226,6 +3226,11 @@ static inline bool vmx_feature_control_msr_valid(struct kvm_vcpu *vcpu, return !(val & ~valid_bits); } +static int vmx_get_msr_feature(struct kvm_msr_entry *msr) +{ + return 1; +} + /* * Reads an msr value (of 'msr_index') into 'pdata'. * Returns 0 on success, non-0 otherwise. @@ -12296,6 +12301,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { .vcpu_put = vmx_vcpu_put, .update_bp_intercept = update_exception_bitmap, + .get_msr_feature = vmx_get_msr_feature, .get_msr = vmx_get_msr, .set_msr = vmx_set_msr, .get_segment_base = vmx_get_segment_base, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 96edda878dbf..239fc1fd7845 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1049,6 +1049,28 @@ static u32 emulated_msrs[] = { static unsigned num_emulated_msrs; +/* + * List of msr numbers which are used to expose MSR-based features that + * can be used by a hypervisor to validate requested CPU features. 
+ */ +static u32 msr_based_features[] = { +}; + +static unsigned int num_msr_based_features; + +static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) +{ + struct kvm_msr_entry msr; + + msr.index = index; + if (kvm_x86_ops->get_msr_feature(&msr)) + return 1; + + *data = msr.data; + + return 0; +} + bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer) { if (efer & efer_reserved_bits) @@ -2680,13 +2702,11 @@ static int __msr_io(struct kvm_vcpu *vcpu, struct kvm_msrs *msrs, int (*do_msr)(struct kvm_vcpu *vcpu, unsigned index, u64 *data)) { - int i, idx; + int i; - idx = srcu_read_lock(&vcpu->kvm->srcu); for (i = 0; i < msrs->nmsrs; ++i) if (do_msr(vcpu, entries[i].index, &entries[i].data)) break; - srcu_read_unlock(&vcpu->kvm->srcu, idx); return i; } @@ -2785,6 +2805,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_SET_BOOT_CPU_ID: case KVM_CAP_SPLIT_IRQCHIP: case KVM_CAP_IMMEDIATE_EXIT: + case KVM_CAP_GET_MSR_FEATURES: r = 1; break; case KVM_CAP_ADJUST_CLOCK: @@ -2899,6 +2920,31 @@ long kvm_arch_dev_ioctl(struct file *filp, goto out; r = 0; break; + case KVM_GET_MSR_FEATURE_INDEX_LIST: { + struct kvm_msr_list __user *user_msr_list = argp; + struct kvm_msr_list msr_list; + unsigned int n; + + r = -EFAULT; + if (copy_from_user(&msr_list, user_msr_list, sizeof(msr_list))) + goto out; + n = msr_list.nmsrs; + msr_list.nmsrs = num_msr_based_features; + if (copy_to_user(user_msr_list, &msr_list, sizeof(msr_list))) + goto out; + r = -E2BIG; + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; + if (copy_to_user(user_msr_list->indices, &msr_based_features, + num_msr_based_features * sizeof(u32))) + goto out; + r = 0; + break; + } + case KVM_GET_MSRS: + r = msr_io(NULL, argp, do_get_msr_feature, 1); + break; } default: r = -EINVAL; @@ -3636,12 +3682,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, r = 0; break; } - case KVM_GET_MSRS: + case KVM_GET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_get_msr, 1); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; - case KVM_SET_MSRS: + } + case KVM_SET_MSRS: { + int idx = srcu_read_lock(&vcpu->kvm->srcu); r = msr_io(vcpu, argp, do_set_msr, 0); + srcu_read_unlock(&vcpu->kvm->srcu, idx); break; + } case KVM_TPR_ACCESS_REPORTING: { struct kvm_tpr_access_ctl tac; @@ -4464,6 +4516,19 @@ static void kvm_init_msr_list(void) j++; } num_emulated_msrs = j; + + for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) { + struct kvm_msr_entry msr; + + msr.index = msr_based_features[i]; + if (kvm_x86_ops->get_msr_feature(&msr)) + continue; + + if (j < i) + msr_based_features[j] = msr_based_features[i]; + j++; + } + num_msr_based_features = j; } static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len, -- cgit v1.2.1 From d1d93fa90f1afa926cb060b7f78ab01a65705b4d Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Sat, 24 Feb 2018 00:18:20 +0100 Subject: KVM: SVM: Add MSR-based feature support for serializing LFENCE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In order to determine if LFENCE is a serializing instruction on AMD processors, MSR 0xc0011029 (MSR_F10H_DECFG) must be read and the state of bit 1 checked. This patch will add support to allow a guest to properly make this determination. Add the MSR feature callback operation to svm.c and add MSR 0xc0011029 to the list of MSR-based features. If LFENCE is serializing, then the feature is supported, allowing the hypervisor to set the value of the MSR that guest will see. 
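On the guest side, the determination described here is essentially (a minimal sketch, not part of the patch, using the existing MSR_F10H_DECFG / MSR_F10H_DECFG_LFENCE_SERIALIZE definitions for MSR 0xc0011029 and its bit 1):

	u64 val;
	bool lfence_serializing;

	/* Read DE_CFG and test the "LFENCE is serializing" bit. */
	rdmsrl(MSR_F10H_DECFG, val);
	lfence_serializing = !!(val & MSR_F10H_DECFG_LFENCE_SERIALIZE);
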
Support is also added to write (hypervisor only) and read the MSR value for the guest. A write by the guest will result in a #GP. A read by the guest will return the value as set by the host. In this way, the support to expose the feature to the guest is controlled by the hypervisor. Reviewed-by: Paolo Bonzini Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/svm.c | 36 +++++++++++++++++++++++++++++++++++- arch/x86/kvm/x86.c | 1 + 2 files changed, 36 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d8db947acf70..f874798f8209 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -178,6 +178,8 @@ struct vcpu_svm { uint64_t sysenter_eip; uint64_t tsc_aux; + u64 msr_decfg; + u64 next_rip; u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS]; @@ -3871,7 +3873,18 @@ static int cr8_write_interception(struct vcpu_svm *svm) static int svm_get_msr_feature(struct kvm_msr_entry *msr) { - return 1; + msr->data = 0; + + switch (msr->index) { + case MSR_F10H_DECFG: + if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) + msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE; + break; + default: + return 1; + } + + return 0; } static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) @@ -3969,6 +3982,9 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0x1E; } break; + case MSR_F10H_DECFG: + msr_info->data = svm->msr_decfg; + break; default: return kvm_get_msr_common(vcpu, msr_info); } @@ -4147,6 +4163,24 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) case MSR_VM_IGNNE: vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data); break; + case MSR_F10H_DECFG: { + struct kvm_msr_entry msr_entry; + + msr_entry.index = msr->index; + if (svm_get_msr_feature(&msr_entry)) + return 1; + + /* Check the supported bits */ + if (data & ~msr_entry.data) + return 1; + + /* Don't allow the guest to change a bit, #GP */ + if (!msr->host_initiated && (data ^ msr_entry.data)) + return 1; + + svm->msr_decfg = data; + break; + } case MSR_IA32_APICBASE: if (kvm_vcpu_apicv_active(vcpu)) avic_update_vapic_bar(to_svm(vcpu), data); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 239fc1fd7845..54b4ed55945b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1054,6 +1054,7 @@ static unsigned num_emulated_msrs; * can be used by a hypervisor to validate requested CPU features. */ static u32 msr_based_features[] = { + MSR_F10H_DECFG, }; static unsigned int num_msr_based_features; -- cgit v1.2.1 From 66421c1ec340096b291af763ed5721314cdd9c5c Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 28 Feb 2018 14:03:30 +0800 Subject: KVM: X86: Introduce kvm_get_msr_feature() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce kvm_get_msr_feature() to handle the msrs which are supported by different vendors and sharing the same emulation logic. 
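Taken together with the MSR-feature framework above, a userspace VMM could enumerate and read the exposed feature MSRs roughly as follows (a hedged sketch, not taken from this series; error handling and the KVM_CAP_GET_MSR_FEATURES capability check are omitted, and the list size of 16 is an arbitrary guess):

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch: dump the MSR-based features and their host-supported values. */
	static void dump_feature_msrs(void)
	{
		struct {
			struct kvm_msrs info;
			struct kvm_msr_entry entries[1];
		} msrs = {};
		struct kvm_msr_list *list;
		int kvm_fd = open("/dev/kvm", O_RDWR);
		unsigned int i, n = 16;

		/* System-scope ioctl added above: list the feature MSR indices. */
		list = calloc(1, sizeof(*list) + n * sizeof(__u32));
		list->nmsrs = n;
		ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list);

		/* KVM_GET_MSRS on the /dev/kvm fd returns the supported values. */
		for (i = 0; i < list->nmsrs; i++) {
			msrs.info.nmsrs = 1;
			msrs.entries[0].index = list->indices[i];
			ioctl(kvm_fd, KVM_GET_MSRS, &msrs);
			printf("feature MSR 0x%x = 0x%llx\n", list->indices[i],
			       (unsigned long long)msrs.entries[0].data);
		}
	}
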
Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Liran Alon Cc: Nadav Amit Cc: Borislav Petkov Cc: Tom Lendacky Signed-off-by: Wanpeng Li Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 54b4ed55945b..d97620eeb394 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1059,13 +1059,25 @@ static u32 msr_based_features[] = { static unsigned int num_msr_based_features; +static int kvm_get_msr_feature(struct kvm_msr_entry *msr) +{ + switch (msr->index) { + default: + if (kvm_x86_ops->get_msr_feature(msr)) + return 1; + } + return 0; +} + static int do_get_msr_feature(struct kvm_vcpu *vcpu, unsigned index, u64 *data) { struct kvm_msr_entry msr; + int r; msr.index = index; - if (kvm_x86_ops->get_msr_feature(&msr)) - return 1; + r = kvm_get_msr_feature(&msr); + if (r) + return r; *data = msr.data; @@ -4522,7 +4534,7 @@ static void kvm_init_msr_list(void) struct kvm_msr_entry msr; msr.index = msr_based_features[i]; - if (kvm_x86_ops->get_msr_feature(&msr)) + if (kvm_get_msr_feature(&msr)) continue; if (j < i) -- cgit v1.2.1 From 518e7b94817abed94becfe6a44f1ece0d4745afe Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Wed, 28 Feb 2018 14:03:31 +0800 Subject: KVM: X86: Allow userspace to define the microcode version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Linux (among the others) has checks to make sure that certain features aren't enabled on a certain family/model/stepping if the microcode version isn't greater than or equal to a known good version. By exposing the real microcode version, we're preventing buggy guests that don't check that they are running virtualized (i.e., they should trust the hypervisor) from disabling features that are effectively not buggy. 
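A host-initiated write is what lets the VMM propagate the real revision to each vCPU; roughly (a hedged sketch of hypothetical userspace code: vcpu_fd and host_ucode_rev are assumed to already exist, and MSR_IA32_UCODE_REV (0x0000008b) is not part of the KVM uapi headers, so it would need a local definition):

	struct {
		struct kvm_msrs info;
		struct kvm_msr_entry entries[1];
	} m = {
		.info.nmsrs	  = 1,
		.entries[0].index = MSR_IA32_UCODE_REV,
		.entries[0].data  = host_ucode_rev, /* e.g. read via the feature-MSR interface */
	};

	/* A host-initiated KVM_SET_MSRS updates vcpu->arch.microcode_version. */
	ioctl(vcpu_fd, KVM_SET_MSRS, &m);
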
Suggested-by: Filippo Sironi Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Liran Alon Cc: Nadav Amit Cc: Borislav Petkov Cc: Tom Lendacky Signed-off-by: Wanpeng Li Reviewed-by: Paolo Bonzini Signed-off-by: Radim Krčmář --- arch/x86/include/asm/kvm_host.h | 1 + arch/x86/kvm/svm.c | 4 +--- arch/x86/kvm/vmx.c | 1 + arch/x86/kvm/x86.c | 11 +++++++++-- 4 files changed, 12 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bab0694b35c3..b605a5b6a30c 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -507,6 +507,7 @@ struct kvm_vcpu_arch { u64 smi_count; bool tpr_access_reporting; u64 ia32_xss; + u64 microcode_version; /* * Paging state of the vcpu diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index f874798f8209..312f33f4ed36 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1907,6 +1907,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) u32 dummy; u32 eax = 1; + vcpu->arch.microcode_version = 0x01000065; svm->spec_ctrl = 0; if (!init_event) { @@ -3962,9 +3963,6 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = svm->spec_ctrl; break; - case MSR_IA32_UCODE_REV: - msr_info->data = 0x01000065; - break; case MSR_F15H_IC_CFG: { int family, model; diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index fafc1f6d8987..591214843046 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -5771,6 +5771,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) vmx->rmode.vm86_active = 0; vmx->spec_ctrl = 0; + vcpu->arch.microcode_version = 0x100000000ULL; vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val(); kvm_set_cr8(vcpu, 0); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d97620eeb394..11649d290b93 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -1055,6 +1055,7 @@ static unsigned num_emulated_msrs; */ static u32 msr_based_features[] = { MSR_F10H_DECFG, + MSR_IA32_UCODE_REV, }; static unsigned int num_msr_based_features; @@ -1062,6 +1063,9 @@ static unsigned int num_msr_based_features; static int kvm_get_msr_feature(struct kvm_msr_entry *msr) { switch (msr->index) { + case MSR_IA32_UCODE_REV: + rdmsrl(msr->index, msr->data); + break; default: if (kvm_x86_ops->get_msr_feature(msr)) return 1; @@ -2257,7 +2261,6 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) switch (msr) { case MSR_AMD64_NB_CFG: - case MSR_IA32_UCODE_REV: case MSR_IA32_UCODE_WRITE: case MSR_VM_HSAVE_PA: case MSR_AMD64_PATCH_LOADER: @@ -2265,6 +2268,10 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_AMD64_DC_CFG: break; + case MSR_IA32_UCODE_REV: + if (msr_info->host_initiated) + vcpu->arch.microcode_version = data; + break; case MSR_EFER: return set_efer(vcpu, data); case MSR_K7_HWCR: @@ -2560,7 +2567,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) msr_info->data = 0; break; case MSR_IA32_UCODE_REV: - msr_info->data = 0x100000000ULL; + msr_info->data = vcpu->arch.microcode_version; break; case MSR_MTRRcap: case 0x200 ... 0x2ff: -- cgit v1.2.1 From b7e31be385584afe7f073130e8e570d53c95f7fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Thu, 1 Mar 2018 15:24:25 +0100 Subject: KVM: x86: fix vcpu initialization with userspace lapic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Moving the code around broke this rare configuration. 
Use this opportunity to finally call lapic reset from vcpu reset. Reported-by: syzbot+fb7a33a4b6c35007a72b@syzkaller.appspotmail.com Suggested-by: Paolo Bonzini Fixes: 0b2e9904c159 ("KVM: x86: move LAPIC initialization after VMCS creation") Cc: stable@vger.kernel.org Signed-off-by: Radim Krčmář --- arch/x86/kvm/lapic.c | 10 ++++------ arch/x86/kvm/x86.c | 3 ++- 2 files changed, 6 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index cc5fe7a50dde..391dda8d43b7 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -2002,14 +2002,13 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value) void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event) { - struct kvm_lapic *apic; + struct kvm_lapic *apic = vcpu->arch.apic; int i; - apic_debug("%s\n", __func__); + if (!apic) + return; - ASSERT(vcpu); - apic = vcpu->arch.apic; - ASSERT(apic != NULL); + apic_debug("%s\n", __func__); /* Stop the timer in case it's a reset to an active apic */ hrtimer_cancel(&apic->lapic_timer.timer); @@ -2568,7 +2567,6 @@ void kvm_apic_accept_events(struct kvm_vcpu *vcpu) pe = xchg(&apic->pending_events, 0); if (test_bit(KVM_APIC_INIT, &pe)) { - kvm_lapic_reset(vcpu, true); kvm_vcpu_reset(vcpu, true); if (kvm_vcpu_is_bsp(apic->vcpu)) vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 11649d290b93..18b5ca7a3197 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8060,7 +8060,6 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) kvm_vcpu_mtrr_init(vcpu); vcpu_load(vcpu); kvm_vcpu_reset(vcpu, false); - kvm_lapic_reset(vcpu, false); kvm_mmu_setup(vcpu); vcpu_put(vcpu); return 0; @@ -8103,6 +8102,8 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) { + kvm_lapic_reset(vcpu, init_event); + vcpu->arch.hflags = 0; vcpu->arch.smi_pending = 0; -- cgit v1.2.1 From 581e929018ce078d0ce0b02780de2f61e858903b Mon Sep 17 00:00:00 2001 From: Frank Rowand Date: Fri, 2 Mar 2018 20:00:31 -0800 Subject: x86: devicetree: fix config option around x86_flattree_get_config() x86_flattree_get_config() is incorrectly protected by ifdef CONFIG_OF_FLATTREE. It uses of_get_flat_dt_size(), which only exists if CONFIG_OF_EARLY_FLATTREE. This issue has not been exposed previously because OF_FLATTREE did not occur unless it was selected by OF_EARLY_FLATTREE. A devicetree overlay change is selecting OF_FLATTREE directly instead of indirectly enabling it by selecting OF_EARLY_FLATTREE. This problem was exposed by a randconfig generated by the kbuild test robot, where Platform OLPC was enabled. OLPC selects OF_PROMTREE instead of OF_EARLY_FLATREE. The only other x86 platform that selects OF is X86_INTEL_CE, which does select OF_EARLY_FLATTREE. 
Signed-off-by: Frank Rowand Acked-by: Thomas Gleixner --- arch/x86/kernel/devicetree.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 25de5f6ca997..45416826f6ee 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -259,7 +259,7 @@ static void __init dtb_apic_setup(void) dtb_ioapic_setup(); } -#ifdef CONFIG_OF_FLATTREE +#ifdef CONFIG_OF_EARLY_FLATTREE static void __init x86_flattree_get_config(void) { u32 size, map_len; -- cgit v1.2.1 From 317660940fd9dddd3201c2f92e25c27902c753fa Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Fri, 2 Mar 2018 07:22:30 -0800 Subject: perf/x86/intel/uncore: Fix Skylake UPI event format There is no event extension (bit 21) for SKX UPI, so use 'event' instead of 'event_ext'. Reported-by: Stephane Eranian Signed-off-by: Kan Liang Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Fixes: cd34cd97b7b4 ("perf/x86/intel/uncore: Add Skylake server uncore support") Link: http://lkml.kernel.org/r/1520004150-4855-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snbep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 6d8044ab1060..22ec65bc033a 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3606,7 +3606,7 @@ static struct intel_uncore_type skx_uncore_imc = { }; static struct attribute *skx_upi_uncore_formats_attr[] = { - &format_attr_event_ext.attr, + &format_attr_event.attr, &format_attr_umask_ext.attr, &format_attr_edge.attr, &format_attr_inv.attr, -- cgit v1.2.1 From f6a015498dcaee72f80283cb7873d88deb07129c Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Tue, 6 Mar 2018 00:29:17 -0600 Subject: signal/x86: Include the field offsets in the build time checks Due to an oversight when refactoring siginfo_t si_pkey has been in the wrong position since 4.16-rc1. Add an explicit check of the offset of every user space field in siginfo_t and compat_siginfo_t to make a mistake like this hard to make in the future. I have run this code on 4.15 and 4.16-rc1 with the position of si_pkey fixed and all of the fields show up in the same location. Signed-off-by: "Eric W. Biederman" --- arch/x86/kernel/signal_compat.c | 65 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/signal_compat.c b/arch/x86/kernel/signal_compat.c index ac057f9b0763..0d930d8987cc 100644 --- a/arch/x86/kernel/signal_compat.c +++ b/arch/x86/kernel/signal_compat.c @@ -43,6 +43,13 @@ static inline void signal_compat_build_tests(void) BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields) != 3 * sizeof(int)); #define CHECK_CSI_OFFSET(name) BUILD_BUG_ON(_sifields_offset != offsetof(compat_siginfo_t, _sifields.name)) + BUILD_BUG_ON(offsetof(siginfo_t, si_signo) != 0); + BUILD_BUG_ON(offsetof(siginfo_t, si_errno) != 4); + BUILD_BUG_ON(offsetof(siginfo_t, si_code) != 8); + + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_signo) != 0); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_errno) != 4); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_code) != 8); /* * Ensure that the size of each si_field never changes. 
* If it does, it is a sign that the @@ -63,36 +70,94 @@ static inline void signal_compat_build_tests(void) CHECK_CSI_SIZE (_kill, 2*sizeof(int)); CHECK_SI_SIZE (_kill, 2*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0xC); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); + CHECK_CSI_OFFSET(_timer); CHECK_CSI_SIZE (_timer, 3*sizeof(int)); CHECK_SI_SIZE (_timer, 6*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_tid) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_overrun) != 0x14); + BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_tid) != 0x0C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_overrun) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14); + CHECK_CSI_OFFSET(_rt); CHECK_CSI_SIZE (_rt, 3*sizeof(int)); CHECK_SI_SIZE (_rt, 4*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); + BUILD_BUG_ON(offsetof(siginfo_t, si_value) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_value) != 0x14); + CHECK_CSI_OFFSET(_sigchld); CHECK_CSI_SIZE (_sigchld, 5*sizeof(int)); CHECK_SI_SIZE (_sigchld, 8*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_pid) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_uid) != 0x14); + BUILD_BUG_ON(offsetof(siginfo_t, si_status) != 0x18); + BUILD_BUG_ON(offsetof(siginfo_t, si_utime) != 0x20); + BUILD_BUG_ON(offsetof(siginfo_t, si_stime) != 0x28); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pid) != 0x0C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_uid) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_status) != 0x14); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_utime) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_stime) != 0x1C); + #ifdef CONFIG_X86_X32_ABI CHECK_CSI_OFFSET(_sigchld_x32); CHECK_CSI_SIZE (_sigchld_x32, 7*sizeof(int)); /* no _sigchld_x32 in the generic siginfo_t */ + BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._utime) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, _sifields._sigchld_x32._stime) != 0x20); #endif CHECK_CSI_OFFSET(_sigfault); CHECK_CSI_SIZE (_sigfault, 4*sizeof(int)); CHECK_SI_SIZE (_sigfault, 8*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_addr) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr) != 0x0C); + + BUILD_BUG_ON(offsetof(siginfo_t, si_addr_lsb) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_addr_lsb) != 0x10); + + BUILD_BUG_ON(offsetof(siginfo_t, si_lower) != 0x20); + BUILD_BUG_ON(offsetof(siginfo_t, si_upper) != 0x28); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_lower) != 0x14); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_upper) != 0x18); + + BUILD_BUG_ON(offsetof(siginfo_t, si_pkey) != 0x20); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_pkey) != 0x14); + CHECK_CSI_OFFSET(_sigpoll); CHECK_CSI_SIZE (_sigpoll, 2*sizeof(int)); CHECK_SI_SIZE (_sigpoll, 4*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_band) != 0x10); + BUILD_BUG_ON(offsetof(siginfo_t, si_fd) != 0x18); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_band) != 0x0C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_fd) != 0x10); + CHECK_CSI_OFFSET(_sigsys); CHECK_CSI_SIZE (_sigsys, 3*sizeof(int)); CHECK_SI_SIZE (_sigsys, 4*sizeof(int)); + BUILD_BUG_ON(offsetof(siginfo_t, si_call_addr) != 0x10); + 
BUILD_BUG_ON(offsetof(siginfo_t, si_syscall) != 0x18); + BUILD_BUG_ON(offsetof(siginfo_t, si_arch) != 0x1C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_call_addr) != 0x0C); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_syscall) != 0x10); + BUILD_BUG_ON(offsetof(compat_siginfo_t, si_arch) != 0x14); + /* any new si_fields should be added here */ } -- cgit v1.2.1 From 6007b080d2e2adb7af22bf29165f0594ea12b34c Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Wed, 7 Mar 2018 22:10:01 +0100 Subject: bpf, x64: increase number of passes In Cilium some of the main programs we run today are hitting 9 passes on x64's JIT compiler, and we've had cases already where we surpassed the limit where the JIT then punts the program to the interpreter instead, leading to insertion failures due to CONFIG_BPF_JIT_ALWAYS_ON or insertion failures due to the prog array owner being JITed but the program to insert not (both must have the same JITed/non-JITed property). One concrete case the program image shrunk from 12,767 bytes down to 10,288 bytes where the image converged after 16 steps. I've measured that this took 340us in the JIT until it converges on my i7-6600U. Thus, increase the original limit we had from day one where the JIT covered cBPF only back then before we run into the case (as similar with the complexity limit) where we trip over this and hit program rejections. Also add a cond_resched() into the compilation loop, the JIT process runs without any locks and may sleep anyway. Signed-off-by: Daniel Borkmann Acked-by: Alexei Starovoitov Reviewed-by: Eric Dumazet Signed-off-by: Alexei Starovoitov --- arch/x86/net/bpf_jit_comp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 45e4eb5bcbb2..ce5b2ebd5701 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1188,7 +1188,7 @@ skip_init_addrs: * may converge on the last pass. In such case do one more * pass to emit the final image */ - for (pass = 0; pass < 10 || image; pass++) { + for (pass = 0; pass < 20 || image; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); if (proglen <= 0) { image = NULL; @@ -1215,6 +1215,7 @@ skip_init_addrs: } } oldproglen = proglen; + cond_resched(); } if (bpf_jit_enable > 1) -- cgit v1.2.1 From 628df9dc5ad886b0a9b33c75a7b09710eb859ca1 Mon Sep 17 00:00:00 2001 From: Ivan Gorinov Date: Wed, 7 Mar 2018 11:46:29 -0800 Subject: x86/devicetree: Initialize device tree before using it Commit 08d53aa58cb1 added CRC32 calculation in early_init_dt_verify() and checking in late initcall of_fdt_raw_init(), making early_init_dt_verify() mandatory. The required call to early_init_dt_verify() was not added to the x86-specific implementation, causing failure to create the sysfs entry in of_fdt_raw_init(). 
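To make it clearer why the call is mandatory, here is a paraphrased sketch of
what early_init_dt_verify() does in this era (reconstructed from memory of
drivers/of/fdt.c, so details may differ): it records the blob in
initial_boot_params, which the x86 code previously assigned by hand, and
computes the CRC that of_fdt_raw_init() later checks before exporting
/sys/firmware/fdt:

	bool __init early_init_dt_verify(void *params)
	{
		if (!params || fdt_check_header(params))
			return false;

		/* Remember the blob and its checksum for the late
		 * of_fdt_raw_init() initcall. */
		initial_boot_params = params;
		of_fdt_crc32 = crc32_be(~0, initial_boot_params,
					fdt_totalsize(initial_boot_params));
		return true;
	}
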
Fixes: 08d53aa58cb1 ("of/fdt: export fdt blob as /sys/firmware/fdt") Signed-off-by: Ivan Gorinov Signed-off-by: Thomas Gleixner Cc: Mark Rutland Cc: Rob Herring Link: https://lkml.kernel.org/r/c8c7e941efc63b5d25ebf9b6350b0f3df38f6098.1520450752.git.ivan.gorinov@intel.com --- arch/x86/kernel/devicetree.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 25de5f6ca997..63d2ebc21825 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include @@ -270,14 +271,15 @@ static void __init x86_flattree_get_config(void) map_len = max(PAGE_SIZE - (initial_dtb & ~PAGE_MASK), (u64)128); - initial_boot_params = dt = early_memremap(initial_dtb, map_len); - size = of_get_flat_dt_size(); + dt = early_memremap(initial_dtb, map_len); + size = fdt_totalsize(dt); if (map_len < size) { early_memunmap(dt, map_len); - initial_boot_params = dt = early_memremap(initial_dtb, size); + dt = early_memremap(initial_dtb, size); map_len = size; } + early_init_dt_verify(dt); unflatten_and_copy_device_tree(); early_memunmap(dt, map_len); } -- cgit v1.2.1 From 0a5169add90e43ab45ab1ba34223b8583fcaf675 Mon Sep 17 00:00:00 2001 From: Ivan Gorinov Date: Wed, 7 Mar 2018 11:46:53 -0800 Subject: x86/devicetree: Fix device IRQ settings in DT IRQ parameters for the SoC devices connected directly to I/O APIC lines (without PCI IRQ routing) may be specified in the Device Tree. Called from DT IRQ parser, irq_create_fwspec_mapping() calls irq_domain_alloc_irqs() with a pointer to irq_fwspec structure as @arg. But x86-specific DT IRQ allocation code casts @arg to of_phandle_args structure pointer and crashes trying to read the IRQ parameters. The function was not converted when the mapping descriptor was changed to irq_fwspec in the generic irqdomain code. 
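For reference, the two descriptor layouts involved look roughly like this
(abridged; field names as in include/linux/irqdomain.h and include/linux/of.h
of that time, array bounds recalled from memory), which is why reinterpreting
an irq_fwspec as an of_phandle_args reads junk instead of the IRQ parameters:

	struct irq_fwspec {
		struct fwnode_handle *fwnode;
		int param_count;
		u32 param[IRQ_DOMAIN_IRQ_SPEC_PARAMS];	/* [0] pin, [1] type */
	};

	struct of_phandle_args {
		struct device_node *np;
		int args_count;
		uint32_t args[MAX_PHANDLE_ARGS];
	};
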
Fixes: 11e4438ee330 ("irqdomain: Introduce a firmware-specific IRQ specifier structure") Signed-off-by: Ivan Gorinov Signed-off-by: Thomas Gleixner Cc: Mark Rutland Cc: Rob Herring Link: https://lkml.kernel.org/r/a234dee27ea60ce76141872da0d6bdb378b2a9ee.1520450752.git.ivan.gorinov@intel.com --- arch/x86/kernel/devicetree.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 63d2ebc21825..5cd387fcc777 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -195,19 +195,22 @@ static struct of_ioapic_type of_ioapic_type[] = static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { - struct of_phandle_args *irq_data = (void *)arg; + struct irq_fwspec *fwspec = (struct irq_fwspec *)arg; struct of_ioapic_type *it; struct irq_alloc_info tmp; + int type_index; - if (WARN_ON(irq_data->args_count < 2)) + if (WARN_ON(fwspec->param_count < 2)) return -EINVAL; - if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type)) + + type_index = fwspec->param[1]; + if (type_index >= ARRAY_SIZE(of_ioapic_type)) return -EINVAL; - it = &of_ioapic_type[irq_data->args[1]]; + it = &of_ioapic_type[type_index]; ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity); tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain)); - tmp.ioapic_pin = irq_data->args[0]; + tmp.ioapic_pin = fwspec->param[0]; return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp); } -- cgit v1.2.1 From c996f3802006a585a6c3f8eaa73e375330efc0e7 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Thu, 1 Mar 2018 16:13:36 +0100 Subject: x86/MSR: Move native_* variants to msr.h ... where they belong. No functional change. 
Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Reviewed-by: Darren Kenny Cc: kvm@vger.kernel.org Link: https://lkml.kernel.org/r/20180301151336.12948-1-bp@alien8.de --- arch/x86/include/asm/microcode.h | 14 -------------- arch/x86/include/asm/msr.h | 14 ++++++++++++++ arch/x86/kvm/svm.c | 1 - arch/x86/kvm/vmx.c | 1 - 4 files changed, 14 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 7fb1047d61c7..871714e2e4c6 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h @@ -6,20 +6,6 @@ #include #include -#define native_rdmsr(msr, val1, val2) \ -do { \ - u64 __val = __rdmsr((msr)); \ - (void)((val1) = (u32)__val); \ - (void)((val2) = (u32)(__val >> 32)); \ -} while (0) - -#define native_wrmsr(msr, low, high) \ - __wrmsr(msr, low, high) - -#define native_wrmsrl(msr, val) \ - __wrmsr((msr), (u32)((u64)(val)), \ - (u32)((u64)(val) >> 32)) - struct ucode_patch { struct list_head plist; void *data; /* Intel uses only this one */ diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 30df295f6d94..77254c9c8f61 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -108,6 +108,20 @@ static inline void notrace __wrmsr(unsigned int msr, u32 low, u32 high) : : "c" (msr), "a"(low), "d" (high) : "memory"); } +#define native_rdmsr(msr, val1, val2) \ +do { \ + u64 __val = __rdmsr((msr)); \ + (void)((val1) = (u32)__val); \ + (void)((val2) = (u32)(__val >> 32)); \ +} while (0) + +#define native_wrmsr(msr, low, high) \ + __wrmsr(msr, low, high) + +#define native_wrmsrl(msr, val) \ + __wrmsr((msr), (u32)((u64)(val)), \ + (u32)((u64)(val) >> 32)) + static inline unsigned long long native_read_msr(unsigned int msr) { unsigned long long val; diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index be9c839e2c89..9d2043f94e29 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -49,7 +49,6 @@ #include #include #include -#include #include #include diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 051dab74e4e9..ee5ed44bc284 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include "trace.h" -- cgit v1.2.1 From 9558080935e0bd744d68a7e1747a7117310623cf Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 6 Mar 2018 10:49:13 +0100 Subject: x86/fault: Do not print IP in show_fault_oops() ... because __show_regs() already does that. 
Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/20180306094920.16917-3-bp@alien8.de --- arch/x86/mm/fault.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c88573d90f3e..93505990df10 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -699,7 +699,6 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code, printk(KERN_CONT "paging request"); printk(KERN_CONT " at %px\n", (void *) address); - printk(KERN_ALERT "IP: %pS\n", (void *)regs->ip); dump_pagetable(address); } -- cgit v1.2.1 From 16d1cb0bc43642a4d934631a73c5210ad2499e2f Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 6 Mar 2018 10:49:14 +0100 Subject: x86/dumpstack: Unify show_regs() The 32-bit version uses KERN_EMERG and commit b0f4c4b32c8e ("bugs, x86: Fix printk levels for panic, softlockups and stack dumps") changed the 64-bit version to KERN_DEFAULT. The same justification in that commit that those messages do not belong in the terminal, holds true for 32-bit also, so make it so. Make code_bytes static, while at it. Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/20180306094920.16917-4-bp@alien8.de --- arch/x86/include/asm/stacktrace.h | 2 -- arch/x86/kernel/dumpstack.c | 49 ++++++++++++++++++++++++++++++++++++++- arch/x86/kernel/dumpstack_32.c | 42 --------------------------------- arch/x86/kernel/dumpstack_64.c | 42 --------------------------------- 4 files changed, 48 insertions(+), 87 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h index f73706878772..133d9425fced 100644 --- a/arch/x86/include/asm/stacktrace.h +++ b/arch/x86/include/asm/stacktrace.h @@ -87,8 +87,6 @@ get_stack_pointer(struct task_struct *task, struct pt_regs *regs) void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs, unsigned long *stack, char *log_lvl); -extern unsigned int code_bytes; - /* The form of the top of the frame on the stack */ struct stack_frame { struct stack_frame *next_frame; diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index a2d8a3908670..18fa9d74c182 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -24,7 +24,7 @@ int panic_on_unrecovered_nmi; int panic_on_io_nmi; -unsigned int code_bytes = 64; +static unsigned int code_bytes = 64; static int die_counter; bool in_task_stack(unsigned long *stack, struct task_struct *task, @@ -375,3 +375,50 @@ static int __init code_bytes_setup(char *s) return 1; } __setup("code_bytes=", code_bytes_setup); + +void show_regs(struct pt_regs *regs) +{ + bool all = true; + int i; + + show_regs_print_info(KERN_DEFAULT); + + if (IS_ENABLED(CONFIG_X86_32)) + all = !user_mode(regs); + + __show_regs(regs, all); + + /* + * When in-kernel, we also print out the stack and code at the + * time of the fault.. 
+ */ + if (!user_mode(regs)) { + unsigned int code_prologue = code_bytes * 43 / 64; + unsigned int code_len = code_bytes; + unsigned char c; + u8 *ip; + + show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); + + printk(KERN_DEFAULT "Code: "); + + ip = (u8 *)regs->ip - code_prologue; + if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { + /* try starting at IP */ + ip = (u8 *)regs->ip; + code_len = code_len - code_prologue + 1; + } + for (i = 0; i < code_len; i++, ip++) { + if (ip < (u8 *)PAGE_OFFSET || + probe_kernel_address(ip, c)) { + pr_cont(" Bad RIP value."); + break; + } + if (ip == (u8 *)regs->ip) + pr_cont("<%02x> ", c); + else + pr_cont("%02x ", c); + } + } + pr_cont("\n"); +} diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c index 04170f63e3a1..cd53f3030e40 100644 --- a/arch/x86/kernel/dumpstack_32.c +++ b/arch/x86/kernel/dumpstack_32.c @@ -127,45 +127,3 @@ unknown: info->type = STACK_TYPE_UNKNOWN; return -EINVAL; } - -void show_regs(struct pt_regs *regs) -{ - int i; - - show_regs_print_info(KERN_EMERG); - __show_regs(regs, !user_mode(regs)); - - /* - * When in-kernel, we also print out the stack and code at the - * time of the fault.. - */ - if (!user_mode(regs)) { - unsigned int code_prologue = code_bytes * 43 / 64; - unsigned int code_len = code_bytes; - unsigned char c; - u8 *ip; - - show_trace_log_lvl(current, regs, NULL, KERN_EMERG); - - pr_emerg("Code:"); - - ip = (u8 *)regs->ip - code_prologue; - if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - /* try starting at IP */ - ip = (u8 *)regs->ip; - code_len = code_len - code_prologue + 1; - } - for (i = 0; i < code_len; i++, ip++) { - if (ip < (u8 *)PAGE_OFFSET || - probe_kernel_address(ip, c)) { - pr_cont(" Bad EIP value."); - break; - } - if (ip == (u8 *)regs->ip) - pr_cont(" <%02x>", c); - else - pr_cont(" %02x", c); - } - } - pr_cont("\n"); -} diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c index 563e28d14f2c..5cdb9e84da57 100644 --- a/arch/x86/kernel/dumpstack_64.c +++ b/arch/x86/kernel/dumpstack_64.c @@ -149,45 +149,3 @@ unknown: info->type = STACK_TYPE_UNKNOWN; return -EINVAL; } - -void show_regs(struct pt_regs *regs) -{ - int i; - - show_regs_print_info(KERN_DEFAULT); - __show_regs(regs, 1); - - /* - * When in-kernel, we also print out the stack and code at the - * time of the fault.. - */ - if (!user_mode(regs)) { - unsigned int code_prologue = code_bytes * 43 / 64; - unsigned int code_len = code_bytes; - unsigned char c; - u8 *ip; - - show_trace_log_lvl(current, regs, NULL, KERN_DEFAULT); - - printk(KERN_DEFAULT "Code: "); - - ip = (u8 *)regs->ip - code_prologue; - if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { - /* try starting at IP */ - ip = (u8 *)regs->ip; - code_len = code_len - code_prologue + 1; - } - for (i = 0; i < code_len; i++, ip++) { - if (ip < (u8 *)PAGE_OFFSET || - probe_kernel_address(ip, c)) { - pr_cont(" Bad RIP value."); - break; - } - if (ip == (u8 *)regs->ip) - pr_cont("<%02x> ", c); - else - pr_cont("%02x ", c); - } - } - pr_cont("\n"); -} -- cgit v1.2.1 From 63338a38db955cb4e0352c11b78732157c78d30b Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 7 Mar 2018 08:39:12 +0100 Subject: jailhouse: Provide detection for non-x86 systems Implement jailhouse_paravirt() via device tree probing on architectures != x86. Will be used by the PCI core. 
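A hypothetical sketch of such a device-tree based implementation for the !x86
case, purely to illustrate the idea (the compatible string "jailhouse,cell"
and the exact shape of the helper are assumptions and may not match the real
code):

	#include <linux/of.h>

	static inline bool jailhouse_paravirt(void)
	{
		struct device_node *np =
			of_find_compatible_node(NULL, NULL, "jailhouse,cell");

		of_node_put(np);	/* only the presence check is needed */
		return np != NULL;
	}
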
Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Reviewed-by: Juergen Gross Cc: jailhouse-dev@googlegroups.com Cc: Mark Rutland Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: Andy Shevchenko Cc: Rob Herring Cc: Bjorn Helgaas Link: https://lkml.kernel.org/r/dae9fe0c6e63141c28ca90492fa5712b4c33ffb5.1520408357.git.jan.kiszka@siemens.com --- arch/x86/include/asm/jailhouse_para.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/jailhouse_para.h b/arch/x86/include/asm/jailhouse_para.h index 875b54376689..b885a961a150 100644 --- a/arch/x86/include/asm/jailhouse_para.h +++ b/arch/x86/include/asm/jailhouse_para.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL2.0 */ /* - * Jailhouse paravirt_ops implementation + * Jailhouse paravirt detection * * Copyright (c) Siemens AG, 2015-2017 * -- cgit v1.2.1 From 690f4304104f37e473bd5e43fc5247f5cd35b225 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 7 Mar 2018 08:39:13 +0100 Subject: PCI: Scan all functions when running over Jailhouse Per PCIe r4.0, sec 7.5.1.1.9, multi-function devices are required to have a function 0. Therefore, Linux scans for devices at function 0 (devfn 0/8/16/...) and only scans for other functions if function 0 has its Multi-Function Device bit set or ARI or SR-IOV indicate there are more functions. The Jailhouse hypervisor may pass individual functions of a multi-function device to a guest without passing function 0, which means a Linux guest won't find them. Change Linux PCI probing so it scans all function numbers when running as a guest over Jailhouse. This is technically prohibited by the spec, so it is possible that PCI devices without the Multi-Function Device bit set may have unexpected behavior in response to this probe. Originally-by: Benedikt Spranger Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Acked-by: Bjorn Helgaas Cc: jailhouse-dev@googlegroups.com Cc: Benedikt Spranger Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Link: https://lkml.kernel.org/r/06e279b2a3e06cf6689ab3975f8ab592bba02362.1520408357.git.jan.kiszka@siemens.com --- arch/x86/pci/legacy.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/legacy.c b/arch/x86/pci/legacy.c index 1cb01abcb1be..dfbe6ac38830 100644 --- a/arch/x86/pci/legacy.c +++ b/arch/x86/pci/legacy.c @@ -4,6 +4,7 @@ #include #include #include +#include #include /* @@ -34,13 +35,14 @@ int __init pci_legacy_init(void) void pcibios_scan_specific_bus(int busn) { + int stride = jailhouse_paravirt() ? 1 : 8; int devfn; u32 l; if (pci_find_bus(0, busn)) return; - for (devfn = 0; devfn < 256; devfn += 8) { + for (devfn = 0; devfn < 256; devfn += stride) { if (!raw_pci_read(0, busn, devfn, PCI_VENDOR_ID, 2, &l) && l != 0x0000 && l != 0xffff) { DBG("Found device at %02x:%02x [%04x]\n", busn, devfn, l); -- cgit v1.2.1 From 6fa4a94e150be25d02547d51f03b4bc34aaec32f Mon Sep 17 00:00:00 2001 From: Otavio Pontes Date: Wed, 7 Mar 2018 08:39:14 +0100 Subject: x86/jailhouse: Enable PCI mmconfig access in inmates Use the PCI mmconfig base address exported by jailhouse in boot parameters in order to access the memory mapped PCI configuration space. 
[Jan: rebased, fixed !CONFIG_PCI_MMCONFIG, used pcibios_last_bus] Signed-off-by: Otavio Pontes Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Cc: jailhouse-dev@googlegroups.com Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: Bjorn Helgaas Link: https://lkml.kernel.org/r/2ee9e4401fa22377b3965893a558120f169be82b.1520408357.git.jan.kiszka@siemens.com --- arch/x86/include/asm/pci_x86.h | 2 ++ arch/x86/kernel/jailhouse.c | 8 ++++++++ arch/x86/pci/mmconfig-shared.c | 4 ++-- 3 files changed, 12 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h index eb66fa9cd0fc..959d618dbb17 100644 --- a/arch/x86/include/asm/pci_x86.h +++ b/arch/x86/include/asm/pci_x86.h @@ -151,6 +151,8 @@ extern int pci_mmconfig_insert(struct device *dev, u16 seg, u8 start, u8 end, phys_addr_t addr); extern int pci_mmconfig_delete(u16 seg, u8 start, u8 end); extern struct pci_mmcfg_region *pci_mmconfig_lookup(int segment, int bus); +extern struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, + int end, u64 addr); extern struct list_head pci_mmcfg_list; diff --git a/arch/x86/kernel/jailhouse.c b/arch/x86/kernel/jailhouse.c index b68fd895235a..fa183a131edc 100644 --- a/arch/x86/kernel/jailhouse.c +++ b/arch/x86/kernel/jailhouse.c @@ -124,6 +124,14 @@ static int __init jailhouse_pci_arch_init(void) if (pcibios_last_bus < 0) pcibios_last_bus = 0xff; +#ifdef CONFIG_PCI_MMCONFIG + if (setup_data.pci_mmconfig_base) { + pci_mmconfig_add(0, 0, pcibios_last_bus, + setup_data.pci_mmconfig_base); + pci_mmcfg_arch_init(); + } +#endif + return 0; } diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c index 0b40482578b8..7389db538c30 100644 --- a/arch/x86/pci/mmconfig-shared.c +++ b/arch/x86/pci/mmconfig-shared.c @@ -94,8 +94,8 @@ static struct pci_mmcfg_region *pci_mmconfig_alloc(int segment, int start, return new; } -static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, - int end, u64 addr) +struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start, + int end, u64 addr) { struct pci_mmcfg_region *new; -- cgit v1.2.1 From 55027a7772b1a9c36fb9731fc6c847b6a59c5bef Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 7 Mar 2018 08:39:15 +0100 Subject: x86: Align x86_64 PCI_MMCONFIG with 32-bit variant Allow to enable PCI_MMCONFIG when only SFI is present and make this option default on. This will help consolidating both into one Kconfig statement. 
Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Cc: jailhouse-dev@googlegroups.com Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: Andy Shevchenko Cc: Bjorn Helgaas Link: https://lkml.kernel.org/r/a2faf78c54f340f5549149e8b679c95950dae83d.1520408357.git.jan.kiszka@siemens.com --- arch/x86/Kconfig | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c1236b187824..6bd763640925 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2658,7 +2658,8 @@ config PCI_DOMAINS config PCI_MMCONFIG bool "Support mmconfig PCI config space access" - depends on X86_64 && PCI && ACPI + default y + depends on X86_64 && PCI && (ACPI || SFI) config PCI_CNB20LE_QUIRK bool "Read CNB20LE Host Bridge Windows" if EXPERT -- cgit v1.2.1 From b45c9f3656b691ab7324c7eaefb3416bea0326d8 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 7 Mar 2018 08:39:16 +0100 Subject: x86: Consolidate PCI_MMCONFIG configs Since e279b6c1d329 ("x86: start unification of arch/x86/Kconfig.*"), there exist two PCI_MMCONFIG entries, one from the original i386 and another from x86_64. Consolidate both entries into a single one. Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Cc: jailhouse-dev@googlegroups.com Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: Andy Shevchenko Cc: Bjorn Helgaas Link: https://lkml.kernel.org/r/2a0ccd51ea6f7996e07162918228e23bdc1fbb03.1520408357.git.jan.kiszka@siemens.com --- arch/x86/Kconfig | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6bd763640925..573b7c49d9f8 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2640,8 +2640,10 @@ config PCI_DIRECT depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC || PCI_GOMMCONFIG)) config PCI_MMCONFIG - def_bool y - depends on X86_32 && PCI && (ACPI || SFI) && (PCI_GOMMCONFIG || PCI_GOANY) + bool "Support mmconfig PCI config space access" if X86_64 + default y + depends on PCI && (ACPI || SFI) + depends on X86_64 || (PCI_GOANY || PCI_GOMMCONFIG) config PCI_OLPC def_bool y @@ -2656,11 +2658,6 @@ config PCI_DOMAINS def_bool y depends on PCI -config PCI_MMCONFIG - bool "Support mmconfig PCI config space access" - default y - depends on X86_64 && PCI && (ACPI || SFI) - config PCI_CNB20LE_QUIRK bool "Read CNB20LE Host Bridge Windows" if EXPERT depends on PCI -- cgit v1.2.1 From 8364e1f8379c7f9d3e63f127a585f889906b3e10 Mon Sep 17 00:00:00 2001 From: Jan Kiszka Date: Wed, 7 Mar 2018 08:39:17 +0100 Subject: x86/jailhouse: Allow to use PCI_MMCONFIG without ACPI Jailhouse does not use ACPI, but it does support MMCONFIG. Make sure the latter can be built without having to enable ACPI as well. Primarily, its required to make the AMD mmconf-fam10h_64 depend upon MMCONFIG and ACPI, instead of just the former. Saves some bytes in the Jailhouse non-root kernel. 
Signed-off-by: Jan Kiszka Signed-off-by: Thomas Gleixner Cc: jailhouse-dev@googlegroups.com Cc: linux-pci@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Cc: Andy Shevchenko Cc: Bjorn Helgaas Link: https://lkml.kernel.org/r/788bbd5325d1922235e9562c213057425fbc548c.1520408357.git.jan.kiszka@siemens.com --- arch/x86/Kconfig | 6 +++++- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/cpu/amd.c | 2 +- 3 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 573b7c49d9f8..7f00f60a2028 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2642,7 +2642,7 @@ config PCI_DIRECT config PCI_MMCONFIG bool "Support mmconfig PCI config space access" if X86_64 default y - depends on PCI && (ACPI || SFI) + depends on PCI && (ACPI || SFI || JAILHOUSE_GUEST) depends on X86_64 || (PCI_GOANY || PCI_GOMMCONFIG) config PCI_OLPC @@ -2658,6 +2658,10 @@ config PCI_DOMAINS def_bool y depends on PCI +config MMCONF_FAM10H + def_bool y + depends on X86_64 && PCI_MMCONFIG && ACPI + config PCI_CNB20LE_QUIRK bool "Read CNB20LE Host Bridge Windows" if EXPERT depends on PCI diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 29786c87e864..73ccf80c09a2 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -146,6 +146,6 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_GART_IOMMU) += amd_gart_64.o aperture_64.o obj-$(CONFIG_CALGARY_IOMMU) += pci-calgary_64.o tce_64.o - obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o + obj-$(CONFIG_MMCONF_FAM10H) += mmconf-fam10h_64.o obj-y += vsmp_64.o endif diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index f0e6456ca7d3..12bc0a1139da 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -716,7 +716,7 @@ static void init_amd_k8(struct cpuinfo_x86 *c) static void init_amd_gh(struct cpuinfo_x86 *c) { -#ifdef CONFIG_X86_64 +#ifdef CONFIG_MMCONF_FAM10H /* do this for boot cpu */ if (c == &boot_cpu_data) check_enable_amd_mmconf_dmi(); -- cgit v1.2.1 From fa94d0c6e0f3431523f5701084d799c77c7d4a4f Mon Sep 17 00:00:00 2001 From: Tony Luck Date: Tue, 6 Mar 2018 15:21:41 +0100 Subject: x86/MCE: Save microcode revision in machine check records Updating microcode used to be relatively rare. Now that it has become more common we should save the microcode version in a machine check record to make sure that those people looking at the error have this important information bundled with the rest of the logged information. [ Borislav: Simplify a bit. 
] Signed-off-by: Tony Luck Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Yazen Ghannam Cc: linux-edac Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20180301233449.24311-1-tony.luck@intel.com --- arch/x86/include/uapi/asm/mce.h | 1 + arch/x86/kernel/cpu/mcheck/mce.c | 4 +++- 2 files changed, 4 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 91723461dc1f..435db58a7bad 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -30,6 +30,7 @@ struct mce { __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ __u64 ppin; /* Protected Processor Inventory Number */ + __u32 microcode;/* Microcode revision */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8ff94d1e2dce..b3323cab9139 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -130,6 +130,8 @@ void mce_setup(struct mce *m) if (this_cpu_has(X86_FEATURE_INTEL_PPIN)) rdmsrl(MSR_PPIN, m->ppin); + + m->microcode = boot_cpu_data.microcode; } DEFINE_PER_CPU(struct mce, injectm); @@ -262,7 +264,7 @@ static void __print_mce(struct mce *m) */ pr_emerg(HW_ERR "PROCESSOR %u:%x TIME %llu SOCKET %u APIC %x microcode %x\n", m->cpuvendor, m->cpuid, m->time, m->socketid, m->apicid, - cpu_data(m->extcpu).microcode); + m->microcode); } static void print_mce(struct mce *m) -- cgit v1.2.1 From b3b7c4795ccab5be71f080774c45bbbcc75c2aaf Mon Sep 17 00:00:00 2001 From: Seunghun Han Date: Tue, 6 Mar 2018 15:21:43 +0100 Subject: x86/MCE: Serialize sysfs changes The check_interval file in /sys/devices/system/machinecheck/machinecheck directory is a global timer value for MCE polling. If it is changed by one CPU, mce_restart() broadcasts the event to other CPUs to delete and restart the MCE polling timer and __mcheck_cpu_init_timer() reinitializes the mce_timer variable. If more than one CPU writes a specific value to the check_interval file concurrently, mce_timer is not protected from such concurrent accesses and all kinds of explosions happen. Since only root can write to those sysfs variables, the issue is not a big deal security-wise. However, concurrent writes to these configuration variables is void of reason so the proper thing to do is to serialize the access with a mutex. 
Boris: - Make store_int_with_restart() use device_store_ulong() to filter out negative intervals - Limit min interval to 1 second - Correct locking - Massage commit message Signed-off-by: Seunghun Han Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Greg Kroah-Hartman Cc: Tony Luck Cc: linux-edac Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/20180302202706.9434-1-kkamagui@gmail.com --- arch/x86/kernel/cpu/mcheck/mce.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index b3323cab9139..466f47301334 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -56,6 +56,9 @@ static DEFINE_MUTEX(mce_log_mutex); +/* sysfs synchronization */ +static DEFINE_MUTEX(mce_sysfs_mutex); + #define CREATE_TRACE_POINTS #include @@ -2088,6 +2091,7 @@ static ssize_t set_ignore_ce(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.ignore_ce ^ !!new) { if (new) { /* disable ce features */ @@ -2100,6 +2104,8 @@ static ssize_t set_ignore_ce(struct device *s, on_each_cpu(mce_enable_ce, (void *)1, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2112,6 +2118,7 @@ static ssize_t set_cmci_disabled(struct device *s, if (kstrtou64(buf, 0, &new) < 0) return -EINVAL; + mutex_lock(&mce_sysfs_mutex); if (mca_cfg.cmci_disabled ^ !!new) { if (new) { /* disable cmci */ @@ -2123,6 +2130,8 @@ static ssize_t set_cmci_disabled(struct device *s, on_each_cpu(mce_enable_ce, NULL, 1); } } + mutex_unlock(&mce_sysfs_mutex); + return size; } @@ -2130,8 +2139,19 @@ static ssize_t store_int_with_restart(struct device *s, struct device_attribute *attr, const char *buf, size_t size) { - ssize_t ret = device_store_int(s, attr, buf, size); + unsigned long old_check_interval = check_interval; + ssize_t ret = device_store_ulong(s, attr, buf, size); + + if (check_interval == old_check_interval) + return ret; + + if (check_interval < 1) + check_interval = 1; + + mutex_lock(&mce_sysfs_mutex); mce_restart(); + mutex_unlock(&mce_sysfs_mutex); + return ret; } -- cgit v1.2.1 From 24193c5de470358d0ed70e1f8e58fdaf83823b95 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 6 Mar 2018 15:21:42 +0100 Subject: x86/MCE: Cleanup and complete struct mce fields definitions The struct is part of the uapi, document that fact and all fields properly and fix formatting. No functional changes. Signed-off-by: Borislav Petkov Signed-off-by: Thomas Gleixner Cc: Tony Luck Link: https://lkml.kernel.org/r/20180306142143.19990-3-bp@alien8.de --- arch/x86/include/uapi/asm/mce.h | 52 ++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/uapi/asm/mce.h b/arch/x86/include/uapi/asm/mce.h index 435db58a7bad..955c2a2e1cf9 100644 --- a/arch/x86/include/uapi/asm/mce.h +++ b/arch/x86/include/uapi/asm/mce.h @@ -5,32 +5,36 @@ #include #include -/* Fields are zero when not available */ +/* + * Fields are zero when not available. Also, this struct is shared with + * userspace mcelog and thus must keep existing fields at current offsets. 
+ * Only add new fields to the end of the structure + */ struct mce { - __u64 status; - __u64 misc; - __u64 addr; - __u64 mcgstatus; - __u64 ip; - __u64 tsc; /* cpu time stamp counter */ - __u64 time; /* wall time_t when error was detected */ - __u8 cpuvendor; /* cpu vendor as encoded in system.h */ - __u8 inject_flags; /* software inject flags */ - __u8 severity; + __u64 status; /* Bank's MCi_STATUS MSR */ + __u64 misc; /* Bank's MCi_MISC MSR */ + __u64 addr; /* Bank's MCi_ADDR MSR */ + __u64 mcgstatus; /* Machine Check Global Status MSR */ + __u64 ip; /* Instruction Pointer when the error happened */ + __u64 tsc; /* CPU time stamp counter */ + __u64 time; /* Wall time_t when error was detected */ + __u8 cpuvendor; /* Kernel's X86_VENDOR enum */ + __u8 inject_flags; /* Software inject flags */ + __u8 severity; /* Error severity */ __u8 pad; - __u32 cpuid; /* CPUID 1 EAX */ - __u8 cs; /* code segment */ - __u8 bank; /* machine check bank */ - __u8 cpu; /* cpu number; obsolete; use extcpu now */ - __u8 finished; /* entry is valid */ - __u32 extcpu; /* linux cpu number that detected the error */ - __u32 socketid; /* CPU socket ID */ - __u32 apicid; /* CPU initial apic ID */ - __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ - __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ - __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ - __u64 ppin; /* Protected Processor Inventory Number */ - __u32 microcode;/* Microcode revision */ + __u32 cpuid; /* CPUID 1 EAX */ + __u8 cs; /* Code segment */ + __u8 bank; /* Machine check bank reporting the error */ + __u8 cpu; /* CPU number; obsoleted by extcpu */ + __u8 finished; /* Entry is valid */ + __u32 extcpu; /* Linux CPU number that detected the error */ + __u32 socketid; /* CPU socket ID */ + __u32 apicid; /* CPU initial APIC ID */ + __u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */ + __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */ + __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */ + __u64 ppin; /* Protected Processor Inventory Number */ + __u32 microcode; /* Microcode revision */ }; #define MCE_GET_RECORD_LEN _IOR('M', 1, int) -- cgit v1.2.1 From f605cfca8c39ffa2b98c06d2b9f30ba64f1e54e3 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Thu, 1 Mar 2018 12:54:54 -0500 Subject: perf/x86/intel: Fix large period handling on Broadwell CPUs Large fixed period values could be truncated on Broadwell, for example: perf record -e cycles -c 10000000000 Here the fixed period is 0x2540BE400, but the period which finally applied is 0x540BE400 - which is wrong. The reason is that x86_pmu::limit_period() uses an u32 parameter, so the high 32 bits of 'period' get truncated. This bug was introduced in: commit 294fe0f52a44 ("perf/x86/intel: Add INST_RETIRED.ALL workarounds") It's safe to use u64 instead of u32: - Although the 'left' is s64, the value of 'left' must be positive when calling limit_period(). - bdw_limit_period() only modifies the lowest 6 bits, it doesn't touch the higher 32 bits. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Fixes: 294fe0f52a44 ("perf/x86/intel: Add INST_RETIRED.ALL workarounds") Link: http://lkml.kernel.org/r/1519926894-3520-1-git-send-email-kan.liang@linux.intel.com [ Rewrote unacceptably bad changelog. 
] Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 2 +- arch/x86/events/perf_event.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 56457cb73448..6b6c1717787d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3188,7 +3188,7 @@ glp_get_event_constraints(struct cpu_hw_events *cpuc, int idx, * Therefore the effective (average) period matches the requested period, * despite coarser hardware granularity. */ -static unsigned bdw_limit_period(struct perf_event *event, unsigned left) +static u64 bdw_limit_period(struct perf_event *event, u64 left) { if ((event->hw.config & INTEL_ARCH_EVENT_MASK) == X86_CONFIG(.event=0xc0, .umask=0x01)) { diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78f91ec1056e..6495ffd57e3e 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -557,7 +557,7 @@ struct x86_pmu { struct x86_pmu_quirk *quirks; int perfctr_second_write; bool late_ack; - unsigned (*limit_period)(struct perf_event *event, unsigned l); + u64 (*limit_period)(struct perf_event *event, u64 l); /* * sysfs attrs -- cgit v1.2.1 From 82d71ed0277efc45360828af8c4e4d40e1b45352 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 20 Feb 2018 02:11:50 -0800 Subject: perf/x86/intel: Properly save/restore the PMU state in the NMI handler The PMU is disabled in intel_pmu_handle_irq(), but cpuc->enabled is not updated accordingly. This is fine in current usage because no-one checks it - but fix it for future code: for example, the drain_pebs() will be modified to fix an auto-reload bug. Properly save/restore the old PMU state. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Cc: kernel test robot Link: http://lkml.kernel.org/r/6f44ee84-56f8-79f1-559b-08e371eaeb78@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 6b6c1717787d..1ba7ca7b675d 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2201,9 +2201,15 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) int bit, loops; u64 status; int handled; + int pmu_enabled; cpuc = this_cpu_ptr(&cpu_hw_events); + /* + * Save the PMU state. + * It needs to be restored when leaving the handler. + */ + pmu_enabled = cpuc->enabled; /* * No known reason to not always do late ACK, * but just in case do it opt-in. @@ -2211,6 +2217,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) if (!x86_pmu.late_ack) apic_write(APIC_LVTPC, APIC_DM_NMI); intel_bts_disable_local(); + cpuc->enabled = 0; __intel_pmu_disable_all(); handled = intel_pmu_drain_bts_buffer(); handled += intel_bts_interrupt(); @@ -2320,7 +2327,8 @@ again: done: /* Only restore PMU state when it's active. See x86_pmu_disable(). 
*/ - if (cpuc->enabled) + cpuc->enabled = pmu_enabled; + if (pmu_enabled) __intel_pmu_enable_all(0, true); intel_bts_enable_local(); -- cgit v1.2.1 From d31fc13fdcb20e1c317f9a7dd6273c18fbd58308 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:31 -0800 Subject: perf/x86/intel: Fix event update for auto-reload There is a bug when reading event->count with large PEBS enabled. Here is an example: # ./read_count 0x71f0 0x122c0 0x1000000001c54 0x100000001257d 0x200000000bdc5 In fixed period mode, the auto-reload mechanism could be enabled for PEBS events, but the calculation of event->count does not take the auto-reload values into account. Anyone who reads event->count will get the wrong result, e.g x86_pmu_read(). This bug was introduced with the auto-reload mechanism enabled since commit: 851559e35fd5 ("perf/x86/intel: Use the PEBS auto reload mechanism when possible") Introduce intel_pmu_save_and_restart_reload() to calculate the event->count only for auto-reload. Since the counter increments a negative counter value and overflows on the sign switch, giving the interval: [-period, 0] the difference between two consequtive reads is: A) value2 - value1; when no overflows have happened in between, B) (0 - value1) + (value2 - (-period)); when one overflow happened in between, C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); when @n overflows happened in between. Here A) is the obvious difference, B) is the extension to the discrete interval, where the first term is to the top of the interval and the second term is from the bottom of the next interval and C) the extension to multiple intervals, where the middle term is the whole intervals covered. The equation for all cases is: value2 - value1 + n * period Previously the event->count is updated right before the sample output. But for case A, there is no PEBS record ready. It needs to be specially handled. Remove the auto-reload code from x86_perf_event_set_period() since we'll not longer call that function in this case. 
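A minimal stand-alone sketch of that arithmetic (illustrative names, not the
kernel code itself): given two sign-extended counter reads and @n reloads in
between, the accumulated delta is value2 - value1 + n * period:

	static inline s64 auto_reload_delta(u64 prev_raw, u64 new_raw,
					    int n, u64 period, int cntval_bits)
	{
		int shift = 64 - cntval_bits;
		s64 old = (s64)(prev_raw << shift) >> shift;	/* sign-extend */
		s64 new = (s64)(new_raw  << shift) >> shift;

		return new - old + (s64)n * (s64)period;
	}

For example, with period = 100, n = 2, value1 = -30 and value2 = -80 the delta
is -80 - (-30) + 2 * 100 = 150: 30 events up to the first reload, 100 across
the full middle interval, and 20 after the last reload.
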
Based-on-code-from: Peter Zijlstra (Intel) Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Fixes: 851559e35fd5 ("perf/x86/intel: Use the PEBS auto reload mechanism when possible") Link: http://lkml.kernel.org/r/1518474035-21006-2-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 15 +++----- arch/x86/events/intel/ds.c | 92 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 94 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 140d33288e78..5a3ccd1715e2 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1156,16 +1156,13 @@ int x86_perf_event_set_period(struct perf_event *event) per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; - if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) || - local64_read(&hwc->prev_count) != (u64)-left) { - /* - * The hw event starts counting from this event offset, - * mark it to be able to extra future deltas: - */ - local64_set(&hwc->prev_count, (u64)-left); + /* + * The hw event starts counting from this event offset, + * mark it to be able to extra future deltas: + */ + local64_set(&hwc->prev_count, (u64)-left); - wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); - } + wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask); /* * Due to erratum on certan cpu we need diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 18c25ab28557..f39a4df3a7bd 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1306,17 +1306,84 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } +/* + * Special variant of intel_pmu_save_and_restart() for auto-reload. + */ +static int +intel_pmu_save_and_restart_reload(struct perf_event *event, int count) +{ + struct hw_perf_event *hwc = &event->hw; + int shift = 64 - x86_pmu.cntval_bits; + u64 period = hwc->sample_period; + u64 prev_raw_count, new_raw_count; + s64 new, old; + + WARN_ON(!period); + + /* + * drain_pebs() only happens when the PMU is disabled. + */ + WARN_ON(this_cpu_read(cpu_hw_events.enabled)); + + prev_raw_count = local64_read(&hwc->prev_count); + rdpmcl(hwc->event_base_rdpmc, new_raw_count); + local64_set(&hwc->prev_count, new_raw_count); + + /* + * Since the counter increments a negative counter value and + * overflows on the sign switch, giving the interval: + * + * [-period, 0] + * + * the difference between two consequtive reads is: + * + * A) value2 - value1; + * when no overflows have happened in between, + * + * B) (0 - value1) + (value2 - (-period)); + * when one overflow happened in between, + * + * C) (0 - value1) + (n - 1) * (period) + (value2 - (-period)); + * when @n overflows happened in between. + * + * Here A) is the obvious difference, B) is the extension to the + * discrete interval, where the first term is to the top of the + * interval and the second term is from the bottom of the next + * interval and C) the extension to multiple intervals, where the + * middle term is the whole intervals covered. 
+ * + * An equivalent of C, by reduction, is: + * + * value2 - value1 + n * period + */ + new = ((s64)(new_raw_count << shift) >> shift); + old = ((s64)(prev_raw_count << shift) >> shift); + local64_add(new - old + count * period, &event->count); + + perf_event_update_userpage(event); + + return 0; +} + static void __intel_pmu_pebs_event(struct perf_event *event, struct pt_regs *iregs, void *base, void *top, int bit, int count) { + struct hw_perf_event *hwc = &event->hw; struct perf_sample_data data; struct pt_regs regs; void *at = get_next_pebs_record_by_bit(base, top, bit); - if (!intel_pmu_save_and_restart(event) && - !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)) + if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) { + /* + * Now, auto-reload is only enabled in fixed period mode. + * The reload value is always hwc->sample_period. + * May need to change it, if auto-reload is enabled in + * freq mode later. + */ + intel_pmu_save_and_restart_reload(event, count); + } else if (!intel_pmu_save_and_restart(event)) return; while (count > 1) { @@ -1368,8 +1435,11 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs) return; n = top - at; - if (n <= 0) + if (n <= 0) { + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); return; + } __intel_pmu_pebs_event(event, iregs, at, top, 0, n); } @@ -1392,8 +1462,22 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs) ds->pebs_index = ds->pebs_buffer_base; - if (unlikely(base >= top)) + if (unlikely(base >= top)) { + /* + * The drain_pebs() could be called twice in a short period + * for auto-reload event in pmu::read(). There are no + * overflows have happened in between. + * It needs to call intel_pmu_save_and_restart_reload() to + * update the event->count for this case. + */ + for_each_set_bit(bit, (unsigned long *)&cpuc->pebs_enabled, + x86_pmu.max_pebs_events) { + event = cpuc->events[bit]; + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_save_and_restart_reload(event, 0); + } return; + } for (at = base; at < top; at += x86_pmu.pebs_record_size) { struct pebs_record_nhm *p = at; -- cgit v1.2.1 From bcfbe5c41d630ce6b74da45134cea484248b515a Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:32 -0800 Subject: perf/x86: Introduce a ->read() callback in 'struct x86_pmu' Auto-reload needs to be specially handled when reading event counts. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/1518474035-21006-3-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 2 ++ arch/x86/events/perf_event.h | 1 + 2 files changed, 3 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 5a3ccd1715e2..00a6251981d2 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1881,6 +1881,8 @@ early_initcall(init_hw_perf_events); static inline void x86_pmu_read(struct perf_event *event) { + if (x86_pmu.read) + return x86_pmu.read(event); x86_perf_event_update(event); } diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 6495ffd57e3e..d445f0026989 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -520,6 +520,7 @@ struct x86_pmu { void (*disable)(struct perf_event *); void (*add)(struct perf_event *); void (*del)(struct perf_event *); + void (*read)(struct perf_event *event); int (*hw_config)(struct perf_event *event); int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign); unsigned eventsel; -- cgit v1.2.1 From 5bee2cc69d986e20808c93c46f7b6aef51edd827 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:33 -0800 Subject: perf/x86/intel/ds: Introduce ->read() function for auto-reload events and flush the PEBS buffer there There is no way to get exact auto-reload times and values which are needed for event updates unless we flush the PEBS buffer. Introduce intel_pmu_auto_reload_read() to drain the PEBS buffer for auto reload event. To prevent races with the hardware, we can only call drain_pebs() when the PMU is disabled. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Link: http://lkml.kernel.org/r/1518474035-21006-4-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/ds.c | 9 +++++++++ arch/x86/events/perf_event.h | 2 ++ 2 files changed, 11 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index f39a4df3a7bd..73844025adaf 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1306,6 +1306,15 @@ get_next_pebs_record_by_bit(void *base, void *top, int bit) return NULL; } +void intel_pmu_auto_reload_read(struct perf_event *event) +{ + WARN_ON(!(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)); + + perf_pmu_disable(event->pmu); + intel_pmu_drain_pebs_buffer(); + perf_pmu_enable(event->pmu); +} + /* * Special variant of intel_pmu_save_and_restart() for auto-reload. 
*/ diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index d445f0026989..91643472f385 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -924,6 +924,8 @@ void intel_pmu_pebs_disable_all(void); void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in); +void intel_pmu_auto_reload_read(struct perf_event *event); + void intel_ds_init(void); void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in); -- cgit v1.2.1 From ceb90d9e0248947839a0ff4bee98cf28695a6020 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:34 -0800 Subject: perf/x86/intel: Fix PMU read for auto-reload Auto-reload events needs to be specially handled in event count read. Auto-reload is only available for intel_pmu. Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Fixes: b8241d20699e ("perf/x86/intel: Implement batched PEBS interrupt handling (large PEBS interrupt threshold)") Link: http://lkml.kernel.org/r/1518474035-21006-5-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 1ba7ca7b675d..41c68d337e84 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2060,6 +2060,14 @@ static void intel_pmu_del_event(struct perf_event *event) intel_pmu_pebs_del(event); } +static void intel_pmu_read_event(struct perf_event *event) +{ + if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD) + intel_pmu_auto_reload_read(event); + else + x86_perf_event_update(event); +} + static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) { int idx = hwc->idx - INTEL_PMC_IDX_FIXED; @@ -3503,6 +3511,7 @@ static __initconst const struct x86_pmu intel_pmu = { .disable = intel_pmu_disable_event, .add = intel_pmu_add_event, .del = intel_pmu_del_event, + .read = intel_pmu_read_event, .hw_config = intel_pmu_hw_config, .schedule_events = x86_schedule_events, .eventsel = MSR_ARCH_PERFMON_EVENTSEL0, -- cgit v1.2.1 From 1af22eba248efe2de25658041a80a3d40fb3e92e Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:35 -0800 Subject: perf/x86/intel: Disable userspace RDPMC usage for large PEBS Userspace RDPMC cannot possibly work for large PEBS, which was introduced in: b8241d20699e ("perf/x86/intel: Implement batched PEBS interrupt handling (large PEBS interrupt threshold)") When the PEBS interrupt threshold is larger than one, there is no way to get exact auto-reload times and value for userspace RDPMC. Disable the userspace RDPMC usage when large PEBS is enabled. The only exception is when the PEBS interrupt threshold is 1, in which case user-space RDPMC works well even with auto-reload events. 
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Fixes: b8241d20699e ("perf/x86/intel: Implement batched PEBS interrupt handling (large PEBS interrupt threshold)") Link: http://lkml.kernel.org/r/1518474035-21006-6-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 00a6251981d2..9c86e10f1196 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2117,7 +2117,8 @@ static int x86_pmu_event_init(struct perf_event *event) event->destroy(event); } - if (READ_ONCE(x86_pmu.attr_rdpmc)) + if (READ_ONCE(x86_pmu.attr_rdpmc) && + !(event->hw.flags & PERF_X86_EVENT_FREERUNNING)) event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; return err; -- cgit v1.2.1 From 9f66d8d73e654c5f867daa6aa186300ecaf49d3a Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Thu, 8 Mar 2018 08:00:14 +0000 Subject: x86/efi: Replace GFP_ATOMIC with GFP_KERNEL in efi_query_variable_store() efi_query_variable_store() does an atomic kzalloc() unnecessarily, because we can never get this far when called in an atomic context, namely when nonblocking == 1. Replace it with GFP_KERNEL. This was found by the DCNS static analysis tool written by myself. Signed-off-by: Jia-Ju Bai Signed-off-by: Ard Biesheuvel Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20180308080020.22828-7-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- arch/x86/platform/efi/quirks.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 5b513ccffde4..1ef11c26f79b 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -177,7 +177,7 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, * that by attempting to use more space than is available. */ unsigned long dummy_size = remaining_size + 1024; - void *dummy = kzalloc(dummy_size, GFP_ATOMIC); + void *dummy = kzalloc(dummy_size, GFP_KERNEL); if (!dummy) return EFI_OUT_OF_RESOURCES; -- cgit v1.2.1 From f779ca740f25c8a6a72d951334f9efc3158a318b Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 8 Mar 2018 08:00:20 +0000 Subject: efi: Make const array 'apple' static Don't populate the const read-only array 'buf' on the stack but instead make it static. 
Makes the object code smaller by 64 bytes: Before: text data bss dec hex filename 9264 1 16 9281 2441 arch/x86/boot/compressed/eboot.o After: text data bss dec hex filename 9200 1 16 9217 2401 arch/x86/boot/compressed/eboot.o (GCC version 7.2.0 x86_64) Signed-off-by: Colin Ian King Signed-off-by: Ard Biesheuvel Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20180308080020.22828-13-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/eboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index 886a9115af62..f2251c1c9853 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -423,7 +423,7 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) static void setup_quirks(struct boot_params *boot_params) { - efi_char16_t const apple[] = { 'A', 'p', 'p', 'l', 'e', 0 }; + static efi_char16_t const apple[] = { 'A', 'p', 'p', 'l', 'e', 0 }; efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long) efi_table_attr(efi_system_table, fw_vendor, sys_table); -- cgit v1.2.1 From a403d798182f4f7be5e9bab56cfa37e9828fd92a Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Feb 2018 21:04:47 +0300 Subject: x86/boot/compressed/64: Describe the logic behind the LA57 check The patch explains the LA57 check in more details. Tested-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Cyrill Gorcunov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180226180451.86788-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/pgtable_64.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 3f1697fcc7a8..45c76eff2718 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -18,10 +18,22 @@ struct paging_config paging_prepare(void) { struct paging_config paging_config = {}; - /* Check if LA57 is desired and supported */ - if (IS_ENABLED(CONFIG_X86_5LEVEL) && native_cpuid_eax(0) >= 7 && - (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) + /* + * Check if LA57 is desired and supported. + * + * There are two parts to the check: + * - if the kernel supports 5-level paging: CONFIG_X86_5LEVEL=y + * - if the machine supports 5-level paging: + * + CPUID leaf 7 is supported + * + the leaf has the feature bit set + * + * That's substitute for boot_cpu_has() in early boot code. + */ + if (IS_ENABLED(CONFIG_X86_5LEVEL) && + native_cpuid_eax(0) >= 7 && + (native_cpuid_ecx(7) & (1 << (X86_FEATURE_LA57 & 31)))) { paging_config.l5_required = 1; + } return paging_config; } -- cgit v1.2.1 From 3548e131ec6a82208f36e68d31947b0fe244c7a7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Feb 2018 21:04:48 +0300 Subject: x86/boot/compressed/64: Find a place for 32-bit trampoline If a bootloader enables 64-bit mode with 4-level paging, we might need to switch over to 5-level paging. The switching requires the disabling of paging, which works fine if kernel itself is loaded below 4G. 
But if the bootloader puts the kernel above 4G (not sure if anybody does this), we would lose control as soon as paging is disabled, because the code becomes unreachable to the CPU. To handle the situation, we need a trampoline in lower memory that would take care of switching on 5-level paging. This patch finds a spot in low memory for a trampoline. The heuristic is based on code in reserve_bios_regions(). We find the end of low memory based on BIOS and EBDA start addresses. The trampoline is put just before end of low memory. It's mimic approach taken to allocate memory for realtime trampoline. Tested-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Cyrill Gorcunov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180226180451.86788-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/misc.c | 6 ++++++ arch/x86/boot/compressed/pgtable.h | 11 +++++++++++ arch/x86/boot/compressed/pgtable_64.c | 34 ++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+) create mode 100644 arch/x86/boot/compressed/pgtable.h (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c index b50c42455e25..8e4b55dd5df9 100644 --- a/arch/x86/boot/compressed/misc.c +++ b/arch/x86/boot/compressed/misc.c @@ -14,6 +14,7 @@ #include "misc.h" #include "error.h" +#include "pgtable.h" #include "../string.h" #include "../voffset.h" @@ -372,6 +373,11 @@ asmlinkage __visible void *extract_kernel(void *rmode, memptr heap, debug_putaddr(output_len); debug_putaddr(kernel_total_size); +#ifdef CONFIG_X86_64 + /* Report address of 32-bit trampoline */ + debug_putaddr(trampoline_32bit); +#endif + /* * The memory hole needed for the kernel is the larger of either * the entire decompressed kernel plus relocation table, or the diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h new file mode 100644 index 000000000000..57722a2fe2a0 --- /dev/null +++ b/arch/x86/boot/compressed/pgtable.h @@ -0,0 +1,11 @@ +#ifndef BOOT_COMPRESSED_PAGETABLE_H +#define BOOT_COMPRESSED_PAGETABLE_H + +#define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) + +#ifndef __ASSEMBLER__ + +extern unsigned long *trampoline_32bit; + +#endif /* __ASSEMBLER__ */ +#endif /* BOOT_COMPRESSED_PAGETABLE_H */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 45c76eff2718..21d5cc1cd5fa 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,4 +1,5 @@ #include +#include "pgtable.h" /* * __force_order is used by special_insns.h asm code to force instruction @@ -9,14 +10,27 @@ */ unsigned long __force_order; +#define BIOS_START_MIN 0x20000U /* 128K, less than this is insane */ +#define BIOS_START_MAX 0x9f000U /* 640K, absolute maximum */ + struct paging_config { unsigned long trampoline_start; unsigned long l5_required; }; +/* + * Trampoline address will be printed by extract_kernel() for debugging + * purposes. + * + * Avoid putting the pointer into .bss as it will be cleared between + * paging_prepare() and extract_kernel(). + */ +unsigned long *trampoline_32bit __section(.data); + struct paging_config paging_prepare(void) { struct paging_config paging_config = {}; + unsigned long bios_start, ebda_start; /* * Check if LA57 is desired and supported. 
@@ -35,5 +49,25 @@ struct paging_config paging_prepare(void) paging_config.l5_required = 1; } + /* + * Find a suitable spot for the trampoline. + * This code is based on reserve_bios_regions(). + */ + + ebda_start = *(unsigned short *)0x40e << 4; + bios_start = *(unsigned short *)0x413 << 10; + + if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX) + bios_start = BIOS_START_MAX; + + if (ebda_start > BIOS_START_MIN && ebda_start < bios_start) + bios_start = ebda_start; + + /* Place the trampoline just below the end of low memory, aligned to 4k */ + paging_config.trampoline_start = bios_start - TRAMPOLINE_32BIT_SIZE; + paging_config.trampoline_start = round_down(paging_config.trampoline_start, PAGE_SIZE); + + trampoline_32bit = (unsigned long *)paging_config.trampoline_start; + return paging_config; } -- cgit v1.2.1 From fb5268354d20b82c12569e325b0d051c09f983f7 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Feb 2018 21:04:49 +0300 Subject: x86/boot/compressed/64: Save and restore trampoline memory The memory area we found for trampoline shouldn't contain anything useful. But let's preserve the data anyway. Just to be on safe side. paging_prepare() would save the data into a buffer. cleanup_trampoline() would restore it back once we are done with the trampoline. Tested-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Cyrill Gorcunov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180226180451.86788-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 10 ++++++++++ arch/x86/boot/compressed/pgtable_64.c | 13 +++++++++++++ 2 files changed, 23 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index d598d65db32c..8ba0582c65d5 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -355,6 +355,16 @@ ENTRY(startup_64) lretq lvl5: + /* + * cleanup_trampoline() would restore trampoline memory. + * + * RSI holds real mode data and needs to be preserved across + * this function call. + */ + pushq %rsi + call cleanup_trampoline + popq %rsi + /* Zero EFLAGS */ pushq $0 popfq diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 21d5cc1cd5fa..01d08d3e3e43 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -1,5 +1,6 @@ #include #include "pgtable.h" +#include "../string.h" /* * __force_order is used by special_insns.h asm code to force instruction @@ -18,6 +19,9 @@ struct paging_config { unsigned long l5_required; }; +/* Buffer to preserve trampoline memory */ +static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; + /* * Trampoline address will be printed by extract_kernel() for debugging * purposes. @@ -69,5 +73,14 @@ struct paging_config paging_prepare(void) trampoline_32bit = (unsigned long *)paging_config.trampoline_start; + /* Preserve trampoline memory */ + memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE); + return paging_config; } + +void cleanup_trampoline(void) +{ + /* Restore trampoline memory */ + memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE); +} -- cgit v1.2.1 From 32fcefa2bfc8961987e91d1daeb00624b4176d2e Mon Sep 17 00:00:00 2001 From: "Kirill A. 
Shutemov" Date: Mon, 26 Feb 2018 21:04:50 +0300 Subject: x86/boot/compressed/64: Set up trampoline memory This patch clears up trampoline memory and copies trampoline code in place. It's not yet used though. Tested-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Cyrill Gorcunov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180226180451.86788-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 3 ++- arch/x86/boot/compressed/pgtable.h | 9 +++++++++ arch/x86/boot/compressed/pgtable_64.c | 7 +++++++ 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 8ba0582c65d5..c813cb004056 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -501,8 +501,9 @@ relocated: jmp *%rax .code32 +ENTRY(trampoline_32bit_src) compatible_mode: - /* Setup data and stack segments */ + /* Set up data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds movl %eax, %ss diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h index 57722a2fe2a0..91f75638f6e6 100644 --- a/arch/x86/boot/compressed/pgtable.h +++ b/arch/x86/boot/compressed/pgtable.h @@ -3,9 +3,18 @@ #define TRAMPOLINE_32BIT_SIZE (2 * PAGE_SIZE) +#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0 + +#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE +#define TRAMPOLINE_32BIT_CODE_SIZE 0x60 + +#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE + #ifndef __ASSEMBLER__ extern unsigned long *trampoline_32bit; +extern void trampoline_32bit_src(void *return_ptr); + #endif /* __ASSEMBLER__ */ #endif /* BOOT_COMPRESSED_PAGETABLE_H */ diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 01d08d3e3e43..810c2c32d98e 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -76,6 +76,13 @@ struct paging_config paging_prepare(void) /* Preserve trampoline memory */ memcpy(trampoline_save, trampoline_32bit, TRAMPOLINE_32BIT_SIZE); + /* Clear trampoline memory first */ + memset(trampoline_32bit, 0, TRAMPOLINE_32BIT_SIZE); + + /* Copy trampoline code in place */ + memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long), + &trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE); + return paging_config; } -- cgit v1.2.1 From e9d0e6330eb81ca49bdd8849cc52b3b0f70ed5cb Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 26 Feb 2018 21:04:51 +0300 Subject: x86/boot/compressed/64: Prepare new top-level page table for trampoline If trampoline code would need to switch between 4- and 5-level paging modes, we have to use a page table in trampoline memory. Having it in trampoline memory guarantees that it's below 4G and we can point CR3 to it from 32-bit trampoline code. We only use the page table if the desired paging mode doesn't match the mode we are in. Otherwise the page table is unused and trampoline code wouldn't touch CR3. For 4- to 5-level paging transition, we set up current (4-level paging) CR3 as the first and the only entry in a new top-level page table. For 5- to 4-level paging transition, copy page table pointed by first entry in the current top-level page table as our new top-level page table. 
If the page table is used by trampoline we would need to copy it to new page table outside trampoline and update CR3 before restoring trampoline memory. Tested-by: Borislav Petkov Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Shevchenko Cc: Cyrill Gorcunov Cc: Eric Biederman Cc: H. Peter Anvin Cc: Juergen Gross Cc: Kees Cook Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180226180451.86788-6-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/pgtable_64.c | 61 +++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c index 810c2c32d98e..32af1cbcd903 100644 --- a/arch/x86/boot/compressed/pgtable_64.c +++ b/arch/x86/boot/compressed/pgtable_64.c @@ -22,6 +22,14 @@ struct paging_config { /* Buffer to preserve trampoline memory */ static char trampoline_save[TRAMPOLINE_32BIT_SIZE]; +/* + * The page table is going to be used instead of page table in the trampoline + * memory. + * + * It must not be in BSS as BSS is cleared after cleanup_trampoline(). + */ +static char top_pgtable[PAGE_SIZE] __aligned(PAGE_SIZE) __section(.data); + /* * Trampoline address will be printed by extract_kernel() for debugging * purposes. @@ -83,11 +91,64 @@ struct paging_config paging_prepare(void) memcpy(trampoline_32bit + TRAMPOLINE_32BIT_CODE_OFFSET / sizeof(unsigned long), &trampoline_32bit_src, TRAMPOLINE_32BIT_CODE_SIZE); + /* + * The code below prepares page table in trampoline memory. + * + * The new page table will be used by trampoline code for switching + * from 4- to 5-level paging or vice versa. + * + * If switching is not required, the page table is unused: trampoline + * code wouldn't touch CR3. + */ + + /* + * We are not going to use the page table in trampoline memory if we + * are already in the desired paging mode. + */ + if (paging_config.l5_required == !!(native_read_cr4() & X86_CR4_LA57)) + goto out; + + if (paging_config.l5_required) { + /* + * For 4- to 5-level paging transition, set up current CR3 as + * the first and the only entry in a new top-level page table. + */ + trampoline_32bit[TRAMPOLINE_32BIT_PGTABLE_OFFSET] = __native_read_cr3() | _PAGE_TABLE_NOENC; + } else { + unsigned long src; + + /* + * For 5- to 4-level paging transition, copy page table pointed + * by first entry in the current top-level page table as our + * new top-level page table. + * + * We cannot just point to the page table from trampoline as it + * may be above 4G. + */ + src = *(unsigned long *)__native_read_cr3() & PAGE_MASK; + memcpy(trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET / sizeof(unsigned long), + (void *)src, PAGE_SIZE); + } + +out: return paging_config; } void cleanup_trampoline(void) { + void *trampoline_pgtable; + + trampoline_pgtable = trampoline_32bit + TRAMPOLINE_32BIT_PGTABLE_OFFSET; + + /* + * Move the top level page table out of trampoline memory, + * if it's there. 
+ */ + if ((void *)__native_read_cr3() == trampoline_pgtable) { + memcpy(top_pgtable, trampoline_pgtable, PAGE_SIZE); + native_write_cr3((unsigned long)top_pgtable); + } + /* Restore trampoline memory */ memcpy(trampoline_32bit, trampoline_save, TRAMPOLINE_32BIT_SIZE); } -- cgit v1.2.1 From c100a583601d357f923c41af5434dc1f8d07890f Mon Sep 17 00:00:00 2001 From: Baoquan He Date: Fri, 2 Mar 2018 13:18:01 +0800 Subject: kdump, vmcoreinfo: Export pgtable_l5_enabled value User-space utilities examining crash-kernels need to know if the crashed kernel was in 5-level paging mode or not. So write 'pgtable_l5_enabled' to vmcoreinfo, which covers these three cases: pgtable_l5_enabled == 0 when: - Compiled with !CONFIG_X86_5LEVEL - Compiled with CONFIG_X86_5LEVEL=y while CPU has no 'la57' flag pgtable_l5_enabled != 0 when: - Compiled with CONFIG_X86_5LEVEL=y and CPU has 'la57' flag Signed-off-by: Baoquan He Acked-by: Kirill A. Shutemov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: douly.fnst@cn.fujitsu.com Cc: dyoung@redhat.com Cc: ebiederm@xmission.com Cc: kirill.shutemov@linux.intel.com Cc: vgoyal@redhat.com Link: http://lkml.kernel.org/r/20180302051801.19594-1-bhe@redhat.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/machine_kexec_64.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 3b7427aa7d85..02f913cb27b5 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -350,6 +350,7 @@ void arch_crash_save_vmcoreinfo(void) { VMCOREINFO_NUMBER(phys_base); VMCOREINFO_SYMBOL(init_top_pgt); + VMCOREINFO_NUMBER(pgtable_l5_enabled); #ifdef CONFIG_NUMA VMCOREINFO_SYMBOL(node_data); -- cgit v1.2.1 From 7e904a91bf6049071ef9d605a52f863ae774081d Mon Sep 17 00:00:00 2001 From: Sai Praneeth Date: Mon, 12 Mar 2018 08:44:56 +0000 Subject: efi: Use efi_mm in x86 as well as ARM Presently, only ARM uses mm_struct to manage EFI page tables and EFI runtime region mappings. As this is the preferred approach, let's make this data structure common across architectures. Specially, for x86, using this data structure improves code maintainability and readability. Tested-by: Bhupesh Sharma [ardb: don't #include the world to get a declaration of struct mm_struct] Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Ard Biesheuvel Reviewed-by: Matt Fleming Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Lee, Chun-Yi Cc: Linus Torvalds Cc: Michael S. 
Tsirkin Cc: Peter Zijlstra Cc: Ravi Shankar Cc: Ricardo Neri Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20180312084500.10764-2-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/efi.h | 1 + arch/x86/platform/efi/efi_64.c | 3 +++ 2 files changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index a399c1ebf6f0..c62443fa7d0a 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -7,6 +7,7 @@ #include #include #include +#include /* * We map the EFI regions needed for runtime services non-contiguously, diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 780460aa5ea5..29425b6c98a7 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -233,6 +233,9 @@ int __init efi_alloc_page_tables(void) return -ENOMEM; } + mm_init_cpumask(&efi_mm); + init_new_context(NULL, &efi_mm); + return 0; } -- cgit v1.2.1 From 36b649760e94968e0495b73284aaf07eed0a328f Mon Sep 17 00:00:00 2001 From: Ard Biesheuvel Date: Mon, 12 Mar 2018 08:45:00 +0000 Subject: efi: Use string literals for efi_char16_t variable initializers Now that we unambiguously build the entire kernel with -fshort-wchar, it is no longer necessary to open code efi_char16_t[] initializers as arrays of characters, and we can move to the L"xxx" notation instead. Signed-off-by: Ard Biesheuvel Cc: Arnd Bergmann Cc: Linus Torvalds Cc: Lukas Wunner Cc: Matt Fleming Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-efi@vger.kernel.org Link: http://lkml.kernel.org/r/20180312084500.10764-6-ard.biesheuvel@linaro.org Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/eboot.c | 3 ++- arch/x86/platform/efi/quirks.c | 8 +++++--- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index f2251c1c9853..47d3efff6805 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -421,9 +421,10 @@ static void retrieve_apple_device_properties(struct boot_params *boot_params) } } +static const efi_char16_t apple[] = L"Apple"; + static void setup_quirks(struct boot_params *boot_params) { - static efi_char16_t const apple[] = { 'A', 'p', 'p', 'l', 'e', 0 }; efi_char16_t *fw_vendor = (efi_char16_t *)(unsigned long) efi_table_attr(efi_system_table, fw_vendor, sys_table); diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c index 1ef11c26f79b..36c1f8b9f7e0 100644 --- a/arch/x86/platform/efi/quirks.c +++ b/arch/x86/platform/efi/quirks.c @@ -75,7 +75,7 @@ struct quark_security_header { u32 rsvd[2]; }; -static efi_char16_t efi_dummy_name[6] = { 'D', 'U', 'M', 'M', 'Y', 0 }; +static const efi_char16_t efi_dummy_name[] = L"DUMMY"; static bool efi_no_storage_paranoia; @@ -105,7 +105,8 @@ early_param("efi_no_storage_paranoia", setup_storage_paranoia); */ void efi_delete_dummy_variable(void) { - efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, + efi.set_variable((efi_char16_t *)efi_dummy_name, + &EFI_DUMMY_GUID, EFI_VARIABLE_NON_VOLATILE | EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, @@ -182,7 +183,8 @@ efi_status_t efi_query_variable_store(u32 attributes, unsigned long size, if (!dummy) return EFI_OUT_OF_RESOURCES; - status = efi.set_variable(efi_dummy_name, &EFI_DUMMY_GUID, + status = efi.set_variable((efi_char16_t *)efi_dummy_name, + &EFI_DUMMY_GUID, EFI_VARIABLE_NON_VOLATILE | 
EFI_VARIABLE_BOOTSERVICE_ACCESS | EFI_VARIABLE_RUNTIME_ACCESS, -- cgit v1.2.1 From a5b162b2ecb013ed517ab5ce90079117ada743f4 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 5 Mar 2018 11:16:41 +0300 Subject: x86/mm: Do not use paravirtualized calls in native_set_p4d() In 4-level paging mode, native_set_p4d() updates the entry in the top-level page table. With PTI, update to the top-level kernel page table requires update to the userspace copy of the table as well, using pti_set_user_pgd(). native_set_p4d() uses p4d_val() and pgd_val() to convert types between p4d_t and pgd_t. p4d_val() and pgd_val() are paravirtualized and we must not use them in native helpers, as they crash the boot in paravirtualized environments. Replace p4d_val() and pgd_val() with native_p4d_val() and native_pgd_val() in native_set_p4d(). Reported-by: Fengguang Wu Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 91f606a8fa68 ("x86/mm: Replace compile-time checks for 5-level paging with runtime-time checks") Link: http://lkml.kernel.org/r/20180305081641.4290-1-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/pgtable_64.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 81dda8d1d0bd..163e01a0631d 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -224,9 +224,9 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d) return; } - pgd = native_make_pgd(p4d_val(p4d)); + pgd = native_make_pgd(native_p4d_val(p4d)); pgd = pti_set_user_pgd((pgd_t *)p4dp, pgd); - *p4dp = native_make_p4d(pgd_val(pgd)); + *p4dp = native_make_p4d(native_pgd_val(pgd)); } static inline void native_p4d_clear(p4d_t *p4d) -- cgit v1.2.1 From 3ede3417f8d59fff5c07339b310b343468c81b07 Mon Sep 17 00:00:00 2001 From: Sai Praneeth Date: Mon, 12 Mar 2018 09:43:54 +0000 Subject: x86/efi: Replace efi_pgd with efi_mm.pgd Since the previous patch added support for efi_mm, let's handle efi_pgd through efi_mm and remove global variable efi_pgd. Tested-by: Bhupesh Sharma Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Ard Biesheuvel Reviewed-by: Matt Fleming Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Lee, Chun-Yi Cc: Linus Torvalds Cc: Michael S. 
Tsirkin Cc: Peter Zijlstra Cc: Ravi Shankar Cc: Ricardo Neri Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-efi@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/mm/debug_pagetables.c | 6 +++--- arch/x86/platform/efi/efi_64.c | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/debug_pagetables.c b/arch/x86/mm/debug_pagetables.c index 51a6f92da2bf..225fe2f0bfec 100644 --- a/arch/x86/mm/debug_pagetables.c +++ b/arch/x86/mm/debug_pagetables.c @@ -1,4 +1,5 @@ #include +#include #include #include #include @@ -73,13 +74,12 @@ static const struct file_operations ptdump_curusr_fops = { #endif #if defined(CONFIG_EFI) && defined(CONFIG_X86_64) -extern pgd_t *efi_pgd; static struct dentry *pe_efi; static int ptdump_show_efi(struct seq_file *m, void *v) { - if (efi_pgd) - ptdump_walk_pgd_level_debugfs(m, efi_pgd, false); + if (efi_mm.pgd) + ptdump_walk_pgd_level_debugfs(m, efi_mm.pgd, false); return 0; } diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 29425b6c98a7..29b267b8cb63 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -191,8 +191,7 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) early_code_mapping_set_exec(0); } -pgd_t *efi_pgd; -EXPORT_SYMBOL_GPL(efi_pgd); +EXPORT_SYMBOL_GPL(efi_mm); /* * We need our own copy of the higher levels of the page tables @@ -205,7 +204,7 @@ EXPORT_SYMBOL_GPL(efi_pgd); */ int __init efi_alloc_page_tables(void) { - pgd_t *pgd; + pgd_t *pgd, *efi_pgd; p4d_t *p4d; pud_t *pud; gfp_t gfp_mask; @@ -233,6 +232,7 @@ int __init efi_alloc_page_tables(void) return -ENOMEM; } + efi_mm.pgd = efi_pgd; mm_init_cpumask(&efi_mm); init_new_context(NULL, &efi_mm); @@ -248,6 +248,7 @@ void efi_sync_low_kernel_mappings(void) pgd_t *pgd_k, *pgd_efi; p4d_t *p4d_k, *p4d_efi; pud_t *pud_k, *pud_efi; + pgd_t *efi_pgd = efi_mm.pgd; if (efi_enabled(EFI_OLD_MEMMAP)) return; @@ -341,7 +342,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) unsigned long pfn, text, pf; struct page *page; unsigned npages; - pgd_t *pgd; + pgd_t *pgd = efi_mm.pgd; if (efi_enabled(EFI_OLD_MEMMAP)) return 0; @@ -351,8 +352,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) * this value is loaded into cr3 the PGD will be decrypted during * the pagetable walk. 
*/ - efi_scratch.efi_pgt = (pgd_t *)__sme_pa(efi_pgd); - pgd = efi_pgd; + efi_scratch.efi_pgt = (pgd_t *)__sme_pa(pgd); /* * It can happen that the physical address of new_memmap lands in memory @@ -422,7 +422,7 @@ static void __init __map_region(efi_memory_desc_t *md, u64 va) { unsigned long flags = _PAGE_RW; unsigned long pfn; - pgd_t *pgd = efi_pgd; + pgd_t *pgd = efi_mm.pgd; if (!(md->attribute & EFI_MEMORY_WB)) flags |= _PAGE_PCD; @@ -526,7 +526,7 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len) static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf) { unsigned long pfn; - pgd_t *pgd = efi_pgd; + pgd_t *pgd = efi_mm.pgd; int err1, err2; /* Update the 1:1 mapping */ @@ -623,7 +623,7 @@ void __init efi_dump_pagetable(void) if (efi_enabled(EFI_OLD_MEMMAP)) ptdump_walk_pgd_level(NULL, swapper_pg_dir); else - ptdump_walk_pgd_level(NULL, efi_pgd); + ptdump_walk_pgd_level(NULL, efi_mm.pgd); #endif } -- cgit v1.2.1 From 03781e40890c18bdea40092355b61431d0073c1d Mon Sep 17 00:00:00 2001 From: Sai Praneeth Date: Mon, 12 Mar 2018 09:43:55 +0000 Subject: x86/efi: Use efi_switch_mm() rather than manually twiddling with %cr3 Use helper function efi_switch_mm() to switch to/from efi_mm when invoking any UEFI runtime services. Likewise, we need to switch back to previous mm (mm context stolen by efi_mm) after the above calls return successfully. We can use efi_switch_mm() helper function only with x86_64 kernel and "efi=old_map" disabled because, x86_32 and efi=old_map do not use efi_pgd, rather they use swapper_pg_dir. Tested-by: Bhupesh Sharma [ardb: add #include of sched/task.h for task_lock/_unlock] Signed-off-by: Sai Praneeth Prakhya Signed-off-by: Ard Biesheuvel Reviewed-by: Matt Fleming Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Lee, Chun-Yi Cc: Linus Torvalds Cc: Michael S. Tsirkin Cc: Peter Zijlstra Cc: Ravi Shankar Cc: Ricardo Neri Cc: Thomas Gleixner Cc: Tony Luck Cc: linux-efi@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/include/asm/efi.h | 25 +++++++++------------- arch/x86/platform/efi/efi_64.c | 41 +++++++++++++++++++----------------- arch/x86/platform/efi/efi_thunk_64.S | 2 +- 3 files changed, 33 insertions(+), 35 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h index c62443fa7d0a..cec5fae23eb3 100644 --- a/arch/x86/include/asm/efi.h +++ b/arch/x86/include/asm/efi.h @@ -70,14 +70,13 @@ extern asmlinkage u64 efi_call(void *fp, ...); #define efi_call_phys(f, args...) efi_call((f), args) /* - * Scratch space used for switching the pagetable in the EFI stub + * struct efi_scratch - Scratch space used while switching to/from efi_mm + * @phys_stack: stack used during EFI Mixed Mode + * @prev_mm: store/restore stolen mm_struct while switching to/from efi_mm */ struct efi_scratch { - u64 r15; - u64 prev_cr3; - pgd_t *efi_pgt; - bool use_pgd; - u64 phys_stack; + u64 phys_stack; + struct mm_struct *prev_mm; } __packed; #define arch_efi_call_virt_setup() \ @@ -87,11 +86,8 @@ struct efi_scratch { __kernel_fpu_begin(); \ firmware_restrict_branch_speculation_start(); \ \ - if (efi_scratch.use_pgd) { \ - efi_scratch.prev_cr3 = __read_cr3(); \ - write_cr3((unsigned long)efi_scratch.efi_pgt); \ - __flush_tlb_all(); \ - } \ + if (!efi_enabled(EFI_OLD_MEMMAP)) \ + efi_switch_mm(&efi_mm); \ }) #define arch_efi_call_virt(p, f, args...) 
\ @@ -99,10 +95,8 @@ struct efi_scratch { #define arch_efi_call_virt_teardown() \ ({ \ - if (efi_scratch.use_pgd) { \ - write_cr3(efi_scratch.prev_cr3); \ - __flush_tlb_all(); \ - } \ + if (!efi_enabled(EFI_OLD_MEMMAP)) \ + efi_switch_mm(efi_scratch.prev_mm); \ \ firmware_restrict_branch_speculation_end(); \ __kernel_fpu_end(); \ @@ -145,6 +139,7 @@ extern void __init efi_dump_pagetable(void); extern void __init efi_apply_memmap_quirks(void); extern int __init efi_reuse_config(u64 tables, int nr_tables); extern void efi_delete_dummy_variable(void); +extern void efi_switch_mm(struct mm_struct *mm); struct efi_setup_data { u64 fw_vendor; diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c index 29b267b8cb63..4f8a9c963fbb 100644 --- a/arch/x86/platform/efi/efi_64.c +++ b/arch/x86/platform/efi/efi_64.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include @@ -82,9 +83,8 @@ pgd_t * __init efi_call_phys_prolog(void) int n_pgds, i, j; if (!efi_enabled(EFI_OLD_MEMMAP)) { - save_pgd = (pgd_t *)__read_cr3(); - write_cr3((unsigned long)efi_scratch.efi_pgt); - goto out; + efi_switch_mm(&efi_mm); + return NULL; } early_code_mapping_set_exec(1); @@ -156,8 +156,7 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd) pud_t *pud; if (!efi_enabled(EFI_OLD_MEMMAP)) { - write_cr3((unsigned long)save_pgd); - __flush_tlb_all(); + efi_switch_mm(efi_scratch.prev_mm); return; } @@ -347,13 +346,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) if (efi_enabled(EFI_OLD_MEMMAP)) return 0; - /* - * Since the PGD is encrypted, set the encryption mask so that when - * this value is loaded into cr3 the PGD will be decrypted during - * the pagetable walk. - */ - efi_scratch.efi_pgt = (pgd_t *)__sme_pa(pgd); - /* * It can happen that the physical address of new_memmap lands in memory * which is not mapped in the EFI page table. Therefore we need to go @@ -367,8 +359,6 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) return 1; } - efi_scratch.use_pgd = true; - /* * Certain firmware versions are way too sentimential and still believe * they are exclusive and unquestionable owners of the first physical page, @@ -627,6 +617,22 @@ void __init efi_dump_pagetable(void) #endif } +/* + * Makes the calling thread switch to/from efi_mm context. Can be used + * for SetVirtualAddressMap() i.e. current->active_mm == init_mm as well + * as during efi runtime calls i.e current->active_mm == current_mm. + * We are not mm_dropping()/mm_grabbing() any mm, because we are not + * losing/creating any references. 
+ */ +void efi_switch_mm(struct mm_struct *mm) +{ + task_lock(current); + efi_scratch.prev_mm = current->active_mm; + current->active_mm = mm; + switch_mm(efi_scratch.prev_mm, mm, NULL); + task_unlock(current); +} + #ifdef CONFIG_EFI_MIXED extern efi_status_t efi64_thunk(u32, ...); @@ -680,16 +686,13 @@ efi_status_t efi_thunk_set_virtual_address_map( efi_sync_low_kernel_mappings(); local_irq_save(flags); - efi_scratch.prev_cr3 = __read_cr3(); - write_cr3((unsigned long)efi_scratch.efi_pgt); - __flush_tlb_all(); + efi_switch_mm(&efi_mm); func = (u32)(unsigned long)phys_set_virtual_address_map; status = efi64_thunk(func, memory_map_size, descriptor_size, descriptor_version, virtual_map); - write_cr3(efi_scratch.prev_cr3); - __flush_tlb_all(); + efi_switch_mm(efi_scratch.prev_mm); local_irq_restore(flags); return status; diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S index 189b218da87c..46c58b08739c 100644 --- a/arch/x86/platform/efi/efi_thunk_64.S +++ b/arch/x86/platform/efi/efi_thunk_64.S @@ -33,7 +33,7 @@ ENTRY(efi64_thunk) * Switch to 1:1 mapped 32-bit stack pointer. */ movq %rsp, efi_saved_sp(%rip) - movq efi_scratch+25(%rip), %rsp + movq efi_scratch(%rip), %rsp /* * Calculate the physical address of the kernel text. -- cgit v1.2.1 From 7beebaccd5083d61e975acb581bd528b9326e7c4 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 12 Mar 2018 13:02:43 +0300 Subject: x86/boot/compressed/64: Make sure we have a 32-bit code segment When kernel starts in 64-bit mode we inherit the GDT from the bootloader. It may cause a problem if the GDT doesn't have a 32-bit code segment where we expect it to be. Load our own GDT with known segments. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180312100246.89175-2-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index c813cb004056..f0c3a2f7e528 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -313,6 +313,11 @@ ENTRY(startup_64) * first. */ + /* Make sure we have GDT with 32-bit code segment */ + leaq gdt(%rip), %rax + movq %rax, gdt64+2(%rip) + lgdt gdt64(%rip) + /* * paging_prepare() sets up the trampoline and checks if we need to * enable 5-level paging. @@ -547,6 +552,11 @@ no_longmode: #include "../../kernel/verify_cpu.S" .data +gdt64: + .word gdt_end - gdt + .long 0 + .word 0 + .quad 0 gdt: .word gdt_end - gdt .long gdt -- cgit v1.2.1 From f7ff53e4702be9ac49deacd518dd243de45c9980 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 12 Mar 2018 13:02:44 +0300 Subject: x86/boot/compressed/64: Use stack from trampoline memory As the first step on using trampoline memory, let's make 32-bit code use stack there. Separate stack is required to return back from trampoline and we cannot user stack from 64-bit mode as it may be above 4G. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180312100246.89175-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index f0c3a2f7e528..12915511be61 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -33,6 +33,7 @@ #include #include #include +#include "pgtable.h" /* * Locally defined symbols should be marked hidden: @@ -359,6 +360,8 @@ ENTRY(startup_64) pushq %rax lretq lvl5: + /* Restore the stack, the 32-bit trampoline uses its own stack */ + leaq boot_stack_end(%rbx), %rsp /* * cleanup_trampoline() would restore trampoline memory. @@ -513,6 +516,9 @@ compatible_mode: movl %eax, %ds movl %eax, %ss + /* Set up new stack */ + leal TRAMPOLINE_32BIT_STACK_END(%ecx), %esp + /* Disable paging */ movl %cr0, %eax btrl $X86_CR0_PG_BIT, %eax -- cgit v1.2.1 From 0a1756bd2897951c03c1cb671bdfd40729ac2177 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 12 Mar 2018 13:02:45 +0300 Subject: x86/boot/compressed/64: Use page table in trampoline memory If a bootloader enables 64-bit mode with 4-level paging, we might need to switch over to 5-level paging. The switching requires the disabling paging. It works fine if kernel itself is loaded below 4G. But if the bootloader put the kernel above 4G (i.e. in kexec() case), we would lose control as soon as paging is disabled, because the code becomes unreachable to the CPU. To handle the situation, we need a trampoline in lower memory that would take care of switching on 5-level paging. Apart from the trampoline code itself we also need a place to store top-level page table in lower memory as we don't have a way to load 64-bit values into CR3 in 32-bit mode. We only really need 8 bytes there as we only use the very first entry of the page table. But we allocate a whole page anyway. This patch switches 32-bit code to use page table in trampoline memory. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180312100246.89175-4-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 47 +++++++++++++++++++------------------- 1 file changed, 23 insertions(+), 24 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 12915511be61..959ca07a58d9 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -336,23 +336,6 @@ ENTRY(startup_64) /* Save the trampoline address in RCX */ movq %rax, %rcx - /* Check if we need to enable 5-level paging */ - cmpq $0, %rdx - jz lvl5 - - /* Clear additional page table */ - leaq lvl5_pgtable(%rbx), %rdi - xorq %rax, %rax - movq $(PAGE_SIZE/8), %rcx - rep stosq - - /* - * Setup current CR3 as the first and only entry in a new top level - * page table. 
- */ - movq %cr3, %rdi - leaq 0x7 (%rdi), %rax - movq %rax, lvl5_pgtable(%rbx) /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ pushq $__KERNEL32_CS @@ -524,13 +507,31 @@ compatible_mode: btrl $X86_CR0_PG_BIT, %eax movl %eax, %cr0 - /* Point CR3 to 5-level paging */ - leal lvl5_pgtable(%ebx), %eax - movl %eax, %cr3 + /* Check what paging mode we want to be in after the trampoline */ + cmpl $0, %edx + jz 1f - /* Enable PAE and LA57 mode */ + /* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */ movl %cr4, %eax - orl $(X86_CR4_PAE | X86_CR4_LA57), %eax + testl $X86_CR4_LA57, %eax + jnz 3f + jmp 2f +1: + /* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */ + movl %cr4, %eax + testl $X86_CR4_LA57, %eax + jz 3f +2: + /* Point CR3 to the trampoline's new top level page table */ + leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax + movl %eax, %cr3 +3: + /* Enable PAE and LA57 (if required) paging modes */ + movl $X86_CR4_PAE, %eax + cmpl $0, %edx + jz 1f + orl $X86_CR4_LA57, %eax +1: movl %eax, %cr4 /* Calculate address we are running at */ @@ -611,5 +612,3 @@ boot_stack_end: .balign 4096 pgtable: .fill BOOT_PGT_SIZE, 1, 0 -lvl5_pgtable: - .fill PAGE_SIZE, 1, 0 -- cgit v1.2.1 From 194a9749c73d650c0b1dfdee04fb0bdf0a888ba8 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 12 Mar 2018 13:02:46 +0300 Subject: x86/boot/compressed/64: Handle 5-level paging boot if kernel is above 4G This patch addresses a shortcoming in current boot process on machines that supports 5-level paging. If a bootloader enables 64-bit mode with 4-level paging, we might need to switch over to 5-level paging. The switching requires the disabling paging. It works fine if kernel itself is loaded below 4G. But if the bootloader put the kernel above 4G (not sure if anybody does this), we would lose control as soon as paging is disabled, because the code becomes unreachable to the CPU. This patch implements a trampoline in lower memory to handle this situation. We only need the memory for a very short time, until the main kernel image sets up own page tables. We go through the trampoline even if we don't have to: if we're already in 5-level paging mode or if we don't need to switch to it. This way the trampoline gets tested on every boot. Signed-off-by: Kirill A. Shutemov Cc: Andy Lutomirski Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Borislav Petkov Cc: Brian Gerst Cc: Cyrill Gorcunov Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthew Wilcox Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180312100246.89175-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/boot/compressed/head_64.S | 69 +++++++++++++++++++++++++++++--------- 1 file changed, 53 insertions(+), 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 959ca07a58d9..fca012baba19 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -307,11 +307,27 @@ ENTRY(startup_64) /* * At this point we are in long mode with 4-level paging enabled, - * but we want to enable 5-level paging. + * but we might want to enable 5-level paging or vice versa. * - * The problem is that we cannot do it directly. Setting LA57 in - * long mode would trigger #GP. So we need to switch off long mode - * first. + * The problem is that we cannot do it directly. 
Setting or clearing + * CR4.LA57 in long mode would trigger #GP. So we need to switch off + * long mode and paging first. + * + * We also need a trampoline in lower memory to switch over from + * 4- to 5-level paging for cases when the bootloader puts the kernel + * above 4G, but didn't enable 5-level paging for us. + * + * The same trampoline can be used to switch from 5- to 4-level paging + * mode, like when starting 4-level paging kernel via kexec() when + * original kernel worked in 5-level paging mode. + * + * For the trampoline, we need the top page table to reside in lower + * memory as we don't have a way to load 64-bit values into CR3 in + * 32-bit mode. + * + * We go though the trampoline even if we don't have to: if we're + * already in a desired paging mode. This way the trampoline code gets + * tested on every boot. */ /* Make sure we have GDT with 32-bit code segment */ @@ -336,13 +352,18 @@ ENTRY(startup_64) /* Save the trampoline address in RCX */ movq %rax, %rcx + /* + * Load the address of trampoline_return() into RDI. + * It will be used by the trampoline to return to the main code. + */ + leaq trampoline_return(%rip), %rdi /* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */ pushq $__KERNEL32_CS - leaq compatible_mode(%rip), %rax + leaq TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax pushq %rax lretq -lvl5: +trampoline_return: /* Restore the stack, the 32-bit trampoline uses its own stack */ leaq boot_stack_end(%rbx), %rsp @@ -492,8 +513,14 @@ relocated: jmp *%rax .code32 +/* + * This is the 32-bit trampoline that will be copied over to low memory. + * + * RDI contains the return address (might be above 4G). + * ECX contains the base address of the trampoline memory. + * Non zero RDX on return means we need to enable 5-level paging. + */ ENTRY(trampoline_32bit_src) -compatible_mode: /* Set up data and stack segments */ movl $__KERNEL_DS, %eax movl %eax, %ds @@ -534,24 +561,34 @@ compatible_mode: 1: movl %eax, %cr4 - /* Calculate address we are running at */ - call 1f -1: popl %edi - subl $1b, %edi + /* Calculate address of paging_enabled() once we are executing in the trampoline */ + leal paging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax - /* Prepare stack for far return to Long Mode */ + /* Prepare the stack for far return to Long Mode */ pushl $__KERNEL_CS - leal lvl5(%edi), %eax - push %eax + pushl %eax - /* Enable paging back */ + /* Enable paging again */ movl $(X86_CR0_PG | X86_CR0_PE), %eax movl %eax, %cr0 lret + .code64 +paging_enabled: + /* Return from the trampoline */ + jmp *%rdi + + /* + * The trampoline code has a size limit. + * Make sure we fail to compile if the trampoline code grows + * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes. + */ + .org trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE + + .code32 no_longmode: - /* This isn't an x86-64 CPU so hang */ + /* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */ 1: hlt jmp 1b -- cgit v1.2.1 From cb06d8e3d020c30fe10ae711c925a5319ab82c88 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 5 Mar 2018 19:25:50 +0300 Subject: x86/tme: Detect if TME and MKTME is activated by BIOS IA32_TME_ACTIVATE MSR (0x982) can be used to check if BIOS has enabled TME and MKTME. It includes which encryption policy/algorithm is selected for TME or available for MKTME. For MKTME, the MSR also enumerates how many KeyIDs are available. We would need to exclude KeyID bits from physical address bits. 
detect_tme() would adjust cpuinfo_x86::x86_phys_bits accordingly. We have to do this even if we are not going to use KeyID bits ourself. VM guests still have to know that these bits are not usable for physical address. Signed-off-by: Kirill A. Shutemov Cc: Dave Hansen Cc: Kai Huang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tom Lendacky Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180305162610.37510-3-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 90 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 4aa9fd379390..b862067bb33c 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -510,6 +510,93 @@ static void detect_vmx_virtcap(struct cpuinfo_x86 *c) } } +#define MSR_IA32_TME_ACTIVATE 0x982 + +/* Helpers to access TME_ACTIVATE MSR */ +#define TME_ACTIVATE_LOCKED(x) (x & 0x1) +#define TME_ACTIVATE_ENABLED(x) (x & 0x2) + +#define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */ +#define TME_ACTIVATE_POLICY_AES_XTS_128 0 + +#define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */ + +#define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */ +#define TME_ACTIVATE_CRYPTO_AES_XTS_128 1 + +/* Values for mktme_status (SW only construct) */ +#define MKTME_ENABLED 0 +#define MKTME_DISABLED 1 +#define MKTME_UNINITIALIZED 2 +static int mktme_status = MKTME_UNINITIALIZED; + +static void detect_tme(struct cpuinfo_x86 *c) +{ + u64 tme_activate, tme_policy, tme_crypto_algs; + int keyid_bits = 0, nr_keyids = 0; + static u64 tme_activate_cpu0 = 0; + + rdmsrl(MSR_IA32_TME_ACTIVATE, tme_activate); + + if (mktme_status != MKTME_UNINITIALIZED) { + if (tme_activate != tme_activate_cpu0) { + /* Broken BIOS? */ + pr_err_once("x86/tme: configuation is inconsistent between CPUs\n"); + pr_err_once("x86/tme: MKTME is not usable\n"); + mktme_status = MKTME_DISABLED; + + /* Proceed. We may need to exclude bits from x86_phys_bits. */ + } + } else { + tme_activate_cpu0 = tme_activate; + } + + if (!TME_ACTIVATE_LOCKED(tme_activate) || !TME_ACTIVATE_ENABLED(tme_activate)) { + pr_info_once("x86/tme: not enabled by BIOS\n"); + mktme_status = MKTME_DISABLED; + return; + } + + if (mktme_status != MKTME_UNINITIALIZED) + goto detect_keyid_bits; + + pr_info("x86/tme: enabled by BIOS\n"); + + tme_policy = TME_ACTIVATE_POLICY(tme_activate); + if (tme_policy != TME_ACTIVATE_POLICY_AES_XTS_128) + pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); + + tme_crypto_algs = TME_ACTIVATE_CRYPTO_ALGS(tme_activate); + if (!(tme_crypto_algs & TME_ACTIVATE_CRYPTO_AES_XTS_128)) { + pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", + tme_crypto_algs); + mktme_status = MKTME_DISABLED; + } +detect_keyid_bits: + keyid_bits = TME_ACTIVATE_KEYID_BITS(tme_activate); + nr_keyids = (1UL << keyid_bits) - 1; + if (nr_keyids) { + pr_info_once("x86/mktme: enabled by BIOS\n"); + pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); + } else { + pr_info_once("x86/mktme: disabled by BIOS\n"); + } + + if (mktme_status == MKTME_UNINITIALIZED) { + /* MKTME is usable */ + mktme_status = MKTME_ENABLED; + } + + /* + * Exclude KeyID bits from physical address bits. + * + * We have to do this even if we are not going to use KeyID bits + * ourself. VM guests still have to know that these bits are not usable + * for physical address. 
+ */ + c->x86_phys_bits -= keyid_bits; +} + static void init_intel_energy_perf(struct cpuinfo_x86 *c) { u64 epb; @@ -680,6 +767,9 @@ static void init_intel(struct cpuinfo_x86 *c) if (cpu_has(c, X86_FEATURE_VMX)) detect_vmx_virtcap(c); + if (cpu_has(c, X86_FEATURE_TME)) + detect_tme(c); + init_intel_energy_perf(c); init_intel_misc_features(c); -- cgit v1.2.1 From be7825c19b4866ddc7b1431740b69ede2eeb93c1 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 5 Mar 2018 19:25:52 +0300 Subject: x86/pconfig: Detect PCONFIG targets Intel PCONFIG targets are enumerated via new CPUID leaf 0x1b. This patch detects all supported targets of PCONFIG and implements helper to check if the target is supported. Signed-off-by: Kirill A. Shutemov Cc: Dave Hansen Cc: Kai Huang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tom Lendacky Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180305162610.37510-5-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel_pconfig.h | 15 +++++++ arch/x86/kernel/cpu/Makefile | 2 +- arch/x86/kernel/cpu/intel_pconfig.c | 82 ++++++++++++++++++++++++++++++++++++ 3 files changed, 98 insertions(+), 1 deletion(-) create mode 100644 arch/x86/include/asm/intel_pconfig.h create mode 100644 arch/x86/kernel/cpu/intel_pconfig.c (limited to 'arch/x86') diff --git a/arch/x86/include/asm/intel_pconfig.h b/arch/x86/include/asm/intel_pconfig.h new file mode 100644 index 000000000000..fb7a37c3798b --- /dev/null +++ b/arch/x86/include/asm/intel_pconfig.h @@ -0,0 +1,15 @@ +#ifndef _ASM_X86_INTEL_PCONFIG_H +#define _ASM_X86_INTEL_PCONFIG_H + +#include +#include + +enum pconfig_target { + INVALID_TARGET = 0, + MKTME_TARGET = 1, + PCONFIG_TARGET_NR +}; + +int pconfig_target_supported(enum pconfig_target target); + +#endif /* _ASM_X86_INTEL_PCONFIG_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 570e8bb1f386..a66229f51b12 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -28,7 +28,7 @@ obj-y += cpuid-deps.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_X86_FEATURE_NAMES) += capflags.o powerflags.o -obj-$(CONFIG_CPU_SUP_INTEL) += intel.o +obj-$(CONFIG_CPU_SUP_INTEL) += intel.o intel_pconfig.o obj-$(CONFIG_CPU_SUP_AMD) += amd.o obj-$(CONFIG_CPU_SUP_CYRIX_32) += cyrix.o obj-$(CONFIG_CPU_SUP_CENTAUR) += centaur.o diff --git a/arch/x86/kernel/cpu/intel_pconfig.c b/arch/x86/kernel/cpu/intel_pconfig.c new file mode 100644 index 000000000000..0771a905b286 --- /dev/null +++ b/arch/x86/kernel/cpu/intel_pconfig.c @@ -0,0 +1,82 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel PCONFIG instruction support. + * + * Copyright (C) 2017 Intel Corporation + * + * Author: + * Kirill A. Shutemov + */ + +#include +#include + +#define PCONFIG_CPUID 0x1b + +#define PCONFIG_CPUID_SUBLEAF_MASK ((1 << 12) - 1) + +/* Subleaf type (EAX) for PCONFIG CPUID leaf (0x1B) */ +enum { + PCONFIG_CPUID_SUBLEAF_INVALID = 0, + PCONFIG_CPUID_SUBLEAF_TARGETID = 1, +}; + +/* Bitmask of supported targets */ +static u64 targets_supported __read_mostly; + +int pconfig_target_supported(enum pconfig_target target) +{ + /* + * We would need to re-think the implementation once we get > 64 + * PCONFIG targets. Spec allows up to 2^32 targets. 
+ */ + BUILD_BUG_ON(PCONFIG_TARGET_NR >= 64); + + if (WARN_ON_ONCE(target >= 64)) + return 0; + return targets_supported & (1ULL << target); +} + +static int __init intel_pconfig_init(void) +{ + int subleaf; + + if (!boot_cpu_has(X86_FEATURE_PCONFIG)) + return 0; + + /* + * Scan subleafs of PCONFIG CPUID leaf. + * + * Subleafs of the same type need not to be consecutive. + * + * Stop on the first invalid subleaf type. All subleafs after the first + * invalid are invalid too. + */ + for (subleaf = 0; subleaf < INT_MAX; subleaf++) { + struct cpuid_regs regs; + + cpuid_count(PCONFIG_CPUID, subleaf, + ®s.eax, ®s.ebx, ®s.ecx, ®s.edx); + + switch (regs.eax & PCONFIG_CPUID_SUBLEAF_MASK) { + case PCONFIG_CPUID_SUBLEAF_INVALID: + /* Stop on the first invalid subleaf */ + goto out; + case PCONFIG_CPUID_SUBLEAF_TARGETID: + /* Mark supported PCONFIG targets */ + if (regs.ebx < 64) + targets_supported |= (1ULL << regs.ebx); + if (regs.ecx < 64) + targets_supported |= (1ULL << regs.ecx); + if (regs.edx < 64) + targets_supported |= (1ULL << regs.edx); + break; + default: + /* Unknown CPUID.PCONFIG subleaf: ignore */ + break; + } + } +out: + return 0; +} +arch_initcall(intel_pconfig_init); -- cgit v1.2.1 From 24c517856af6511be1339dd55edd131160e37aac Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Mon, 5 Mar 2018 19:25:53 +0300 Subject: x86/pconfig: Provide defines and helper to run MKTME_KEY_PROG leaf MKTME_KEY_PROG allows to manipulate MKTME keys in the CPU. Signed-off-by: Kirill A. Shutemov Cc: Dave Hansen Cc: Kai Huang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Tom Lendacky Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/20180305162610.37510-6-kirill.shutemov@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/intel_pconfig.h | 50 ++++++++++++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/intel_pconfig.h b/arch/x86/include/asm/intel_pconfig.h index fb7a37c3798b..3cb002b1d0f9 100644 --- a/arch/x86/include/asm/intel_pconfig.h +++ b/arch/x86/include/asm/intel_pconfig.h @@ -12,4 +12,54 @@ enum pconfig_target { int pconfig_target_supported(enum pconfig_target target); +enum pconfig_leaf { + MKTME_KEY_PROGRAM = 0, + PCONFIG_LEAF_INVALID, +}; + +#define PCONFIG ".byte 0x0f, 0x01, 0xc5" + +/* Defines and structure for MKTME_KEY_PROGRAM of PCONFIG instruction */ + +/* mktme_key_program::keyid_ctrl COMMAND, bits [7:0] */ +#define MKTME_KEYID_SET_KEY_DIRECT 0 +#define MKTME_KEYID_SET_KEY_RANDOM 1 +#define MKTME_KEYID_CLEAR_KEY 2 +#define MKTME_KEYID_NO_ENCRYPT 3 + +/* mktme_key_program::keyid_ctrl ENC_ALG, bits [23:8] */ +#define MKTME_AES_XTS_128 (1 << 8) + +/* Return codes from the PCONFIG MKTME_KEY_PROGRAM */ +#define MKTME_PROG_SUCCESS 0 +#define MKTME_INVALID_PROG_CMD 1 +#define MKTME_ENTROPY_ERROR 2 +#define MKTME_INVALID_KEYID 3 +#define MKTME_INVALID_ENC_ALG 4 +#define MKTME_DEVICE_BUSY 5 + +/* Hardware requires the structure to be 256 byte alinged. Otherwise #GP(0). 
*/ +struct mktme_key_program { + u16 keyid; + u32 keyid_ctrl; + u8 __rsvd[58]; + u8 key_field_1[64]; + u8 key_field_2[64]; +} __packed __aligned(256); + +static inline int mktme_key_program(struct mktme_key_program *key_program) +{ + unsigned long rax = MKTME_KEY_PROGRAM; + + if (!pconfig_target_supported(MKTME_TARGET)) + return -ENXIO; + + asm volatile(PCONFIG + : "=a" (rax), "=b" (key_program) + : "0" (rax), "1" (key_program) + : "memory", "cc"); + + return rax; +} + #endif /* _ASM_X86_INTEL_PCONFIG_H */ -- cgit v1.2.1 From 8bf705d130396e69c04cd8e6e010244ad2ce71f4 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Mon, 29 Jan 2018 18:26:05 +0100 Subject: locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h Add arch_ prefix to all atomic operations and include . This will allow to add KASAN instrumentation to all atomic ops. Signed-off-by: Dmitry Vyukov Cc: Andrew Morton Cc: Andrey Ryabinin Cc: Linus Torvalds Cc: Mark Rutland Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: kasan-dev@googlegroups.com Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/54f0eb64260b84199e538652e079a89b5423ad41.1517246437.git.dvyukov@google.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/atomic.h | 102 ++++++++++++++++++----------------- arch/x86/include/asm/atomic64_32.h | 106 ++++++++++++++++++------------------ arch/x86/include/asm/atomic64_64.h | 108 ++++++++++++++++++------------------- arch/x86/include/asm/cmpxchg.h | 12 ++--- arch/x86/include/asm/cmpxchg_32.h | 8 +-- arch/x86/include/asm/cmpxchg_64.h | 4 +- 6 files changed, 172 insertions(+), 168 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 72759f131cc5..33afc966d6a9 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -17,36 +17,36 @@ #define ATOMIC_INIT(i) { (i) } /** - * atomic_read - read atomic variable + * arch_atomic_read - read atomic variable * @v: pointer of type atomic_t * * Atomically reads the value of @v. */ -static __always_inline int atomic_read(const atomic_t *v) +static __always_inline int arch_atomic_read(const atomic_t *v) { return READ_ONCE((v)->counter); } /** - * atomic_set - set atomic variable + * arch_atomic_set - set atomic variable * @v: pointer of type atomic_t * @i: required value * * Atomically sets the value of @v to @i. */ -static __always_inline void atomic_set(atomic_t *v, int i) +static __always_inline void arch_atomic_set(atomic_t *v, int i) { WRITE_ONCE(v->counter, i); } /** - * atomic_add - add integer to atomic variable + * arch_atomic_add - add integer to atomic variable * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v. */ -static __always_inline void atomic_add(int i, atomic_t *v) +static __always_inline void arch_atomic_add(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "addl %1,%0" : "+m" (v->counter) @@ -54,13 +54,13 @@ static __always_inline void atomic_add(int i, atomic_t *v) } /** - * atomic_sub - subtract integer from atomic variable + * arch_atomic_sub - subtract integer from atomic variable * @i: integer value to subtract * @v: pointer of type atomic_t * * Atomically subtracts @i from @v. 
*/ -static __always_inline void atomic_sub(int i, atomic_t *v) +static __always_inline void arch_atomic_sub(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "subl %1,%0" : "+m" (v->counter) @@ -68,7 +68,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v) } /** - * atomic_sub_and_test - subtract value from variable and test result + * arch_atomic_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer of type atomic_t * @@ -76,63 +76,63 @@ static __always_inline void atomic_sub(int i, atomic_t *v) * true if the result is zero, or false for all * other cases. */ -static __always_inline bool atomic_sub_and_test(int i, atomic_t *v) +static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e); } /** - * atomic_inc - increment atomic variable + * arch_atomic_inc - increment atomic variable * @v: pointer of type atomic_t * * Atomically increments @v by 1. */ -static __always_inline void atomic_inc(atomic_t *v) +static __always_inline void arch_atomic_inc(atomic_t *v) { asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter)); } /** - * atomic_dec - decrement atomic variable + * arch_atomic_dec - decrement atomic variable * @v: pointer of type atomic_t * * Atomically decrements @v by 1. */ -static __always_inline void atomic_dec(atomic_t *v) +static __always_inline void arch_atomic_dec(atomic_t *v) { asm volatile(LOCK_PREFIX "decl %0" : "+m" (v->counter)); } /** - * atomic_dec_and_test - decrement and test + * arch_atomic_dec_and_test - decrement and test * @v: pointer of type atomic_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ -static __always_inline bool atomic_dec_and_test(atomic_t *v) +static __always_inline bool arch_atomic_dec_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e); } /** - * atomic_inc_and_test - increment and test + * arch_atomic_inc_and_test - increment and test * @v: pointer of type atomic_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ -static __always_inline bool atomic_inc_and_test(atomic_t *v) +static __always_inline bool arch_atomic_inc_and_test(atomic_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e); } /** - * atomic_add_negative - add and test if negative + * arch_atomic_add_negative - add and test if negative * @i: integer value to add * @v: pointer of type atomic_t * @@ -140,65 +140,65 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v) * if the result is negative, or false when * result is greater than or equal to zero. 
*/ -static __always_inline bool atomic_add_negative(int i, atomic_t *v) +static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s); } /** - * atomic_add_return - add integer and return + * arch_atomic_add_return - add integer and return * @i: integer value to add * @v: pointer of type atomic_t * * Atomically adds @i to @v and returns @i + @v */ -static __always_inline int atomic_add_return(int i, atomic_t *v) +static __always_inline int arch_atomic_add_return(int i, atomic_t *v) { return i + xadd(&v->counter, i); } /** - * atomic_sub_return - subtract integer and return + * arch_atomic_sub_return - subtract integer and return * @v: pointer of type atomic_t * @i: integer value to subtract * * Atomically subtracts @i from @v and returns @v - @i */ -static __always_inline int atomic_sub_return(int i, atomic_t *v) +static __always_inline int arch_atomic_sub_return(int i, atomic_t *v) { - return atomic_add_return(-i, v); + return arch_atomic_add_return(-i, v); } -#define atomic_inc_return(v) (atomic_add_return(1, v)) -#define atomic_dec_return(v) (atomic_sub_return(1, v)) +#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v)) +#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v)) -static __always_inline int atomic_fetch_add(int i, atomic_t *v) +static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v) { return xadd(&v->counter, i); } -static __always_inline int atomic_fetch_sub(int i, atomic_t *v) +static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v) { return xadd(&v->counter, -i); } -static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new) +static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new) { - return cmpxchg(&v->counter, old, new); + return arch_cmpxchg(&v->counter, old, new); } -#define atomic_try_cmpxchg atomic_try_cmpxchg -static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new) +#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg +static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new) { return try_cmpxchg(&v->counter, old, new); } -static inline int atomic_xchg(atomic_t *v, int new) +static inline int arch_atomic_xchg(atomic_t *v, int new) { return xchg(&v->counter, new); } -static inline void atomic_and(int i, atomic_t *v) +static inline void arch_atomic_and(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "andl %1,%0" : "+m" (v->counter) @@ -206,16 +206,16 @@ static inline void atomic_and(int i, atomic_t *v) : "memory"); } -static inline int atomic_fetch_and(int i, atomic_t *v) +static inline int arch_atomic_fetch_and(int i, atomic_t *v) { - int val = atomic_read(v); + int val = arch_atomic_read(v); - do { } while (!atomic_try_cmpxchg(v, &val, val & i)); + do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i)); return val; } -static inline void atomic_or(int i, atomic_t *v) +static inline void arch_atomic_or(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "orl %1,%0" : "+m" (v->counter) @@ -223,16 +223,16 @@ static inline void atomic_or(int i, atomic_t *v) : "memory"); } -static inline int atomic_fetch_or(int i, atomic_t *v) +static inline int arch_atomic_fetch_or(int i, atomic_t *v) { - int val = atomic_read(v); + int val = arch_atomic_read(v); - do { } while (!atomic_try_cmpxchg(v, &val, val | i)); + do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i)); return val; } -static inline void atomic_xor(int i, atomic_t *v) +static inline void 
arch_atomic_xor(int i, atomic_t *v) { asm volatile(LOCK_PREFIX "xorl %1,%0" : "+m" (v->counter) @@ -240,17 +240,17 @@ static inline void atomic_xor(int i, atomic_t *v) : "memory"); } -static inline int atomic_fetch_xor(int i, atomic_t *v) +static inline int arch_atomic_fetch_xor(int i, atomic_t *v) { - int val = atomic_read(v); + int val = arch_atomic_read(v); - do { } while (!atomic_try_cmpxchg(v, &val, val ^ i)); + do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i)); return val; } /** - * __atomic_add_unless - add unless the number is already a given value + * __arch_atomic_add_unless - add unless the number is already a given value * @v: pointer of type atomic_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -258,14 +258,14 @@ static inline int atomic_fetch_xor(int i, atomic_t *v) * Atomically adds @a to @v, so long as @v was not already @u. * Returns the old value of @v. */ -static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) +static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u) { - int c = atomic_read(v); + int c = arch_atomic_read(v); do { if (unlikely(c == u)) break; - } while (!atomic_try_cmpxchg(v, &c, c + a)); + } while (!arch_atomic_try_cmpxchg(v, &c, c + a)); return c; } @@ -276,4 +276,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u) # include #endif +#include + #endif /* _ASM_X86_ATOMIC_H */ diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h index 97c46b8169b7..46e1ef17d92d 100644 --- a/arch/x86/include/asm/atomic64_32.h +++ b/arch/x86/include/asm/atomic64_32.h @@ -62,7 +62,7 @@ ATOMIC64_DECL(add_unless); #undef ATOMIC64_EXPORT /** - * atomic64_cmpxchg - cmpxchg atomic64 variable + * arch_atomic64_cmpxchg - cmpxchg atomic64 variable * @v: pointer to type atomic64_t * @o: expected value * @n: new value @@ -71,20 +71,21 @@ ATOMIC64_DECL(add_unless); * the old value. */ -static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n) +static inline long long arch_atomic64_cmpxchg(atomic64_t *v, long long o, + long long n) { - return cmpxchg64(&v->counter, o, n); + return arch_cmpxchg64(&v->counter, o, n); } /** - * atomic64_xchg - xchg atomic64 variable + * arch_atomic64_xchg - xchg atomic64 variable * @v: pointer to type atomic64_t * @n: value to assign * * Atomically xchgs the value of @v to @n and returns * the old value. */ -static inline long long atomic64_xchg(atomic64_t *v, long long n) +static inline long long arch_atomic64_xchg(atomic64_t *v, long long n) { long long o; unsigned high = (unsigned)(n >> 32); @@ -96,13 +97,13 @@ static inline long long atomic64_xchg(atomic64_t *v, long long n) } /** - * atomic64_set - set atomic64 variable + * arch_atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: value to assign * * Atomically sets the value of @v to @n. */ -static inline void atomic64_set(atomic64_t *v, long long i) +static inline void arch_atomic64_set(atomic64_t *v, long long i) { unsigned high = (unsigned)(i >> 32); unsigned low = (unsigned)i; @@ -112,12 +113,12 @@ static inline void atomic64_set(atomic64_t *v, long long i) } /** - * atomic64_read - read atomic64 variable + * arch_atomic64_read - read atomic64 variable * @v: pointer to type atomic64_t * * Atomically reads the value of @v and returns it. 
*/ -static inline long long atomic64_read(const atomic64_t *v) +static inline long long arch_atomic64_read(const atomic64_t *v) { long long r; alternative_atomic64(read, "=&A" (r), "c" (v) : "memory"); @@ -125,13 +126,13 @@ static inline long long atomic64_read(const atomic64_t *v) } /** - * atomic64_add_return - add and return + * arch_atomic64_add_return - add and return * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v and returns @i + *@v */ -static inline long long atomic64_add_return(long long i, atomic64_t *v) +static inline long long arch_atomic64_add_return(long long i, atomic64_t *v) { alternative_atomic64(add_return, ASM_OUTPUT2("+A" (i), "+c" (v)), @@ -142,7 +143,7 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v) /* * Other variants with different arithmetic operators: */ -static inline long long atomic64_sub_return(long long i, atomic64_t *v) +static inline long long arch_atomic64_sub_return(long long i, atomic64_t *v) { alternative_atomic64(sub_return, ASM_OUTPUT2("+A" (i), "+c" (v)), @@ -150,7 +151,7 @@ static inline long long atomic64_sub_return(long long i, atomic64_t *v) return i; } -static inline long long atomic64_inc_return(atomic64_t *v) +static inline long long arch_atomic64_inc_return(atomic64_t *v) { long long a; alternative_atomic64(inc_return, "=&A" (a), @@ -158,7 +159,7 @@ static inline long long atomic64_inc_return(atomic64_t *v) return a; } -static inline long long atomic64_dec_return(atomic64_t *v) +static inline long long arch_atomic64_dec_return(atomic64_t *v) { long long a; alternative_atomic64(dec_return, "=&A" (a), @@ -167,13 +168,13 @@ static inline long long atomic64_dec_return(atomic64_t *v) } /** - * atomic64_add - add integer to atomic64 variable + * arch_atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v. */ -static inline long long atomic64_add(long long i, atomic64_t *v) +static inline long long arch_atomic64_add(long long i, atomic64_t *v) { __alternative_atomic64(add, add_return, ASM_OUTPUT2("+A" (i), "+c" (v)), @@ -182,13 +183,13 @@ static inline long long atomic64_add(long long i, atomic64_t *v) } /** - * atomic64_sub - subtract the atomic64 variable + * arch_atomic64_sub - subtract the atomic64 variable * @i: integer value to subtract * @v: pointer to type atomic64_t * * Atomically subtracts @i from @v. */ -static inline long long atomic64_sub(long long i, atomic64_t *v) +static inline long long arch_atomic64_sub(long long i, atomic64_t *v) { __alternative_atomic64(sub, sub_return, ASM_OUTPUT2("+A" (i), "+c" (v)), @@ -197,7 +198,7 @@ static inline long long atomic64_sub(long long i, atomic64_t *v) } /** - * atomic64_sub_and_test - subtract value from variable and test result + * arch_atomic64_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer to type atomic64_t * @@ -205,46 +206,46 @@ static inline long long atomic64_sub(long long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ -static inline int atomic64_sub_and_test(long long i, atomic64_t *v) +static inline int arch_atomic64_sub_and_test(long long i, atomic64_t *v) { - return atomic64_sub_return(i, v) == 0; + return arch_atomic64_sub_return(i, v) == 0; } /** - * atomic64_inc - increment atomic64 variable + * arch_atomic64_inc - increment atomic64 variable * @v: pointer to type atomic64_t * * Atomically increments @v by 1. 
*/ -static inline void atomic64_inc(atomic64_t *v) +static inline void arch_atomic64_inc(atomic64_t *v) { __alternative_atomic64(inc, inc_return, /* no output */, "S" (v) : "memory", "eax", "ecx", "edx"); } /** - * atomic64_dec - decrement atomic64 variable + * arch_atomic64_dec - decrement atomic64 variable * @v: pointer to type atomic64_t * * Atomically decrements @v by 1. */ -static inline void atomic64_dec(atomic64_t *v) +static inline void arch_atomic64_dec(atomic64_t *v) { __alternative_atomic64(dec, dec_return, /* no output */, "S" (v) : "memory", "eax", "ecx", "edx"); } /** - * atomic64_dec_and_test - decrement and test + * arch_atomic64_dec_and_test - decrement and test * @v: pointer to type atomic64_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. */ -static inline int atomic64_dec_and_test(atomic64_t *v) +static inline int arch_atomic64_dec_and_test(atomic64_t *v) { - return atomic64_dec_return(v) == 0; + return arch_atomic64_dec_return(v) == 0; } /** @@ -255,13 +256,13 @@ static inline int atomic64_dec_and_test(atomic64_t *v) * and returns true if the result is zero, or false for all * other cases. */ -static inline int atomic64_inc_and_test(atomic64_t *v) +static inline int arch_atomic64_inc_and_test(atomic64_t *v) { - return atomic64_inc_return(v) == 0; + return arch_atomic64_inc_return(v) == 0; } /** - * atomic64_add_negative - add and test if negative + * arch_atomic64_add_negative - add and test if negative * @i: integer value to add * @v: pointer to type atomic64_t * @@ -269,13 +270,13 @@ static inline int atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline int atomic64_add_negative(long long i, atomic64_t *v) +static inline int arch_atomic64_add_negative(long long i, atomic64_t *v) { - return atomic64_add_return(i, v) < 0; + return arch_atomic64_add_return(i, v) < 0; } /** - * atomic64_add_unless - add unless the number is a given value + * arch_atomic64_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -283,7 +284,8 @@ static inline int atomic64_add_negative(long long i, atomic64_t *v) * Atomically adds @a to @v, so long as it was not @u. * Returns non-zero if the add was done, zero otherwise. 
*/ -static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) +static inline int arch_atomic64_add_unless(atomic64_t *v, long long a, + long long u) { unsigned low = (unsigned)u; unsigned high = (unsigned)(u >> 32); @@ -294,7 +296,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u) } -static inline int atomic64_inc_not_zero(atomic64_t *v) +static inline int arch_atomic64_inc_not_zero(atomic64_t *v) { int r; alternative_atomic64(inc_not_zero, "=&a" (r), @@ -302,7 +304,7 @@ static inline int atomic64_inc_not_zero(atomic64_t *v) return r; } -static inline long long atomic64_dec_if_positive(atomic64_t *v) +static inline long long arch_atomic64_dec_if_positive(atomic64_t *v) { long long r; alternative_atomic64(dec_if_positive, "=&A" (r), @@ -313,70 +315,70 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v) #undef alternative_atomic64 #undef __alternative_atomic64 -static inline void atomic64_and(long long i, atomic64_t *v) +static inline void arch_atomic64_and(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c & i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) c = old; } -static inline long long atomic64_fetch_and(long long i, atomic64_t *v) +static inline long long arch_atomic64_fetch_and(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c & i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c & i)) != c) c = old; return old; } -static inline void atomic64_or(long long i, atomic64_t *v) +static inline void arch_atomic64_or(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c | i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) c = old; } -static inline long long atomic64_fetch_or(long long i, atomic64_t *v) +static inline long long arch_atomic64_fetch_or(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c | i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c | i)) != c) c = old; return old; } -static inline void atomic64_xor(long long i, atomic64_t *v) +static inline void arch_atomic64_xor(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) c = old; } -static inline long long atomic64_fetch_xor(long long i, atomic64_t *v) +static inline long long arch_atomic64_fetch_xor(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c ^ i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c ^ i)) != c) c = old; return old; } -static inline long long atomic64_fetch_add(long long i, atomic64_t *v) +static inline long long arch_atomic64_fetch_add(long long i, atomic64_t *v) { long long old, c = 0; - while ((old = atomic64_cmpxchg(v, c, c + i)) != c) + while ((old = arch_atomic64_cmpxchg(v, c, c + i)) != c) c = old; return old; } -#define atomic64_fetch_sub(i, v) atomic64_fetch_add(-(i), (v)) +#define arch_atomic64_fetch_sub(i, v) arch_atomic64_fetch_add(-(i), (v)) #endif /* _ASM_X86_ATOMIC64_32_H */ diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h index 738495caf05f..6106b59d3260 100644 --- a/arch/x86/include/asm/atomic64_64.h +++ b/arch/x86/include/asm/atomic64_64.h @@ -11,37 +11,37 @@ #define ATOMIC64_INIT(i) { (i) } /** - * atomic64_read - read atomic64 variable + * arch_atomic64_read - read atomic64 variable * @v: pointer of type atomic64_t * * 
Atomically reads the value of @v. * Doesn't imply a read memory barrier. */ -static inline long atomic64_read(const atomic64_t *v) +static inline long arch_atomic64_read(const atomic64_t *v) { return READ_ONCE((v)->counter); } /** - * atomic64_set - set atomic64 variable + * arch_atomic64_set - set atomic64 variable * @v: pointer to type atomic64_t * @i: required value * * Atomically sets the value of @v to @i. */ -static inline void atomic64_set(atomic64_t *v, long i) +static inline void arch_atomic64_set(atomic64_t *v, long i) { WRITE_ONCE(v->counter, i); } /** - * atomic64_add - add integer to atomic64 variable + * arch_atomic64_add - add integer to atomic64 variable * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v. */ -static __always_inline void atomic64_add(long i, atomic64_t *v) +static __always_inline void arch_atomic64_add(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "addq %1,%0" : "=m" (v->counter) @@ -49,13 +49,13 @@ static __always_inline void atomic64_add(long i, atomic64_t *v) } /** - * atomic64_sub - subtract the atomic64 variable + * arch_atomic64_sub - subtract the atomic64 variable * @i: integer value to subtract * @v: pointer to type atomic64_t * * Atomically subtracts @i from @v. */ -static inline void atomic64_sub(long i, atomic64_t *v) +static inline void arch_atomic64_sub(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "subq %1,%0" : "=m" (v->counter) @@ -63,7 +63,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) } /** - * atomic64_sub_and_test - subtract value from variable and test result + * arch_atomic64_sub_and_test - subtract value from variable and test result * @i: integer value to subtract * @v: pointer to type atomic64_t * @@ -71,18 +71,18 @@ static inline void atomic64_sub(long i, atomic64_t *v) * true if the result is zero, or false for all * other cases. */ -static inline bool atomic64_sub_and_test(long i, atomic64_t *v) +static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e); } /** - * atomic64_inc - increment atomic64 variable + * arch_atomic64_inc - increment atomic64 variable * @v: pointer to type atomic64_t * * Atomically increments @v by 1. */ -static __always_inline void atomic64_inc(atomic64_t *v) +static __always_inline void arch_atomic64_inc(atomic64_t *v) { asm volatile(LOCK_PREFIX "incq %0" : "=m" (v->counter) @@ -90,12 +90,12 @@ static __always_inline void atomic64_inc(atomic64_t *v) } /** - * atomic64_dec - decrement atomic64 variable + * arch_atomic64_dec - decrement atomic64 variable * @v: pointer to type atomic64_t * * Atomically decrements @v by 1. */ -static __always_inline void atomic64_dec(atomic64_t *v) +static __always_inline void arch_atomic64_dec(atomic64_t *v) { asm volatile(LOCK_PREFIX "decq %0" : "=m" (v->counter) @@ -103,33 +103,33 @@ static __always_inline void atomic64_dec(atomic64_t *v) } /** - * atomic64_dec_and_test - decrement and test + * arch_atomic64_dec_and_test - decrement and test * @v: pointer to type atomic64_t * * Atomically decrements @v by 1 and * returns true if the result is 0, or false for all other * cases. 
*/ -static inline bool atomic64_dec_and_test(atomic64_t *v) +static inline bool arch_atomic64_dec_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e); } /** - * atomic64_inc_and_test - increment and test + * arch_atomic64_inc_and_test - increment and test * @v: pointer to type atomic64_t * * Atomically increments @v by 1 * and returns true if the result is zero, or false for all * other cases. */ -static inline bool atomic64_inc_and_test(atomic64_t *v) +static inline bool arch_atomic64_inc_and_test(atomic64_t *v) { GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e); } /** - * atomic64_add_negative - add and test if negative + * arch_atomic64_add_negative - add and test if negative * @i: integer value to add * @v: pointer to type atomic64_t * @@ -137,59 +137,59 @@ static inline bool atomic64_inc_and_test(atomic64_t *v) * if the result is negative, or false when * result is greater than or equal to zero. */ -static inline bool atomic64_add_negative(long i, atomic64_t *v) +static inline bool arch_atomic64_add_negative(long i, atomic64_t *v) { GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s); } /** - * atomic64_add_return - add and return + * arch_atomic64_add_return - add and return * @i: integer value to add * @v: pointer to type atomic64_t * * Atomically adds @i to @v and returns @i + @v */ -static __always_inline long atomic64_add_return(long i, atomic64_t *v) +static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v) { return i + xadd(&v->counter, i); } -static inline long atomic64_sub_return(long i, atomic64_t *v) +static inline long arch_atomic64_sub_return(long i, atomic64_t *v) { - return atomic64_add_return(-i, v); + return arch_atomic64_add_return(-i, v); } -static inline long atomic64_fetch_add(long i, atomic64_t *v) +static inline long arch_atomic64_fetch_add(long i, atomic64_t *v) { return xadd(&v->counter, i); } -static inline long atomic64_fetch_sub(long i, atomic64_t *v) +static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v) { return xadd(&v->counter, -i); } -#define atomic64_inc_return(v) (atomic64_add_return(1, (v))) -#define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) +#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v))) +#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v))) -static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new) { - return cmpxchg(&v->counter, old, new); + return arch_cmpxchg(&v->counter, old, new); } -#define atomic64_try_cmpxchg atomic64_try_cmpxchg -static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new) +#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg +static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new) { return try_cmpxchg(&v->counter, old, new); } -static inline long atomic64_xchg(atomic64_t *v, long new) +static inline long arch_atomic64_xchg(atomic64_t *v, long new) { return xchg(&v->counter, new); } /** - * atomic64_add_unless - add unless the number is a given value + * arch_atomic64_add_unless - add unless the number is a given value * @v: pointer of type atomic64_t * @a: the amount to add to v... * @u: ...unless v is equal to u. @@ -197,37 +197,37 @@ static inline long atomic64_xchg(atomic64_t *v, long new) * Atomically adds @a to @v, so long as it was not @u. * Returns the old value of @v. 
*/ -static inline bool atomic64_add_unless(atomic64_t *v, long a, long u) +static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u) { - s64 c = atomic64_read(v); + s64 c = arch_atomic64_read(v); do { if (unlikely(c == u)) return false; - } while (!atomic64_try_cmpxchg(v, &c, c + a)); + } while (!arch_atomic64_try_cmpxchg(v, &c, c + a)); return true; } -#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0) /* - * atomic64_dec_if_positive - decrement by 1 if old value positive + * arch_atomic64_dec_if_positive - decrement by 1 if old value positive * @v: pointer of type atomic_t * * The function returns the old value of *v minus 1, even if * the atomic variable, v, was not decremented. */ -static inline long atomic64_dec_if_positive(atomic64_t *v) +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) { - s64 dec, c = atomic64_read(v); + s64 dec, c = arch_atomic64_read(v); do { dec = c - 1; if (unlikely(dec < 0)) break; - } while (!atomic64_try_cmpxchg(v, &c, dec)); + } while (!arch_atomic64_try_cmpxchg(v, &c, dec)); return dec; } -static inline void atomic64_and(long i, atomic64_t *v) +static inline void arch_atomic64_and(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "andq %1,%0" : "+m" (v->counter) @@ -235,16 +235,16 @@ static inline void atomic64_and(long i, atomic64_t *v) : "memory"); } -static inline long atomic64_fetch_and(long i, atomic64_t *v) +static inline long arch_atomic64_fetch_and(long i, atomic64_t *v) { - s64 val = atomic64_read(v); + s64 val = arch_atomic64_read(v); do { - } while (!atomic64_try_cmpxchg(v, &val, val & i)); + } while (!arch_atomic64_try_cmpxchg(v, &val, val & i)); return val; } -static inline void atomic64_or(long i, atomic64_t *v) +static inline void arch_atomic64_or(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "orq %1,%0" : "+m" (v->counter) @@ -252,16 +252,16 @@ static inline void atomic64_or(long i, atomic64_t *v) : "memory"); } -static inline long atomic64_fetch_or(long i, atomic64_t *v) +static inline long arch_atomic64_fetch_or(long i, atomic64_t *v) { - s64 val = atomic64_read(v); + s64 val = arch_atomic64_read(v); do { - } while (!atomic64_try_cmpxchg(v, &val, val | i)); + } while (!arch_atomic64_try_cmpxchg(v, &val, val | i)); return val; } -static inline void atomic64_xor(long i, atomic64_t *v) +static inline void arch_atomic64_xor(long i, atomic64_t *v) { asm volatile(LOCK_PREFIX "xorq %1,%0" : "+m" (v->counter) @@ -269,12 +269,12 @@ static inline void atomic64_xor(long i, atomic64_t *v) : "memory"); } -static inline long atomic64_fetch_xor(long i, atomic64_t *v) +static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v) { - s64 val = atomic64_read(v); + s64 val = arch_atomic64_read(v); do { - } while (!atomic64_try_cmpxchg(v, &val, val ^ i)); + } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i)); return val; } diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h index 56bd436ed01b..e3efd8a06066 100644 --- a/arch/x86/include/asm/cmpxchg.h +++ b/arch/x86/include/asm/cmpxchg.h @@ -145,13 +145,13 @@ extern void __add_wrong_size(void) # include #endif -#define cmpxchg(ptr, old, new) \ +#define arch_cmpxchg(ptr, old, new) \ __cmpxchg(ptr, old, new, sizeof(*(ptr))) -#define sync_cmpxchg(ptr, old, new) \ +#define arch_sync_cmpxchg(ptr, old, new) \ __sync_cmpxchg(ptr, old, new, sizeof(*(ptr))) -#define cmpxchg_local(ptr, old, new) \ +#define arch_cmpxchg_local(ptr, old, new) \ __cmpxchg_local(ptr, 
old, new, sizeof(*(ptr))) @@ -221,7 +221,7 @@ extern void __add_wrong_size(void) #define __try_cmpxchg(ptr, pold, new, size) \ __raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX) -#define try_cmpxchg(ptr, pold, new) \ +#define try_cmpxchg(ptr, pold, new) \ __try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr))) /* @@ -250,10 +250,10 @@ extern void __add_wrong_size(void) __ret; \ }) -#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \ +#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \ __cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2) -#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ +#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \ __cmpxchg_double(, p1, p2, o1, o2, n1, n2) #endif /* ASM_X86_CMPXCHG_H */ diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h index 1732704f0445..1a2eafca7038 100644 --- a/arch/x86/include/asm/cmpxchg_32.h +++ b/arch/x86/include/asm/cmpxchg_32.h @@ -36,10 +36,10 @@ static inline void set_64bit(volatile u64 *ptr, u64 value) } #ifdef CONFIG_X86_CMPXCHG64 -#define cmpxchg64(ptr, o, n) \ +#define arch_cmpxchg64(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \ (unsigned long long)(n))) -#define cmpxchg64_local(ptr, o, n) \ +#define arch_cmpxchg64_local(ptr, o, n) \ ((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \ (unsigned long long)(n))) #endif @@ -76,7 +76,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) * to simulate the cmpxchg8b on the 80386 and 80486 CPU. */ -#define cmpxchg64(ptr, o, n) \ +#define arch_cmpxchg64(ptr, o, n) \ ({ \ __typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __old = (o); \ @@ -93,7 +93,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new) __ret; }) -#define cmpxchg64_local(ptr, o, n) \ +#define arch_cmpxchg64_local(ptr, o, n) \ ({ \ __typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __old = (o); \ diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h index 03cad196a301..bfca3b346c74 100644 --- a/arch/x86/include/asm/cmpxchg_64.h +++ b/arch/x86/include/asm/cmpxchg_64.h @@ -7,13 +7,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val) *ptr = val; } -#define cmpxchg64(ptr, o, n) \ +#define arch_cmpxchg64(ptr, o, n) \ ({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg((ptr), (o), (n)); \ }) -#define cmpxchg64_local(ptr, o, n) \ +#define arch_cmpxchg64_local(ptr, o, n) \ ({ \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ cmpxchg_local((ptr), (o), (n)); \ -- cgit v1.2.1 From ac605bee0bfab40fd5d11964705e907d2d5a32de Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Mon, 29 Jan 2018 18:26:07 +0100 Subject: locking/atomic, asm-generic, x86: Add comments for atomic instrumentation The comments are factored out from the code changes to make them easier to read. Add them separately to explain some non-obvious aspects. 
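For context, the arch_ prefix introduced by the preceding patches is what allows a generic header to layer instrumentation on top of the raw x86 operations. A minimal sketch of that wrapper pattern, modeled loosely on the asm-generic atomic-instrumented.h approach (the exact header contents may differ), looks like this:

	#include <linux/kasan-checks.h>

	static __always_inline int atomic_read(const atomic_t *v)
	{
		/* tell KASAN about the access, then call the raw x86 op */
		kasan_check_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}

	static __always_inline void atomic_set(atomic_t *v, int i)
	{
		kasan_check_write(v, sizeof(*v));
		arch_atomic_set(v, i);
	}

Every atomic_*() operation the rest of the kernel calls becomes such a thin wrapper, so the instrumentation lives in one generic place instead of being duplicated per architecture.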
Signed-off-by: Dmitry Vyukov Cc: Andrew Morton Cc: Andrey Ryabinin Cc: Linus Torvalds Cc: Mark Rutland Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: kasan-dev@googlegroups.com Cc: linux-mm@kvack.org Link: http://lkml.kernel.org/r/cc595efc644bb905407012d82d3eb8bac3368e7a.1517246437.git.dvyukov@google.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/atomic.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h index 33afc966d6a9..0db6bec95489 100644 --- a/arch/x86/include/asm/atomic.h +++ b/arch/x86/include/asm/atomic.h @@ -24,6 +24,10 @@ */ static __always_inline int arch_atomic_read(const atomic_t *v) { + /* + * Note for KASAN: we deliberately don't use READ_ONCE_NOCHECK() here, + * it's non-inlined function that increases binary size and stack usage. + */ return READ_ONCE((v)->counter); } -- cgit v1.2.1 From 50beba07a0e42ebd4454adc97515a2a2a969645b Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 20 Feb 2018 20:05:04 +0200 Subject: ACPI, x86/boot: Split out acpi_generic_reduce_hw_init() and export This is a preparation patch to allow override the hardware reduced initialization on ACPI enabled platforms. No functional change intended. Signed-off-by: Andy Shevchenko Reviewed-by: Rafael J. Wysocki Cc: Eric Biederman Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: linux-acpi@vger.kernel.org Link: http://lkml.kernel.org/r/20180220180506.65523-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/acpi.h | 4 ++++ arch/x86/kernel/acpi/boot.c | 22 +++++++++++++--------- 2 files changed, 17 insertions(+), 9 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 6609dd7289b5..a303d7b7d763 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h @@ -140,6 +140,8 @@ static inline u64 acpi_arch_get_root_pointer(void) return x86_init.acpi.get_root_pointer(); } +void acpi_generic_reduced_hw_init(void); + #else /* !CONFIG_ACPI */ #define acpi_lapic 0 @@ -149,6 +151,8 @@ static inline void acpi_noirq_set(void) { } static inline void acpi_disable_pci(void) { } static inline void disable_acpi(void) { } +static inline void acpi_generic_reduced_hw_init(void) { } + #endif /* !CONFIG_ACPI */ #define ARCH_HAS_POWER_INIT 1 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 2aa92094b59d..baa084ecffdb 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1376,17 +1376,21 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d) * * We initialize the Hardware-reduced ACPI model here: */ +void __init acpi_generic_reduced_hw_init(void) +{ + /* + * Override x86_init functions and bypass legacy PIC in + * hardware reduced ACPI mode. 
+ */ + x86_init.timers.timer_init = x86_init_noop; + x86_init.irqs.pre_vector_init = x86_init_noop; + legacy_pic = &null_legacy_pic; +} + static void __init acpi_reduced_hw_init(void) { - if (acpi_gbl_reduced_hardware) { - /* - * Override x86_init functions and bypass legacy pic - * in Hardware-reduced ACPI mode - */ - x86_init.timers.timer_init = x86_init_noop; - x86_init.irqs.pre_vector_init = x86_init_noop; - legacy_pic = &null_legacy_pic; - } + if (acpi_gbl_reduced_hardware) + acpi_generic_reduced_hw_init(); } /* -- cgit v1.2.1 From 81b53e5ff21e09b42525cfa08f2b0af2b8c5f465 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 20 Feb 2018 20:05:05 +0200 Subject: ACPI, x86/boot: Introduce the ->reduced_hw_early_init() ACPI callback Some ACPI hardware reduced platforms need to initialize certain devices defined by the ACPI hardware specification even though in principle those devices should not be present in an ACPI hardware reduced platform. To allow that to happen, make it possible to override the generic x86_init callbacks and provide a custom legacy_pic value, add a new ->reduced_hw_early_init() callback to struct x86_init_acpi and make acpi_reduced_hw_init() use it. Signed-off-by: Andy Shevchenko Reviewed-by: Rafael J. Wysocki Cc: Eric Biederman Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: Thomas Gleixner Cc: linux-acpi@vger.kernel.org Link: http://lkml.kernel.org/r/20180220180506.65523-2-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/x86_init.h | 2 ++ arch/x86/kernel/acpi/boot.c | 2 +- arch/x86/kernel/x86_init.c | 2 ++ 3 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h index 2e2c34d2bb00..5bd45a8f5ae3 100644 --- a/arch/x86/include/asm/x86_init.h +++ b/arch/x86/include/asm/x86_init.h @@ -133,9 +133,11 @@ struct x86_hyper_init { /** * struct x86_init_acpi - x86 ACPI init functions * @get_root_pointer: get RSDP address + * @reduced_hw_early_init: hardware reduced platform early init */ struct x86_init_acpi { u64 (*get_root_pointer)(void); + void (*reduced_hw_early_init)(void); }; /** diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index baa084ecffdb..7a37d9357bc4 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -1390,7 +1390,7 @@ void __init acpi_generic_reduced_hw_init(void) static void __init acpi_reduced_hw_init(void) { if (acpi_gbl_reduced_hardware) - acpi_generic_reduced_hw_init(); + x86_init.acpi.reduced_hw_early_init(); } /* diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index b8cff22a8785..ac67ccffeef0 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -95,6 +96,7 @@ struct x86_init_ops x86_init __initdata = { .acpi = { .get_root_pointer = u64_x86_init_noop, + .reduced_hw_early_init = acpi_generic_reduced_hw_init, }, }; -- cgit v1.2.1 From 02428742639bc3300c8c527b054d0ec0bdf5571d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 20 Feb 2018 20:05:06 +0200 Subject: x86/platform/intel-mid: Add special handling for ACPI HW reduced platforms When switching to ACPI HW reduced platforms we still want to initialize timers. Override x86_init.acpi.reduced_hw_init to achieve that. Signed-off-by: Andy Shevchenko Reviewed-by: Rafael J. Wysocki Cc: Eric Biederman Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rafael J . 
Wysocki Cc: Thomas Gleixner Cc: linux-acpi@vger.kernel.org Link: http://lkml.kernel.org/r/20180220180506.65523-3-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/platform/intel-mid/intel-mid.c | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c index 2c67bae6bb53..c556f1e8936e 100644 --- a/arch/x86/platform/intel-mid/intel-mid.c +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -199,6 +199,12 @@ void __init x86_intel_mid_early_setup(void) legacy_pic = &null_legacy_pic; + /* + * Do nothing for now as everything needed done in + * x86_intel_mid_early_setup() below. + */ + x86_init.acpi.reduced_hw_early_init = x86_init_noop; + pm_power_off = intel_mid_power_off; machine_ops.emergency_restart = intel_mid_reboot; -- cgit v1.2.1 From 8343aae66167df6708128a778e750d48dbe31302 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Nov 2017 14:28:33 +0100 Subject: perf/core: Remove perf_event::group_entry Now that all the grouping is done with RB trees, we no longer need group_entry and can replace the whole thing with sibling_list. Signed-off-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Cc: Alexander Shishkin Cc: Alexey Budankov Cc: Arnaldo Carvalho de Melo Cc: David Carrillo-Cisneros Cc: Dmitri Prokhorov Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Valery Cherepennikov Cc: Vince Weaver Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 2 +- arch/x86/events/intel/uncore.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 9c86e10f1196..77a4125b6b1f 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, if (!dogrp) return n; - list_for_each_entry(event, &leader->sibling_list, group_entry) { + list_for_each_entry(event, &leader->sibling_list, sibling_list) { if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 7874c980d569..9e374cd22ad2 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, if (!dogrp) return n; - list_for_each_entry(event, &leader->sibling_list, group_entry) { + list_for_each_entry(event, &leader->sibling_list, sibling_list) { if (!is_box_event(box, event) || event->state <= PERF_EVENT_STATE_OFF) continue; -- cgit v1.2.1 From 18a955219bf7d9008ce480d4451b6b8bf4483a22 Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Tue, 13 Mar 2018 11:03:46 -0600 Subject: x86/mm: Fix vmalloc_fault to use pXd_large Gratian Crisan reported that vmalloc_fault() crashes when CONFIG_HUGETLBFS is not set since the function inadvertently uses pXn_huge(), which always return 0 in this case. ioremap() does not depend on CONFIG_HUGETLBFS. Fix vmalloc_fault() to call pXd_large() instead. 
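To make the distinction concrete, the two helpers differ roughly as follows (a sketch from memory, not part of the patch): with CONFIG_HUGETLBFS disabled the generic header stubs pmd_huge() out entirely, while the x86 pmd_large() still tests the PSE bit that a huge ioremap() mapping sets:

	/* include/linux/hugetlb.h, roughly, when hugetlbfs is not configured: */
	#define pmd_huge(x)	0

	/* arch/x86 helper: checks the page-size bit in the entry itself */
	static inline int pmd_large(pmd_t pmd)
	{
		return pmd_flags(pmd) & _PAGE_PSE;
	}

With hugetlbfs disabled, the old check therefore fell through and the fault handler went on to walk a pte level that does not exist under a large mapping, hence the crash.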
Fixes: f4eafd8bcd52 ("x86/mm: Fix vmalloc_fault() to handle large pages properly") Reported-by: Gratian Crisan Signed-off-by: Toshi Kani Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: linux-mm@kvack.org Cc: Borislav Petkov Cc: Andy Lutomirski Link: https://lkml.kernel.org/r/20180313170347.3829-2-toshi.kani@hpe.com --- arch/x86/mm/fault.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index c88573d90f3e..25a30b5d6582 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -330,7 +330,7 @@ static noinline int vmalloc_fault(unsigned long address) if (!pmd_k) return -1; - if (pmd_huge(*pmd_k)) + if (pmd_large(*pmd_k)) return 0; pte_k = pte_offset_kernel(pmd_k, address); @@ -475,7 +475,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) BUG(); - if (pud_huge(*pud)) + if (pud_large(*pud)) return 0; pmd = pmd_offset(pud, address); @@ -486,7 +486,7 @@ static noinline int vmalloc_fault(unsigned long address) if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) BUG(); - if (pmd_huge(*pmd)) + if (pmd_large(*pmd)) return 0; pte_ref = pte_offset_kernel(pmd_ref, address); -- cgit v1.2.1 From a7e6c7015bf3e0cb467a2f6c0e1de985ee1a0ecb Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 13 Mar 2018 21:36:22 -0700 Subject: x86, memremap: fix altmap accounting at free Commit 24b6d4164348 "mm: pass the vmem_altmap to vmemmap_free" converted the vmemmap_free() path to pass the altmap argument all the way through the call chain rather than looking it up based on the page. Unfortunately that ends up over freeing altmap allocated pages in some cases since free_pagetable() is used to free both memmap space and pte space, where only the memmap stored in huge pages uses altmap allocations. Given that altmap allocations for memmap space are special cased in vmemmap_populate_hugepages() add a symmetric / special case free_hugepage_table() to handle altmap freeing, and cleanup the unneeded passing of altmap to leaf functions that do not require it. Without this change the sanity check accounting in devm_memremap_pages_release() will throw a warning with the following signature. nd_pmem pfn10.1: devm_memremap_pages_release: failed to free all reserved pages WARNING: CPU: 44 PID: 3539 at kernel/memremap.c:310 devm_memremap_pages_release+0x1c7/0x220 CPU: 44 PID: 3539 Comm: ndctl Tainted: G L 4.16.0-rc1-linux-stable #7 RIP: 0010:devm_memremap_pages_release+0x1c7/0x220 [..] Call Trace: release_nodes+0x225/0x270 device_release_driver_internal+0x15d/0x210 bus_remove_device+0xe2/0x160 device_del+0x130/0x310 ? klist_release+0x56/0x100 ? nd_region_notify+0xc0/0xc0 [libnvdimm] device_unregister+0x16/0x60 This was missed in testing since not all configurations will trigger this warning. 
Fixes: 24b6d4164348 ("mm: pass the vmem_altmap to vmemmap_free") Reported-by: Jane Chu Cc: Ross Zwisler Reviewed-by: Christoph Hellwig Signed-off-by: Dan Williams --- arch/x86/mm/init_64.c | 60 ++++++++++++++++++++++++--------------------------- 1 file changed, 28 insertions(+), 32 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 8b72923f1d35..af11a2890235 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c @@ -800,17 +800,11 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, #define PAGE_INUSE 0xFD -static void __meminit free_pagetable(struct page *page, int order, - struct vmem_altmap *altmap) +static void __meminit free_pagetable(struct page *page, int order) { unsigned long magic; unsigned int nr_pages = 1 << order; - if (altmap) { - vmem_altmap_free(altmap, nr_pages); - return; - } - /* bootmem page has reserved flag */ if (PageReserved(page)) { __ClearPageReserved(page); @@ -826,8 +820,16 @@ static void __meminit free_pagetable(struct page *page, int order, free_pages((unsigned long)page_address(page), order); } -static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd, +static void __meminit free_hugepage_table(struct page *page, struct vmem_altmap *altmap) +{ + if (altmap) + vmem_altmap_free(altmap, PMD_SIZE / PAGE_SIZE); + else + free_pagetable(page, get_order(PMD_SIZE)); +} + +static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd) { pte_t *pte; int i; @@ -839,14 +841,13 @@ static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd, } /* free a pte talbe */ - free_pagetable(pmd_page(*pmd), 0, altmap); + free_pagetable(pmd_page(*pmd), 0); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); spin_unlock(&init_mm.page_table_lock); } -static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, - struct vmem_altmap *altmap) +static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud) { pmd_t *pmd; int i; @@ -858,14 +859,13 @@ static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud, } /* free a pmd talbe */ - free_pagetable(pud_page(*pud), 0, altmap); + free_pagetable(pud_page(*pud), 0); spin_lock(&init_mm.page_table_lock); pud_clear(pud); spin_unlock(&init_mm.page_table_lock); } -static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d, - struct vmem_altmap *altmap) +static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d) { pud_t *pud; int i; @@ -877,7 +877,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d, } /* free a pud talbe */ - free_pagetable(p4d_page(*p4d), 0, altmap); + free_pagetable(p4d_page(*p4d), 0); spin_lock(&init_mm.page_table_lock); p4d_clear(p4d); spin_unlock(&init_mm.page_table_lock); @@ -885,7 +885,7 @@ static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d, static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, - struct vmem_altmap *altmap, bool direct) + bool direct) { unsigned long next, pages = 0; pte_t *pte; @@ -916,7 +916,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, * freed when offlining, or simplely not in use. 
*/ if (!direct) - free_pagetable(pte_page(*pte), 0, altmap); + free_pagetable(pte_page(*pte), 0); spin_lock(&init_mm.page_table_lock); pte_clear(&init_mm, addr, pte); @@ -939,7 +939,7 @@ remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end, page_addr = page_address(pte_page(*pte)); if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) { - free_pagetable(pte_page(*pte), 0, altmap); + free_pagetable(pte_page(*pte), 0); spin_lock(&init_mm.page_table_lock); pte_clear(&init_mm, addr, pte); @@ -974,9 +974,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, if (IS_ALIGNED(addr, PMD_SIZE) && IS_ALIGNED(next, PMD_SIZE)) { if (!direct) - free_pagetable(pmd_page(*pmd), - get_order(PMD_SIZE), - altmap); + free_hugepage_table(pmd_page(*pmd), + altmap); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); @@ -989,9 +988,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, page_addr = page_address(pmd_page(*pmd)); if (!memchr_inv(page_addr, PAGE_INUSE, PMD_SIZE)) { - free_pagetable(pmd_page(*pmd), - get_order(PMD_SIZE), - altmap); + free_hugepage_table(pmd_page(*pmd), + altmap); spin_lock(&init_mm.page_table_lock); pmd_clear(pmd); @@ -1003,8 +1001,8 @@ remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end, } pte_base = (pte_t *)pmd_page_vaddr(*pmd); - remove_pte_table(pte_base, addr, next, altmap, direct); - free_pte_table(pte_base, pmd, altmap); + remove_pte_table(pte_base, addr, next, direct); + free_pte_table(pte_base, pmd); } /* Call free_pmd_table() in remove_pud_table(). */ @@ -1033,8 +1031,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, IS_ALIGNED(next, PUD_SIZE)) { if (!direct) free_pagetable(pud_page(*pud), - get_order(PUD_SIZE), - altmap); + get_order(PUD_SIZE)); spin_lock(&init_mm.page_table_lock); pud_clear(pud); @@ -1048,8 +1045,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, if (!memchr_inv(page_addr, PAGE_INUSE, PUD_SIZE)) { free_pagetable(pud_page(*pud), - get_order(PUD_SIZE), - altmap); + get_order(PUD_SIZE)); spin_lock(&init_mm.page_table_lock); pud_clear(pud); @@ -1062,7 +1058,7 @@ remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end, pmd_base = pmd_offset(pud, 0); remove_pmd_table(pmd_base, addr, next, direct, altmap); - free_pmd_table(pmd_base, pud, altmap); + free_pmd_table(pmd_base, pud); } if (direct) @@ -1094,7 +1090,7 @@ remove_p4d_table(p4d_t *p4d_start, unsigned long addr, unsigned long end, * to adapt for boot-time switching between 4 and 5 level page tables. */ if (CONFIG_PGTABLE_LEVELS == 5) - free_pud_table(pud_base, p4d, altmap); + free_pud_table(pud_base, p4d); } if (direct) -- cgit v1.2.1 From 13cc36d76bc4f5a9801ae32630bc8240ba0cc522 Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Fri, 9 Mar 2018 10:42:50 -0800 Subject: x86/rtc: Stop using deprecated functions rtc_time_to_tm() and rtc_tm_to_time() are deprecated because they rely on 32bits variables and that will make rtc break in y2038/2016. Use the proper y2038 safe functions. 
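As a purely illustrative caller-side sketch (hypothetical code, not taken from the patch), the 64-bit variant accepts the seconds value as a wide type, so it cannot truncate on 32-bit kernels:

	#include <linux/rtc.h>
	#include <linux/mc146818rtc.h>

	static int example_set_cmos(time64_t now_sec)
	{
		struct rtc_time tm;

		rtc_time64_to_tm(now_sec, &tm);	/* y2038-safe conversion */
		if (rtc_valid_tm(&tm))		/* 0 means the time is valid */
			return -EINVAL;
		return mc146818_set_time(&tm);
	}

The deprecated rtc_time_to_tm() took an unsigned long, which is only 32 bits wide on 32-bit architectures.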
Signed-off-by: Benjamin Gaignard Signed-off-by: John Stultz Signed-off-by: Thomas Gleixner Cc: Prarit Bhargava Cc: Arnd Bergmann Cc: Richard Cochran Cc: Stephen Boyd Cc: Miroslav Lichvar Cc: Alexandre Belloni Link: https://lkml.kernel.org/r/1520620971-9567-5-git-send-email-john.stultz@linaro.org --- arch/x86/kernel/rtc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 69ac9cb9cac6..f7b82ed7b5b5 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c @@ -41,11 +41,11 @@ EXPORT_SYMBOL(rtc_lock); */ int mach_set_rtc_mmss(const struct timespec *now) { - unsigned long nowtime = now->tv_sec; + unsigned long long nowtime = now->tv_sec; struct rtc_time tm; int retval = 0; - rtc_time_to_tm(nowtime, &tm); + rtc_time64_to_tm(nowtime, &tm); if (!rtc_valid_tm(&tm)) { retval = mc146818_set_time(&tm); if (retval) @@ -53,7 +53,7 @@ int mach_set_rtc_mmss(const struct timespec *now) __func__, retval); } else { printk(KERN_ERR - "%s: Invalid RTC value: write of %lx to RTC failed\n", + "%s: Invalid RTC value: write of %llx to RTC failed\n", __func__, nowtime); retval = -EINVAL; } -- cgit v1.2.1 From 565977a3d929fc4427769117a8ac976ec16776d5 Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Wed, 14 Mar 2018 14:59:32 -0600 Subject: x86/mm: Remove pointless checks in vmalloc_fault vmalloc_fault() sets user's pgd or p4d from the kernel page table. Once it's set, all tables underneath are identical. There is no point of following the same page table with two separate pointers and make sure they see the same with BUG(). Remove the pointless checks in vmalloc_fault(). Also rename the kernel pgd/p4d pointers to pgd_k/p4d_k so that their names are consistent in the file. Suggested-by: Andy Lutomirski Signed-off-by: Toshi Kani Signed-off-by: Thomas Gleixner Cc: linux-mm@kvack.org Cc: Borislav Petkov Cc: Gratian Crisan Link: https://lkml.kernel.org/r/20180314205932.7193-1-toshi.kani@hpe.com --- arch/x86/mm/fault.c | 56 ++++++++++++++++------------------------------------- 1 file changed, 17 insertions(+), 39 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 84d702a71afe..70c3b1c43676 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -417,11 +417,11 @@ void vmalloc_sync_all(void) */ static noinline int vmalloc_fault(unsigned long address) { - pgd_t *pgd, *pgd_ref; - p4d_t *p4d, *p4d_ref; - pud_t *pud, *pud_ref; - pmd_t *pmd, *pmd_ref; - pte_t *pte, *pte_ref; + pgd_t *pgd, *pgd_k; + p4d_t *p4d, *p4d_k; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; /* Make sure we are in vmalloc area: */ if (!(address >= VMALLOC_START && address < VMALLOC_END)) @@ -435,73 +435,51 @@ static noinline int vmalloc_fault(unsigned long address) * case just flush: */ pgd = (pgd_t *)__va(read_cr3_pa()) + pgd_index(address); - pgd_ref = pgd_offset_k(address); - if (pgd_none(*pgd_ref)) + pgd_k = pgd_offset_k(address); + if (pgd_none(*pgd_k)) return -1; if (pgtable_l5_enabled) { if (pgd_none(*pgd)) { - set_pgd(pgd, *pgd_ref); + set_pgd(pgd, *pgd_k); arch_flush_lazy_mmu_mode(); } else { - BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_k)); } } /* With 4-level paging, copying happens on the p4d level. 
*/ p4d = p4d_offset(pgd, address); - p4d_ref = p4d_offset(pgd_ref, address); - if (p4d_none(*p4d_ref)) + p4d_k = p4d_offset(pgd_k, address); + if (p4d_none(*p4d_k)) return -1; if (p4d_none(*p4d) && !pgtable_l5_enabled) { - set_p4d(p4d, *p4d_ref); + set_p4d(p4d, *p4d_k); arch_flush_lazy_mmu_mode(); } else { - BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_ref)); + BUG_ON(p4d_pfn(*p4d) != p4d_pfn(*p4d_k)); } - /* - * Below here mismatches are bugs because these lower tables - * are shared: - */ BUILD_BUG_ON(CONFIG_PGTABLE_LEVELS < 4); pud = pud_offset(p4d, address); - pud_ref = pud_offset(p4d_ref, address); - if (pud_none(*pud_ref)) + if (pud_none(*pud)) return -1; - if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref)) - BUG(); - if (pud_large(*pud)) return 0; pmd = pmd_offset(pud, address); - pmd_ref = pmd_offset(pud_ref, address); - if (pmd_none(*pmd_ref)) + if (pmd_none(*pmd)) return -1; - if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref)) - BUG(); - if (pmd_large(*pmd)) return 0; - pte_ref = pte_offset_kernel(pmd_ref, address); - if (!pte_present(*pte_ref)) - return -1; - pte = pte_offset_kernel(pmd, address); - - /* - * Don't use pte_page here, because the mappings can point - * outside mem_map, and the NUMA hash lookup cannot handle - * that: - */ - if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref)) - BUG(); + if (!pte_present(*pte)) + return -1; return 0; } -- cgit v1.2.1 From daaf216c06fba4ee4dc3f62715667da929d68774 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Thu, 8 Mar 2018 17:17:31 -0600 Subject: KVM: x86: Fix device passthrough when SME is active When using device passthrough with SME active, the MMIO range that is mapped for the device should not be mapped encrypted. Add a check in set_spte() to insure that a page is not mapped encrypted if that page is a device MMIO page as indicated by kvm_is_mmio_pfn(). Cc: # 4.14.x- Signed-off-by: Tom Lendacky Signed-off-by: Paolo Bonzini --- arch/x86/kvm/mmu.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index f551962ac294..763bb3bade63 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c @@ -2770,8 +2770,10 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep, else pte_access &= ~ACC_WRITE_MASK; + if (!kvm_is_mmio_pfn(pfn)) + spte |= shadow_me_mask; + spte |= (u64)pfn << PAGE_SHIFT; - spte |= shadow_me_mask; if (pte_access & ACC_WRITE_MASK) { -- cgit v1.2.1 From fc804f65d46236c211f530174904c1ed70db5888 Mon Sep 17 00:00:00 2001 From: Rajvi Jingar Date: Thu, 8 Mar 2018 09:28:36 -0800 Subject: x86/tsc: Convert ART in nanoseconds to TSC Device drivers use get_device_system_crosststamp() to produce precise system/device cross-timestamps. The PHC clock and ALSA interfaces, for example, make the cross-timestamps available to user applications. On Intel platforms, get_device_system_crosststamp() requires a TSC value derived from ART (Always Running Timer) to compute the monotonic raw and realtime system timestamps. Starting with Intel Goldmont platforms, the PCIe root complex supports the PTM time sync protocol. PTM requires all timestamps to be in units of nanoseconds. 
The Intel root complex hardware propagates system time derived from ART in units of nanoseconds performing the conversion as follows: ART_NS = ART * 1e9 / When user software requests a cross-timestamp, the system timestamps (generally read from device registers) must be converted to TSC by the driver software as follows: TSC = ART_NS * TSC_KHZ / 1e6 This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set indicating that tsc_khz is derived from CPUID[15H]. Drivers should check whether this flag is set before conversion to TSC is attempted. Suggested-by: Christopher S. Hall Signed-off-by: Rajvi Jingar Signed-off-by: Thomas Gleixner Cc: peterz@infradead.org Link: https://lkml.kernel.org/r/1520530116-4925-1-git-send-email-rajvi.jingar@intel.com --- arch/x86/include/asm/tsc.h | 1 + arch/x86/kernel/tsc.c | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index cf5d53c3f9ea..2701d221583a 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h @@ -31,6 +31,7 @@ static inline cycles_t get_cycles(void) } extern struct system_counterval_t convert_art_to_tsc(u64 art); +extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns); extern void tsc_early_delay_calibrate(void); extern void tsc_init(void); diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index fb4302738410..ef32297ff17e 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1179,6 +1179,45 @@ struct system_counterval_t convert_art_to_tsc(u64 art) } EXPORT_SYMBOL(convert_art_to_tsc); +/** + * convert_art_ns_to_tsc() - Convert ART in nanoseconds to TSC. + * @art_ns: ART (Always Running Timer) in unit of nanoseconds + * + * PTM requires all timestamps to be in units of nanoseconds. When user + * software requests a cross-timestamp, this function converts system timestamp + * to TSC. + * + * This is valid when CPU feature flag X86_FEATURE_TSC_KNOWN_FREQ is set + * indicating the tsc_khz is derived from CPUID[15H]. Drivers should check + * that this flag is set before conversion to TSC is attempted. + * + * Return: + * struct system_counterval_t - system counter value with the pointer to the + * corresponding clocksource + * @cycles: System counter value + * @cs: Clocksource corresponding to system counter value. Used + * by timekeeping code to verify comparibility of two cycle + * values. + */ + +struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns) +{ + u64 tmp, res, rem; + + rem = do_div(art_ns, USEC_PER_SEC); + + res = art_ns * tsc_khz; + tmp = rem * tsc_khz; + + do_div(tmp, USEC_PER_SEC); + res += tmp; + + return (struct system_counterval_t) { .cs = art_related_clocksource, + .cycles = res}; +} +EXPORT_SYMBOL(convert_art_ns_to_tsc); + + static void tsc_refine_calibration_work(struct work_struct *work); static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); /** -- cgit v1.2.1 From edb39592a5877bd91b2e6ee15194268f35b04892 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 15 Mar 2018 17:36:56 +0100 Subject: perf: Fix sibling iteration Mark noticed that the change to sibling_list changed some iteration semantics; because previously we used group_list as list entry, sibling events would always have an empty sibling_list. But because we now use sibling_list for both list head and list entry, siblings will report as having siblings. Fix this with a custom for_each_sibling_event() iterator. 
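A sketch of how such an iterator can be built on top of list_for_each_entry() (the real macro lives in include/linux/perf_event.h and may differ in detail):

	/*
	 * Sketch: a sibling event uses its sibling_list member purely as a
	 * list entry, so only the group leader may be walked as a list head.
	 */
	#define for_each_sibling_event(sibling, event)				\
		if ((event)->group_leader == (event))				\
			list_for_each_entry((sibling), &(event)->sibling_list,	\
					    sibling_list)

With the guard in place, iterating a sibling simply does nothing, which restores the intended semantics at the two x86 call sites converted below.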
Fixes: 8343aae66167 ("perf/core: Remove perf_event::group_entry") Reported-by: Mark Rutland Suggested-by: Mark Rutland Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: vincent.weaver@maine.edu Cc: alexander.shishkin@linux.intel.com Cc: torvalds@linux-foundation.org Cc: alexey.budankov@linux.intel.com Cc: valery.cherepennikov@intel.com Cc: eranian@google.com Cc: acme@redhat.com Cc: linux-tip-commits@vger.kernel.org Cc: davidcc@google.com Cc: kan.liang@intel.com Cc: Dmitry.Prohorov@intel.com Cc: jolsa@redhat.com Link: https://lkml.kernel.org/r/20180315170129.GX4043@hirez.programming.kicks-ass.net --- arch/x86/events/core.c | 2 +- arch/x86/events/intel/uncore.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 77a4125b6b1f..bfc8f43909c1 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -990,7 +990,7 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, if (!dogrp) return n; - list_for_each_entry(event, &leader->sibling_list, sibling_list) { + for_each_sibling_event(event, leader) { if (!is_x86_event(event) || event->state <= PERF_EVENT_STATE_OFF) continue; diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index 9e374cd22ad2..a7956fc7ca1d 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -354,7 +354,7 @@ uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader, if (!dogrp) return n; - list_for_each_entry(event, &leader->sibling_list, sibling_list) { + for_each_sibling_event(event, leader) { if (!is_box_event(box, event) || event->state <= PERF_EVENT_STATE_OFF) continue; -- cgit v1.2.1 From 2c2a9bbe7fecb2ad4981b6f4a56cacbfb849f848 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Feb 2018 14:20:35 -0800 Subject: perf/x86/intel: Disable userspace RDPMC usage for large PEBS Userspace RDPMC cannot possibly work for large PEBS, which was introduced in: b8241d20699e ("perf/x86/intel: Implement batched PEBS interrupt handling (large PEBS interrupt threshold)") When the PEBS interrupt threshold is larger than one, there is no way to get exact auto-reload times and value for userspace RDPMC. Disable the userspace RDPMC usage when large PEBS is enabled. The only exception is when the PEBS interrupt threshold is 1, in which case user-space RDPMC works well even with auto-reload events. 
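For context, a rough userspace sketch of what "userspace RDPMC" means here, following the self-monitoring protocol documented in include/uapi/linux/perf_event.h; read_pmc() and read_self() are hypothetical helper names, and pc points at the event's mmap'ed perf_event_mmap_page:

	#include <stdint.h>
	#include <linux/perf_event.h>

	#define barrier()	__asm__ __volatile__("" ::: "memory")

	static inline uint64_t read_pmc(uint32_t counter)
	{
		uint32_t lo, hi;

		__asm__ __volatile__("rdpmc" : "=a" (lo), "=d" (hi) : "c" (counter));
		return (uint64_t)hi << 32 | lo;
	}

	static uint64_t read_self(struct perf_event_mmap_page *pc)
	{
		uint64_t count;
		uint32_t seq, idx;

		do {
			seq = pc->lock;
			barrier();
			idx = pc->index;
			count = pc->offset;
			/*
			 * The kernel clears cap_user_rdpmc for large-PEBS
			 * events; the fallback is read() on the event fd.
			 */
			if (pc->cap_user_rdpmc && idx)
				count += read_pmc(idx - 1);
			barrier();
		} while (pc->lock != seq);

		return count;
	}

With auto-reloaded PEBS counters, pc->offset cannot account for the still-buffered records, which is why the capability has to be withdrawn.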
Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: acme@kernel.org Fixes: b8241d20699e ("perf/x86/intel: Implement batched PEBS interrupt handling (large PEBS interrupt threshold)") Link: http://lkml.kernel.org/r/1518474035-21006-6-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar (cherry picked from commit 1af22eba248efe2de25658041a80a3d40fb3e92e) --- arch/x86/events/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 140d33288e78..3d24edfef3e4 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2118,7 +2118,8 @@ static int x86_pmu_event_init(struct perf_event *event) event->destroy(event); } - if (READ_ONCE(x86_pmu.attr_rdpmc)) + if (READ_ONCE(x86_pmu.attr_rdpmc) && + !(event->hw.flags & PERF_X86_EVENT_FREERUNNING)) event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; return err; -- cgit v1.2.1 From e5ea9b54a055619160bbfe527ebb7d7191823d66 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 17 Mar 2018 14:52:16 +0300 Subject: perf/x86/intel: Don't accidentally clear high bits in bdw_limit_period() We intended to clear the lowest 6 bits but because of a type bug we clear the high 32 bits as well. Andi says that periods are rarely more than U32_MAX so this bug probably doesn't have a huge runtime impact. Signed-off-by: Dan Carpenter Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Fixes: 294fe0f52a44 ("perf/x86/intel: Add INST_RETIRED.ALL workarounds") Link: http://lkml.kernel.org/r/20180317115216.GB4035@mwanda Signed-off-by: Ingo Molnar --- arch/x86/events/intel/core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 56457cb73448..9b18a227fff7 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3194,7 +3194,7 @@ static unsigned bdw_limit_period(struct perf_event *event, unsigned left) X86_CONFIG(.event=0xc0, .umask=0x01)) { if (left < 128) left = 128; - left &= ~0x3fu; + left &= ~0x3fULL; } return left; } -- cgit v1.2.1 From e340895c9ed0b44548f08bbaaee4afc7bfacd354 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Mon, 12 Mar 2018 08:41:34 -0700 Subject: perf/x86/intel/uncore: Add missing filter constraint for SKX CHA event Adding a filter constraint for Intel Skylake CHA event UNC_CHA_UPI_CREDITS_ACQUIRED (0x38). The event supports core-id/thread-id and link filtering. 
Signed-off-by: Stephane Eranian Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/1520869294-14176-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snbep.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 22ec65bc033a..0876798f2ac9 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3343,6 +3343,7 @@ static struct extra_reg skx_uncore_cha_extra_regs[] = { SNBEP_CBO_EVENT_EXTRA_REG(0x9134, 0xffff, 0x4), SNBEP_CBO_EVENT_EXTRA_REG(0x35, 0xff, 0x8), SNBEP_CBO_EVENT_EXTRA_REG(0x36, 0xff, 0x8), + SNBEP_CBO_EVENT_EXTRA_REG(0x38, 0xff, 0x3), EVENT_EXTRA_END }; -- cgit v1.2.1 From 174afc3e7dd7823df8218e16e7768b834097184e Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Mon, 12 Mar 2018 10:45:37 -0400 Subject: perf/x86/intel: Rename confusing 'freerunning PEBS' API and implementation to 'large PEBS' The 'freerunning PEBS' and 'large PEBS' are the same thing. Both of these names appear in the code and in the API, which causes confusion. Rename 'freerunning PEBS' to 'large PEBS' to unify the code, which eliminates the confusion. No functional change. Reported-by: Vince Weaver Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/1520865937-22910-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 2 +- arch/x86/events/intel/core.c | 12 ++++++------ arch/x86/events/intel/ds.c | 6 +++--- arch/x86/events/perf_event.h | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 3d24edfef3e4..88797c80b3e0 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2119,7 +2119,7 @@ static int x86_pmu_event_init(struct perf_event *event) } if (READ_ONCE(x86_pmu.attr_rdpmc) && - !(event->hw.flags & PERF_X86_EVENT_FREERUNNING)) + !(event->hw.flags & PERF_X86_EVENT_LARGE_PEBS)) event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED; return err; diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9b18a227fff7..1e41d7508d99 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -2952,9 +2952,9 @@ static void intel_pebs_aliases_skl(struct perf_event *event) return intel_pebs_aliases_precdist(event); } -static unsigned long intel_pmu_free_running_flags(struct perf_event *event) +static unsigned long intel_pmu_large_pebs_flags(struct perf_event *event) { - unsigned long flags = x86_pmu.free_running_flags; + unsigned long flags = x86_pmu.large_pebs_flags; if (event->attr.use_clockid) flags &= ~PERF_SAMPLE_TIME; @@ -2976,8 +2976,8 @@ static int intel_pmu_hw_config(struct perf_event *event) if (!event->attr.freq) { event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD; if (!(event->attr.sample_type & - ~intel_pmu_free_running_flags(event))) - event->hw.flags |= PERF_X86_EVENT_FREERUNNING; + ~intel_pmu_large_pebs_flags(event))) + event->hw.flags |= PERF_X86_EVENT_LARGE_PEBS; } if (x86_pmu.pebs_aliases) x86_pmu.pebs_aliases(event); @@ -3460,7 +3460,7 @@ static __initconst 
const struct x86_pmu core_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, - .free_running_flags = PEBS_FREERUNNING_FLAGS, + .large_pebs_flags = LARGE_PEBS_FLAGS, /* * Intel PMCs cannot be accessed sanely above 32-bit width, @@ -3502,7 +3502,7 @@ static __initconst const struct x86_pmu intel_pmu = { .event_map = intel_pmu_event_map, .max_events = ARRAY_SIZE(intel_perfmon_event_map), .apic = 1, - .free_running_flags = PEBS_FREERUNNING_FLAGS, + .large_pebs_flags = LARGE_PEBS_FLAGS, /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index 18c25ab28557..d8015235ba76 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -935,7 +935,7 @@ void intel_pmu_pebs_add(struct perf_event *event) bool needed_cb = pebs_needs_sched_cb(cpuc); cpuc->n_pebs++; - if (hwc->flags & PERF_X86_EVENT_FREERUNNING) + if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs++; pebs_update_state(needed_cb, cpuc, event->ctx->pmu); @@ -975,7 +975,7 @@ void intel_pmu_pebs_del(struct perf_event *event) bool needed_cb = pebs_needs_sched_cb(cpuc); cpuc->n_pebs--; - if (hwc->flags & PERF_X86_EVENT_FREERUNNING) + if (hwc->flags & PERF_X86_EVENT_LARGE_PEBS) cpuc->n_large_pebs--; pebs_update_state(needed_cb, cpuc, event->ctx->pmu); @@ -1530,7 +1530,7 @@ void __init intel_ds_init(void) x86_pmu.pebs_record_size = sizeof(struct pebs_record_skl); x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm; - x86_pmu.free_running_flags |= PERF_SAMPLE_TIME; + x86_pmu.large_pebs_flags |= PERF_SAMPLE_TIME; break; default: diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h index 78f91ec1056e..39cd0615f04f 100644 --- a/arch/x86/events/perf_event.h +++ b/arch/x86/events/perf_event.h @@ -69,7 +69,7 @@ struct event_constraint { #define PERF_X86_EVENT_RDPMC_ALLOWED 0x0100 /* grant rdpmc permission */ #define PERF_X86_EVENT_EXCL_ACCT 0x0200 /* accounted EXCL event */ #define PERF_X86_EVENT_AUTO_RELOAD 0x0400 /* use PEBS auto-reload */ -#define PERF_X86_EVENT_FREERUNNING 0x0800 /* use freerunning PEBS */ +#define PERF_X86_EVENT_LARGE_PEBS 0x0800 /* use large PEBS */ struct amd_nb { @@ -88,7 +88,7 @@ struct amd_nb { * REGS_USER can be handled for events limited to ring 3. * */ -#define PEBS_FREERUNNING_FLAGS \ +#define LARGE_PEBS_FLAGS \ (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \ PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \ PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \ @@ -608,7 +608,7 @@ struct x86_pmu { struct event_constraint *pebs_constraints; void (*pebs_aliases)(struct perf_event *event); int max_pebs_events; - unsigned long free_running_flags; + unsigned long large_pebs_flags; /* * Intel LBR -- cgit v1.2.1 From 320b0651f32b830add6497fcdcfdcb6ae8c7b8a0 Mon Sep 17 00:00:00 2001 From: Kan Liang Date: Tue, 13 Mar 2018 11:51:34 -0700 Subject: perf/x86/intel/uncore: Fix multi-domain PCI CHA enumeration bug on Skylake servers The number of CHAs is miscalculated on multi-domain PCI Skylake server systems, resulting in an uncore driver initialization error. Gary Kroening explains: "For systems with a single PCI segment, it is sufficient to look for the bus number to change in order to determine that all of the CHa's have been counted for a single socket. However, for multi PCI segment systems, each socket is given a new segment and the bus number does NOT change. 
So looking only for the bus number to change ends up counting all of the CHa's on all sockets in the system. This leads to writing CPU MSRs beyond a valid range and causes an error in ivbep_uncore_msr_init_box()." To fix this bug, query the number of CHAs from the CAPID6 register: it should read bits 27:0 in the CAPID6 register located at Device 30, Function 3, Offset 0x9C. These 28 bits form a bit vector of available LLC slices and the CHAs that manage those slices. Reported-by: Kroening, Gary Tested-by: Kroening, Gary Signed-off-by: Kan Liang Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Andy Shevchenko Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: abanman@hpe.com Cc: dimitri.sivanich@hpe.com Cc: hpa@zytor.com Cc: mike.travis@hpe.com Cc: russ.anderson@hpe.com Fixes: cd34cd97b7b4 ("perf/x86/intel/uncore: Add Skylake server uncore support") Link: http://lkml.kernel.org/r/1520967094-13219-1-git-send-email-kan.liang@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore_snbep.c | 31 +++++++++++++++++-------------- 1 file changed, 17 insertions(+), 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 0876798f2ac9..c98b943e58b4 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3563,24 +3563,27 @@ static struct intel_uncore_type *skx_msr_uncores[] = { NULL, }; +/* + * To determine the number of CHAs, it should read bits 27:0 in the CAPID6 + * register which located at Device 30, Function 3, Offset 0x9C. PCI ID 0x2083. + */ +#define SKX_CAPID6 0x9c +#define SKX_CHA_BIT_MASK GENMASK(27, 0) + static int skx_count_chabox(void) { - struct pci_dev *chabox_dev = NULL; - int bus, count = 0; + struct pci_dev *dev = NULL; + u32 val = 0; - while (1) { - chabox_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x208d, chabox_dev); - if (!chabox_dev) - break; - if (count == 0) - bus = chabox_dev->bus->number; - if (bus != chabox_dev->bus->number) - break; - count++; - } + dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x2083, dev); + if (!dev) + goto out; - pci_dev_put(chabox_dev); - return count; + pci_read_config_dword(dev, SKX_CAPID6, &val); + val &= SKX_CHA_BIT_MASK; +out: + pci_dev_put(dev); + return hweight32(val); } void skx_uncore_cpu_init(void) -- cgit v1.2.1 From 038d07a283d62336b32cc23b62aecdf9418cfc11 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:14 +0100 Subject: x86/dma: Remove dma_alloc_coherent_mask() These days all devices (including the ISA fallback device) have a coherent DMA mask set, so remove the workaround. 
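For illustration, dev->coherent_dma_mask can be trusted because drivers establish it at probe time; a minimal, hypothetical probe sketch (dma_set_mask_and_coherent() is the standard helper from <linux/dma-mapping.h>):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical probe: set the streaming and coherent masks in one go. */
	static int example_probe(struct pci_dev *pdev)
	{
		if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return dma_set_mask_and_coherent(&pdev->dev,
							 DMA_BIT_MASK(32));
		return 0;
	}

After this, dma_alloc_coherent() can consult the coherent mask directly instead of guessing a mask from GFP_DMA.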
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Reviewed-by: Konrad Rzeszutek Wilk Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-3-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/dma-mapping.h | 18 ++---------------- arch/x86/kernel/pci-dma.c | 10 ++++------ arch/x86/mm/mem_encrypt.c | 4 +--- 3 files changed, 7 insertions(+), 25 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 6277c83c0eb1..545bf3721bc0 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -44,26 +44,12 @@ extern void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs); -static inline unsigned long dma_alloc_coherent_mask(struct device *dev, - gfp_t gfp) -{ - unsigned long dma_mask = 0; - - dma_mask = dev->coherent_dma_mask; - if (!dma_mask) - dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32); - - return dma_mask; -} - static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) { - unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp); - - if (dma_mask <= DMA_BIT_MASK(24)) + if (dev->coherent_dma_mask <= DMA_BIT_MASK(24)) gfp |= GFP_DMA; #ifdef CONFIG_X86_64 - if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) + if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) gfp |= GFP_DMA32; #endif return gfp; diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index df7ab02f959f..b59820872ec7 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -80,13 +80,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, unsigned long attrs) { - unsigned long dma_mask; struct page *page; unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; dma_addr_t addr; - dma_mask = dma_alloc_coherent_mask(dev, flag); - again: page = NULL; /* CMA can be used only in the context which permits sleeping */ @@ -95,7 +92,7 @@ again: flag); if (page) { addr = phys_to_dma(dev, page_to_phys(page)); - if (addr + size > dma_mask) { + if (addr + size > dev->coherent_dma_mask) { dma_release_from_contiguous(dev, page, count); page = NULL; } @@ -108,10 +105,11 @@ again: return NULL; addr = phys_to_dma(dev, page_to_phys(page)); - if (addr + size > dma_mask) { + if (addr + size > dev->coherent_dma_mask) { __free_pages(page, get_order(size)); - if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { + if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && + !(flag & GFP_DMA)) { flag = (flag & ~GFP_DMA32) | GFP_DMA; goto again; } diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 3a1b5fe4c2ca..f6cd84beb610 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -198,12 +198,10 @@ void __init sme_early_init(void) static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs) { - unsigned long dma_mask; unsigned int order; struct page *page; void *vaddr = NULL; - dma_mask = dma_alloc_coherent_mask(dev, gfp); order = get_order(size); /* @@ -221,7 +219,7 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, * mask with it already cleared. 
*/ addr = __sme_clr(phys_to_dma(dev, page_to_phys(page))); - if ((addr + size) > dma_mask) { + if ((addr + size) > dev->coherent_dma_mask) { __free_pages(page, get_order(size)); } else { vaddr = page_address(page); -- cgit v1.2.1 From fec777c385b6376048fc4b08f039366545b335cd Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:15 +0100 Subject: x86/dma: Use DMA-direct (CONFIG_DMA_DIRECT_OPS=y) The generic DMA-direct (CONFIG_DMA_DIRECT_OPS=y) implementation is now functionally equivalent to the x86 nommu dma_map implementation, so switch over to using it. That includes switching from using x86_dma_supported in various IOMMU drivers to use dma_direct_supported instead, which provides the same functionality. Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-4-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/include/asm/dma-mapping.h | 8 ----- arch/x86/include/asm/iommu.h | 3 -- arch/x86/kernel/Makefile | 2 +- arch/x86/kernel/amd_gart_64.c | 7 ++-- arch/x86/kernel/pci-calgary_64.c | 3 +- arch/x86/kernel/pci-dma.c | 66 +------------------------------------- arch/x86/kernel/pci-swiotlb.c | 5 ++- arch/x86/pci/sta2x11-fixup.c | 2 +- 9 files changed, 11 insertions(+), 86 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 18233e459bff..7dc347217d3a 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -83,6 +83,7 @@ config X86 select CLOCKSOURCE_VALIDATE_LAST_CYCLE select CLOCKSOURCE_WATCHDOG select DCACHE_WORD_ACCESS + select DMA_DIRECT_OPS select EDAC_ATOMIC_SCRUB select EDAC_SUPPORT select GENERIC_CLOCKEVENTS diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index 545bf3721bc0..df9816b385eb 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -36,14 +36,6 @@ int arch_dma_supported(struct device *dev, u64 mask); bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define arch_dma_alloc_attrs arch_dma_alloc_attrs -extern void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag, - unsigned long attrs); - -extern void dma_generic_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_addr, - unsigned long attrs); - static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) { if (dev->coherent_dma_mask <= DMA_BIT_MASK(24)) diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h index 1e5d5d92eb40..baedab8ac538 100644 --- a/arch/x86/include/asm/iommu.h +++ b/arch/x86/include/asm/iommu.h @@ -2,13 +2,10 @@ #ifndef _ASM_X86_IOMMU_H #define _ASM_X86_IOMMU_H -extern const struct dma_map_ops nommu_dma_ops; extern int force_iommu, no_iommu; extern int iommu_detected; extern int iommu_pass_through; -int x86_dma_supported(struct device *dev, u64 mask); - /* 10 seconds */ #define DMAR_OPERATION_TIMEOUT ((cycles_t) tsc_khz*10*1000) diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 29786c87e864..2e8c8a09ecab 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -57,7 +57,7 @@ obj-$(CONFIG_X86_ESPFIX64) += espfix_64.o obj-$(CONFIG_SYSFS) += ksysfs.o obj-y += bootflag.o e820.o obj-y += pci-dma.o quirks.o topology.o kdebugfs.o -obj-y += alternative.o 
i8253.o pci-nommu.o hw_breakpoint.o +obj-y += alternative.o i8253.o hw_breakpoint.o obj-y += tsc.o tsc_msr.o io_delay.o rtc.o obj-y += pci-iommu_table.o obj-y += resource.o diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index ecd486cb06ab..52e3abcf3e70 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -501,8 +501,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, } __free_pages(page, get_order(size)); } else - return dma_generic_alloc_coherent(dev, size, dma_addr, flag, - attrs); + return dma_direct_alloc(dev, size, dma_addr, flag, attrs); return NULL; } @@ -513,7 +512,7 @@ gart_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_addr, unsigned long attrs) { gart_unmap_page(dev, dma_addr, size, DMA_BIDIRECTIONAL, 0); - dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); + dma_direct_free(dev, size, vaddr, dma_addr, attrs); } static int gart_mapping_error(struct device *dev, dma_addr_t dma_addr) @@ -705,7 +704,7 @@ static const struct dma_map_ops gart_dma_ops = { .alloc = gart_alloc_coherent, .free = gart_free_coherent, .mapping_error = gart_mapping_error, - .dma_supported = x86_dma_supported, + .dma_supported = dma_direct_supported, }; static void gart_iommu_shutdown(void) diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 35c461f21815..5647853053bd 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -493,7 +494,7 @@ static const struct dma_map_ops calgary_dma_ops = { .map_page = calgary_map_page, .unmap_page = calgary_unmap_page, .mapping_error = calgary_mapping_error, - .dma_supported = x86_dma_supported, + .dma_supported = dma_direct_supported, }; static inline void __iomem * busno_to_bbar(unsigned char num) diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index b59820872ec7..db0b88ea8d1b 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -18,7 +18,7 @@ static int forbid_dac __read_mostly; -const struct dma_map_ops *dma_ops = &nommu_dma_ops; +const struct dma_map_ops *dma_ops = &dma_direct_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; @@ -76,60 +76,6 @@ void __init pci_iommu_alloc(void) } } } -void *dma_generic_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addr, gfp_t flag, - unsigned long attrs) -{ - struct page *page; - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - dma_addr_t addr; - -again: - page = NULL; - /* CMA can be used only in the context which permits sleeping */ - if (gfpflags_allow_blocking(flag)) { - page = dma_alloc_from_contiguous(dev, count, get_order(size), - flag); - if (page) { - addr = phys_to_dma(dev, page_to_phys(page)); - if (addr + size > dev->coherent_dma_mask) { - dma_release_from_contiguous(dev, page, count); - page = NULL; - } - } - } - /* fallback */ - if (!page) - page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); - if (!page) - return NULL; - - addr = phys_to_dma(dev, page_to_phys(page)); - if (addr + size > dev->coherent_dma_mask) { - __free_pages(page, get_order(size)); - - if (dev->coherent_dma_mask < DMA_BIT_MASK(32) && - !(flag & GFP_DMA)) { - flag = (flag & ~GFP_DMA32) | GFP_DMA; - goto again; - } - - return NULL; - } - memset(page_address(page), 0, size); - *dma_addr = addr; - return page_address(page); -} - -void dma_generic_free_coherent(struct device *dev, size_t 
size, void *vaddr, - dma_addr_t dma_addr, unsigned long attrs) -{ - unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; - struct page *page = virt_to_page(vaddr); - - if (!dma_release_from_contiguous(dev, page, count)) - free_pages((unsigned long)vaddr, get_order(size)); -} bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) { @@ -243,16 +189,6 @@ int arch_dma_supported(struct device *dev, u64 mask) } EXPORT_SYMBOL(arch_dma_supported); -int x86_dma_supported(struct device *dev, u64 mask) -{ - /* Copied from i386. Doesn't make much sense, because it will - only work for pci_alloc_coherent. - The caller just has to use GFP_DMA in this case. */ - if (mask < DMA_BIT_MASK(24)) - return 0; - return 1; -} - static int __init pci_iommu_init(void) { struct iommu_table_entry *p; diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index 0ee0f8f34251..bcb6a9bf64ad 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -30,8 +30,7 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, */ flags |= __GFP_NOWARN; - vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags, - attrs); + vaddr = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); if (vaddr) return vaddr; @@ -45,7 +44,7 @@ void x86_swiotlb_free_coherent(struct device *dev, size_t size, if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) swiotlb_free_coherent(dev, size, vaddr, dma_addr); else - dma_generic_free_coherent(dev, size, vaddr, dma_addr, attrs); + dma_direct_free(dev, size, vaddr, dma_addr, attrs); } static const struct dma_map_ops x86_swiotlb_dma_ops = { diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 75577c1490c4..6c712fe11bdc 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -193,7 +193,7 @@ static const struct dma_map_ops sta2x11_dma_ops = { .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, .sync_sg_for_device = swiotlb_sync_sg_for_device, .mapping_error = swiotlb_dma_mapping_error, - .dma_supported = x86_dma_supported, + .dma_supported = dma_direct_supported, }; /* At setup time, we use our own ops if the device is a ConneXt one */ -- cgit v1.2.1 From 6e4bf586778315b3fc53b728c53eefc247cfc3ff Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:16 +0100 Subject: x86/dma: Use generic swiotlb_ops The generic swiotlb DMA ops were based on the x86 ones and provide equivalent functionality, so use them. Also fix the sta2x11 case. For that SOC the DMA map ops need an additional physical to DMA address translations. For swiotlb buffers that is done throught the phys_to_dma helper, but the sta2x11_dma_ops also added an additional translation on the return value from x86_swiotlb_alloc_coherent, which is only correct if that functions returns a direct allocation and not a swiotlb buffer. With the generic swiotlb and DMA-direct code phys_to_dma is not always used and the separate sta2x11_dma_ops can be replaced with a simple bit that marks if the additional physical to DMA address translation is needed. 
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-5-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/device.h | 3 +++ arch/x86/include/asm/swiotlb.h | 8 ------- arch/x86/kernel/pci-swiotlb.c | 47 +----------------------------------------- arch/x86/pci/sta2x11-fixup.c | 46 +++++------------------------------------ 4 files changed, 9 insertions(+), 95 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h index 5e12c63b47aa..a8f6c809d9b1 100644 --- a/arch/x86/include/asm/device.h +++ b/arch/x86/include/asm/device.h @@ -6,6 +6,9 @@ struct dev_archdata { #if defined(CONFIG_INTEL_IOMMU) || defined(CONFIG_AMD_IOMMU) void *iommu; /* hook for IOMMU specific extension */ #endif +#ifdef CONFIG_STA2X11 + bool is_sta2x11; +#endif }; #if defined(CONFIG_X86_DEV_DMA_OPS) && defined(CONFIG_PCI_DOMAINS) diff --git a/arch/x86/include/asm/swiotlb.h b/arch/x86/include/asm/swiotlb.h index 1c6a6cb230ff..ff6c92eff035 100644 --- a/arch/x86/include/asm/swiotlb.h +++ b/arch/x86/include/asm/swiotlb.h @@ -27,12 +27,4 @@ static inline void pci_swiotlb_late_init(void) { } #endif - -extern void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs); -extern void x86_swiotlb_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_addr, - unsigned long attrs); - #endif /* _ASM_X86_SWIOTLB_H */ diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c index bcb6a9bf64ad..661583662430 100644 --- a/arch/x86/kernel/pci-swiotlb.c +++ b/arch/x86/kernel/pci-swiotlb.c @@ -17,51 +17,6 @@ int swiotlb __read_mostly; -void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size, - dma_addr_t *dma_handle, gfp_t flags, - unsigned long attrs) -{ - void *vaddr; - - /* - * Don't print a warning when the first allocation attempt fails. - * swiotlb_alloc_coherent() will print a warning when the DMA - * memory allocation ultimately failed. 
- */ - flags |= __GFP_NOWARN; - - vaddr = dma_direct_alloc(hwdev, size, dma_handle, flags, attrs); - if (vaddr) - return vaddr; - - return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); -} - -void x86_swiotlb_free_coherent(struct device *dev, size_t size, - void *vaddr, dma_addr_t dma_addr, - unsigned long attrs) -{ - if (is_swiotlb_buffer(dma_to_phys(dev, dma_addr))) - swiotlb_free_coherent(dev, size, vaddr, dma_addr); - else - dma_direct_free(dev, size, vaddr, dma_addr, attrs); -} - -static const struct dma_map_ops x86_swiotlb_dma_ops = { - .mapping_error = swiotlb_dma_mapping_error, - .alloc = x86_swiotlb_alloc_coherent, - .free = x86_swiotlb_free_coherent, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .dma_supported = NULL, -}; - /* * pci_swiotlb_detect_override - set swiotlb to 1 if necessary * @@ -111,7 +66,7 @@ void __init pci_swiotlb_init(void) { if (swiotlb) { swiotlb_init(0); - dma_ops = &x86_swiotlb_dma_ops; + dma_ops = &swiotlb_dma_ops; } } diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index 6c712fe11bdc..eac58e03f43c 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -159,43 +159,6 @@ static dma_addr_t a2p(dma_addr_t a, struct pci_dev *pdev) return p; } -/** - * sta2x11_swiotlb_alloc_coherent - Allocate swiotlb bounce buffers - * returns virtual address. This is the only "special" function here. - * @dev: PCI device - * @size: Size of the buffer - * @dma_handle: DMA address - * @flags: memory flags - */ -static void *sta2x11_swiotlb_alloc_coherent(struct device *dev, - size_t size, - dma_addr_t *dma_handle, - gfp_t flags, - unsigned long attrs) -{ - void *vaddr; - - vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, flags, attrs); - *dma_handle = p2a(*dma_handle, to_pci_dev(dev)); - return vaddr; -} - -/* We have our own dma_ops: the same as swiotlb but from alloc (above) */ -static const struct dma_map_ops sta2x11_dma_ops = { - .alloc = sta2x11_swiotlb_alloc_coherent, - .free = x86_swiotlb_free_coherent, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = swiotlb_dma_mapping_error, - .dma_supported = dma_direct_supported, -}; - /* At setup time, we use our own ops if the device is a ConneXt one */ static void sta2x11_setup_pdev(struct pci_dev *pdev) { @@ -205,7 +168,8 @@ static void sta2x11_setup_pdev(struct pci_dev *pdev) return; pci_set_consistent_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); pci_set_dma_mask(pdev, STA2X11_AMBA_SIZE - 1); - pdev->dev.dma_ops = &sta2x11_dma_ops; + pdev->dev.dma_ops = &swiotlb_dma_ops; + pdev->dev.archdata.is_sta2x11 = true; /* We must enable all devices as master, for audio DMA to work */ pci_set_master(pdev); @@ -225,7 +189,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) { struct sta2x11_mapping *map; - if (dev->dma_ops != &sta2x11_dma_ops) { + if (!dev->archdata.is_sta2x11) { if (!dev->dma_mask) return false; return addr + size - 1 <= 
*dev->dma_mask; @@ -249,7 +213,7 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) */ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) { - if (dev->dma_ops != &sta2x11_dma_ops) + if (!dev->archdata.is_sta2x11) return paddr; return p2a(paddr, to_pci_dev(dev)); } @@ -261,7 +225,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) */ phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) { - if (dev->dma_ops != &sta2x11_dma_ops) + if (!dev->archdata.is_sta2x11) return daddr; return a2p(daddr, to_pci_dev(dev)); } -- cgit v1.2.1 From f3c39d51043dcee5dd4d51dffcfe9ce01a263582 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:17 +0100 Subject: x86/dma/amd_gart: Look at dev->coherent_dma_mask instead of GFP_DMA We want to phase out looking at the magic GFP_DMA flag in the DMA mapping routines, so switch the gart driver to use the dev->coherent_dma_mask instead, which is used to select the GFP_DMA flag in the caller. Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Reviewed-by: Konrad Rzeszutek Wilk Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-6-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_gart_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 52e3abcf3e70..79ac6caaaabb 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -484,7 +484,7 @@ gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, unsigned long align_mask; struct page *page; - if (force_iommu && !(flag & GFP_DMA)) { + if (force_iommu && dev->coherent_dma_mask > DMA_BIT_MASK(24)) { flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); page = alloc_pages(flag | __GFP_ZERO, get_order(size)); if (!page) -- cgit v1.2.1 From 51c7eeba7975c1d2a02eefd00ece6de25176f5f3 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:18 +0100 Subject: x86/dma/amd_gart: Use dma_direct_{alloc,free}() This gains support for CMA allocations for the force_iommu case, and cleans up the code a bit. 
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-7-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/amd_gart_64.c | 36 ++++++++++++++---------------------- 1 file changed, 14 insertions(+), 22 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/amd_gart_64.c b/arch/x86/kernel/amd_gart_64.c index 79ac6caaaabb..f299d8a479bb 100644 --- a/arch/x86/kernel/amd_gart_64.c +++ b/arch/x86/kernel/amd_gart_64.c @@ -480,29 +480,21 @@ static void * gart_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag, unsigned long attrs) { - dma_addr_t paddr; - unsigned long align_mask; - struct page *page; - - if (force_iommu && dev->coherent_dma_mask > DMA_BIT_MASK(24)) { - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - page = alloc_pages(flag | __GFP_ZERO, get_order(size)); - if (!page) - return NULL; - - align_mask = (1UL << get_order(size)) - 1; - paddr = dma_map_area(dev, page_to_phys(page), size, - DMA_BIDIRECTIONAL, align_mask); - - flush_gart(); - if (paddr != bad_dma_addr) { - *dma_addr = paddr; - return page_address(page); - } - __free_pages(page, get_order(size)); - } else - return dma_direct_alloc(dev, size, dma_addr, flag, attrs); + void *vaddr; + + vaddr = dma_direct_alloc(dev, size, dma_addr, flag, attrs); + if (!vaddr || + !force_iommu || dev->coherent_dma_mask <= DMA_BIT_MASK(24)) + return vaddr; + *dma_addr = dma_map_area(dev, virt_to_phys(vaddr), size, + DMA_BIDIRECTIONAL, (1UL << get_order(size)) - 1); + flush_gart(); + if (unlikely(*dma_addr == bad_dma_addr)) + goto out_free; + return vaddr; +out_free: + dma_direct_free(dev, size, vaddr, *dma_addr, attrs); return NULL; } -- cgit v1.2.1 From 178c5682447ac0e315f0f3e27664fd4e0d2721cc Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:21 +0100 Subject: x86/dma: Remove dma_alloc_coherent_gfp_flags() All dma_ops implementations used on x86 now take care of setting their own required GFP_ masks for the allocation. And given that the common code now clears harmful flags itself that means we can stop the flags in all the IOMMU implementations as well. 
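The "common code now clears harmful flags" part refers to the generic allocation path; conceptually it does something along these lines before calling into the dma_map_ops (simplified sketch, not the exact dma_alloc_attrs() implementation):

	static void *dma_alloc_attrs_sketch(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flag,
					    unsigned long attrs)
	{
		const struct dma_map_ops *ops = get_dma_ops(dev);

		/*
		 * Let the implementation decide which zone to allocate from,
		 * stripping caller-supplied zone flags that would conflict
		 * with that decision.
		 */
		flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

		return ops->alloc(dev, size, dma_handle, flag, attrs);
	}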
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-10-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/dma-mapping.h | 11 ----------- arch/x86/kernel/pci-calgary_64.c | 2 -- arch/x86/kernel/pci-dma.c | 2 -- arch/x86/mm/mem_encrypt.c | 7 ------- 4 files changed, 22 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h index df9816b385eb..89ce4bfd241f 100644 --- a/arch/x86/include/asm/dma-mapping.h +++ b/arch/x86/include/asm/dma-mapping.h @@ -36,15 +36,4 @@ int arch_dma_supported(struct device *dev, u64 mask); bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp); #define arch_dma_alloc_attrs arch_dma_alloc_attrs -static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp) -{ - if (dev->coherent_dma_mask <= DMA_BIT_MASK(24)) - gfp |= GFP_DMA; -#ifdef CONFIG_X86_64 - if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) - gfp |= GFP_DMA32; -#endif - return gfp; -} - #endif diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c index 5647853053bd..bbfc8b1e9104 100644 --- a/arch/x86/kernel/pci-calgary_64.c +++ b/arch/x86/kernel/pci-calgary_64.c @@ -446,8 +446,6 @@ static void* calgary_alloc_coherent(struct device *dev, size_t size, npages = size >> PAGE_SHIFT; order = get_order(size); - flag &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); - /* alloc enough pages (and possibly more) */ ret = (void *)__get_free_pages(flag, order); if (!ret) diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c index db0b88ea8d1b..14437116ffea 100644 --- a/arch/x86/kernel/pci-dma.c +++ b/arch/x86/kernel/pci-dma.c @@ -82,8 +82,6 @@ bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp) if (!*dev) *dev = &x86_dma_fallback_dev; - *gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp); - if (!is_device_dma_capable(*dev)) return false; return true; diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index f6cd84beb610..1217a4fab915 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -203,13 +203,6 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void *vaddr = NULL; order = get_order(size); - - /* - * Memory will be memset to zero after marking decrypted, so don't - * bother clearing it before. - */ - gfp &= ~__GFP_ZERO; - page = alloc_pages_node(dev_to_node(dev), gfp, order); if (page) { dma_addr_t addr; -- cgit v1.2.1 From e7de6c7cc207be78369d45fb833d7d53aeda47f8 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:23 +0100 Subject: dma/swiotlb: Remove swiotlb_set_mem_attributes() Now that set_memory_decrypted() is always available we can just call it directly. 
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Reviewed-by: Konrad Rzeszutek Wilk Reviewed-by: Tom Lendacky Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-12-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/include/asm/mem_encrypt.h | 2 -- arch/x86/mm/mem_encrypt.c | 8 -------- 2 files changed, 10 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 8fe61ad21047..c0643831706e 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -49,8 +49,6 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void); -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size); - bool sme_active(void); bool sev_active(void); diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 1217a4fab915..d243e8d80d89 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -441,11 +441,3 @@ void __init mem_encrypt_init(void) : "Secure Memory Encryption (SME)"); } -void swiotlb_set_mem_attributes(void *vaddr, unsigned long size) -{ - WARN(PAGE_ALIGN(size) != size, - "size is not page-aligned (%#lx)\n", size); - - /* Make the SWIOTLB buffer area decrypted */ - set_memory_decrypted((unsigned long)vaddr, size >> PAGE_SHIFT); -} -- cgit v1.2.1 From b6e05477c10c12e36141558fc14f04b00ea634d4 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:24 +0100 Subject: dma/direct: Handle the memory encryption bit in common code Give the basic phys_to_dma() and dma_to_phys() helpers a __-prefix and add the memory encryption mask to the non-prefixed versions. Use the __-prefixed versions directly instead of clearing the mask again in various places. 
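The resulting split can be sketched roughly as follows: the unprefixed helpers in the common header apply the encryption mask via __sme_set()/__sme_clr(), while the __-prefixed ones remain overridable by bridges such as STA2x11 (details may differ from the real include/linux/dma-direct.h):

	static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
	{
		return __sme_set(__phys_to_dma(dev, paddr));
	}

	static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
	{
		return __sme_clr(__dma_to_phys(dev, daddr));
	}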
Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-13-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/include/asm/dma-direct.h | 25 ++----------------------- arch/x86/mm/mem_encrypt.c | 2 +- arch/x86/pci/sta2x11-fixup.c | 6 +++--- 4 files changed, 7 insertions(+), 28 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 7dc347217d3a..5b4899de076f 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -54,7 +54,6 @@ config X86 select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_KCOV if X86_64 - select ARCH_HAS_PHYS_TO_DMA select ARCH_HAS_MEMBARRIER_SYNC_CORE select ARCH_HAS_PMEM_API if X86_64 select ARCH_HAS_REFCOUNT @@ -692,6 +691,7 @@ config X86_SUPPORTS_MEMORY_FAILURE config STA2X11 bool "STA2X11 Companion Chip Support" depends on X86_32_NON_STANDARD && PCI + select ARCH_HAS_PHYS_TO_DMA select X86_DEV_DMA_OPS select X86_DMA_REMAP select SWIOTLB diff --git a/arch/x86/include/asm/dma-direct.h b/arch/x86/include/asm/dma-direct.h index 1295bc622ebe..1a19251eaac9 100644 --- a/arch/x86/include/asm/dma-direct.h +++ b/arch/x86/include/asm/dma-direct.h @@ -2,29 +2,8 @@ #ifndef ASM_X86_DMA_DIRECT_H #define ASM_X86_DMA_DIRECT_H 1 -#include - -#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr); -#else -static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) -{ - if (!dev->dma_mask) - return 0; - - return addr + size - 1 <= *dev->dma_mask; -} - -static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) -{ - return __sme_set(paddr); -} +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr); +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr); -static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) -{ - return __sme_clr(daddr); -} -#endif /* CONFIG_X86_DMA_REMAP */ #endif /* ASM_X86_DMA_DIRECT_H */ diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index d243e8d80d89..1b396422d26f 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -211,7 +211,7 @@ static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, * Since we will be clearing the encryption bit, check the * mask with it already cleared. 
*/ - addr = __sme_clr(phys_to_dma(dev, page_to_phys(page))); + addr = __phys_to_dma(dev, page_to_phys(page)); if ((addr + size) > dev->coherent_dma_mask) { __free_pages(page, get_order(size)); } else { diff --git a/arch/x86/pci/sta2x11-fixup.c b/arch/x86/pci/sta2x11-fixup.c index eac58e03f43c..7a5bafb76d77 100644 --- a/arch/x86/pci/sta2x11-fixup.c +++ b/arch/x86/pci/sta2x11-fixup.c @@ -207,11 +207,11 @@ bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) } /** - * phys_to_dma - Return the DMA AMBA address used for this STA2x11 device + * __phys_to_dma - Return the DMA AMBA address used for this STA2x11 device * @dev: device for a PCI device * @paddr: Physical address */ -dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) +dma_addr_t __phys_to_dma(struct device *dev, phys_addr_t paddr) { if (!dev->archdata.is_sta2x11) return paddr; @@ -223,7 +223,7 @@ dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) * @dev: device for a PCI device * @daddr: STA2x11 AMBA DMA address */ -phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) +phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t daddr) { if (!dev->archdata.is_sta2x11) return daddr; -- cgit v1.2.1 From c10f07aa27dadf5ab5b3d58c48c91a467f80db49 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 19 Mar 2018 11:38:25 +0100 Subject: dma/direct: Handle force decryption for DMA coherent buffers in common code With that in place the generic DMA-direct routines can be used to allocate non-encrypted bounce buffers, and the x86 SEV case can use the generic swiotlb ops including nice features such as using CMA allocations. Note that I'm not too happy about using sev_active() in DMA-direct, but I couldn't come up with a good enough name for a wrapper to make it worth adding. Tested-by: Tom Lendacky Signed-off-by: Christoph Hellwig Reviewed-by: Thomas Gleixner Cc: David Woodhouse Cc: Joerg Roedel Cc: Jon Mason Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Muli Ben-Yehuda Cc: Peter Zijlstra Cc: iommu@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20180319103826.12853-14-hch@lst.de Signed-off-by: Ingo Molnar --- arch/x86/mm/mem_encrypt.c | 73 ++--------------------------------------------- 1 file changed, 3 insertions(+), 70 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c index 1b396422d26f..b2de398d1fd3 100644 --- a/arch/x86/mm/mem_encrypt.c +++ b/arch/x86/mm/mem_encrypt.c @@ -195,58 +195,6 @@ void __init sme_early_init(void) swiotlb_force = SWIOTLB_FORCE; } -static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, - gfp_t gfp, unsigned long attrs) -{ - unsigned int order; - struct page *page; - void *vaddr = NULL; - - order = get_order(size); - page = alloc_pages_node(dev_to_node(dev), gfp, order); - if (page) { - dma_addr_t addr; - - /* - * Since we will be clearing the encryption bit, check the - * mask with it already cleared. 
- */ - addr = __phys_to_dma(dev, page_to_phys(page)); - if ((addr + size) > dev->coherent_dma_mask) { - __free_pages(page, get_order(size)); - } else { - vaddr = page_address(page); - *dma_handle = addr; - } - } - - if (!vaddr) - vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp); - - if (!vaddr) - return NULL; - - /* Clear the SME encryption bit for DMA use if not swiotlb area */ - if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) { - set_memory_decrypted((unsigned long)vaddr, 1 << order); - memset(vaddr, 0, PAGE_SIZE << order); - *dma_handle = __sme_clr(*dma_handle); - } - - return vaddr; -} - -static void sev_free(struct device *dev, size_t size, void *vaddr, - dma_addr_t dma_handle, unsigned long attrs) -{ - /* Set the SME encryption bit for re-use if not swiotlb area */ - if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle))) - set_memory_encrypted((unsigned long)vaddr, - 1 << get_order(size)); - - swiotlb_free_coherent(dev, size, vaddr, dma_handle); -} - static void __init __set_clr_pte_enc(pte_t *kpte, int level, bool enc) { pgprot_t old_prot, new_prot; @@ -399,20 +347,6 @@ bool sev_active(void) } EXPORT_SYMBOL(sev_active); -static const struct dma_map_ops sev_dma_ops = { - .alloc = sev_alloc, - .free = sev_free, - .map_page = swiotlb_map_page, - .unmap_page = swiotlb_unmap_page, - .map_sg = swiotlb_map_sg_attrs, - .unmap_sg = swiotlb_unmap_sg_attrs, - .sync_single_for_cpu = swiotlb_sync_single_for_cpu, - .sync_single_for_device = swiotlb_sync_single_for_device, - .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu, - .sync_sg_for_device = swiotlb_sync_sg_for_device, - .mapping_error = swiotlb_dma_mapping_error, -}; - /* Architecture __weak replacement functions */ void __init mem_encrypt_init(void) { @@ -423,12 +357,11 @@ void __init mem_encrypt_init(void) swiotlb_update_mem_attributes(); /* - * With SEV, DMA operations cannot use encryption. New DMA ops - * are required in order to mark the DMA areas as decrypted or - * to use bounce buffers. + * With SEV, DMA operations cannot use encryption, we need to use + * SWIOTLB to bounce buffer DMA operation. */ if (sev_active()) - dma_ops = &sev_dma_ops; + dma_ops = &swiotlb_dma_ops; /* * With SEV, we need to unroll the rep string I/O instructions. -- cgit v1.2.1 From e501ce957a786ecd076ea0cfb10b114e6e4d0f40 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 17 Jan 2018 11:42:07 +0100 Subject: x86: Force asm-goto We want to start using asm-goto to guarantee the absence of dynamic branches (and thus speculation). A primary prerequisite for this is of course that the compiler supports asm-goto. This effecively lifts the minimum GCC version to build an x86 kernel to gcc-4.5. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: torvalds@linux-foundation.org Link: https://lkml.kernel.org/r/20180319201327.GJ4043@hirez.programming.kicks-ass.net --- arch/x86/Makefile | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 498c1b812300..d798e36d103c 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -181,6 +181,10 @@ ifdef CONFIG_FUNCTION_GRAPH_TRACER endif endif +ifndef CC_HAVE_ASM_GOTO + $(error Compiler lacks asm-goto support.) +endif + # # Jump labels need '-maccumulate-outgoing-args' for gcc < 4.5.2 to prevent a # GCC bug (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=46226). 
There's no way -- cgit v1.2.1 From d0266046ad54e0c964941364cd82a0d0478ce286 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 19 Mar 2018 16:41:26 +0100 Subject: x86: Remove FAST_FEATURE_TESTS Since we want to rely on static branches to avoid speculation, remove any possible fallback code for static_cpu_has. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Thomas Gleixner Cc: torvalds@linux-foundation.org Link: https://lkml.kernel.org/r/20180319154717.705383007@infradead.org --- arch/x86/Kconfig | 11 ----------- arch/x86/include/asm/cpufeature.h | 8 -------- 2 files changed, 19 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 0fa71a78ec99..cb5b5907dbd6 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -393,17 +393,6 @@ config X86_FEATURE_NAMES If in doubt, say Y. -config X86_FAST_FEATURE_TESTS - bool "Fast CPU feature tests" if EMBEDDED - default y - ---help--- - Some fast-paths in the kernel depend on the capabilities of the CPU. - Say Y here for the kernel to patch in the appropriate code at runtime - based on the capabilities of the CPU. The infrastructure for patching - code at runtime takes up some additional space; space-constrained - embedded systems may wish to say N here to produce smaller, slightly - slower code. - config X86_X2APIC bool "Support x2apic" depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index 736771c9822e..b27da9602a6d 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -140,7 +140,6 @@ extern void clear_cpu_cap(struct cpuinfo_x86 *c, unsigned int bit); #define setup_force_cpu_bug(bit) setup_force_cpu_cap(bit) -#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_X86_FAST_FEATURE_TESTS) /* * Static testing of CPU features. Used the same as boot_cpu_has(). * These will statically patch the target code for additional @@ -196,13 +195,6 @@ t_no: boot_cpu_has(bit) : \ _static_cpu_has(bit) \ ) -#else -/* - * Fall back to dynamic for gcc versions which don't support asm goto. Should be - * a minority now anyway. - */ -#define static_cpu_has(bit) boot_cpu_has(bit) -#endif #define cpu_has_bug(c, bit) cpu_has(c, (bit)) #define set_cpu_bug(c, bit) set_cpu_cap(c, (bit)) -- cgit v1.2.1 From 32d43cd391bacb5f0814c2624399a5dad3501d09 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Tue, 20 Mar 2018 12:16:59 -0700 Subject: kvm/x86: fix icebp instruction handling The undocumented 'icebp' instruction (aka 'int1') works pretty much like 'int3' in the absense of in-circuit probing equipment (except, obviously, that it raises #DB instead of raising #BP), and is used by some validation test-suites as such. But Andy Lutomirski noticed that his test suite acted differently in kvm than on bare hardware. The reason is that kvm used an inexact test for the icebp instruction: it just assumed that an all-zero VM exit qualification value meant that the VM exit was due to icebp. That is not unlike the guess that do_debug() does for the actual exception handling case, but it's purely a heuristic, not an absolute rule. do_debug() does it because it wants to ascribe _some_ reasons to the #DB that happened, and an empty %dr6 value means that 'icebp' is the most likely casue and we have no better information. But kvm can just do it right, because unlike the do_debug() case, kvm actually sees the real reason for the #DB in the VM-exit interruption information field. 
So instead of relying on an inexact heuristic, just use the actual VM exit information that says "it was 'icebp'". Right now the 'icebp' instruction isn't technically documented by Intel, but that will hopefully change. The special "privileged software exception" information _is_ actually mentioned in the Intel SDM, even though the cause of it isn't enumerated. Reported-by: Andy Lutomirski Tested-by: Paolo Bonzini Signed-off-by: Linus Torvalds --- arch/x86/include/asm/vmx.h | 1 + arch/x86/kvm/vmx.c | 9 ++++++++- 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h index 8b6780751132..5db8b0b10766 100644 --- a/arch/x86/include/asm/vmx.h +++ b/arch/x86/include/asm/vmx.h @@ -352,6 +352,7 @@ enum vmcs_field { #define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */ #define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */ #define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */ +#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */ #define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */ /* GUEST_INTERRUPTIBILITY_INFO flags. */ diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 051dab74e4e9..2d87603f9179 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1045,6 +1045,13 @@ static inline bool is_machine_check(u32 intr_info) (INTR_TYPE_HARD_EXCEPTION | MC_VECTOR | INTR_INFO_VALID_MASK); } +/* Undocumented: icebp/int1 */ +static inline bool is_icebp(u32 intr_info) +{ + return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VALID_MASK)) + == (INTR_TYPE_PRIV_SW_EXCEPTION | INTR_INFO_VALID_MASK); +} + static inline bool cpu_has_vmx_msr_bitmap(void) { return vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS; @@ -6179,7 +6186,7 @@ static int handle_exception(struct kvm_vcpu *vcpu) (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { vcpu->arch.dr6 &= ~15; vcpu->arch.dr6 |= dr6 | DR6_RTM; - if (!(dr6 & ~DR6_RESERVED)) /* icebp */ + if (is_icebp(intr_info)) skip_emulated_instruction(vcpu); kvm_queue_exception(vcpu, DB_VECTOR); -- cgit v1.2.1 From b6bdb7517c3d3f41f20e5c2948d6bc3f8897394e Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Thu, 22 Mar 2018 16:17:20 -0700 Subject: mm/vmalloc: add interfaces to free unmapped page table On architectures with CONFIG_HAVE_ARCH_HUGE_VMAP set, ioremap() may create pud/pmd mappings. A kernel panic was observed on arm64 systems with Cortex-A75 in the following steps as described by Hanjun Guo. 1. ioremap a 4K size, valid page table will build, 2. iounmap it, pte0 will set to 0; 3. ioremap the same address with 2M size, pgd/pmd is unchanged, then set the a new value for pmd; 4. pte0 is leaked; 5. CPU may meet exception because the old pmd is still in TLB, which will lead to kernel panic. This panic is not reproducible on x86. INVLPG, called from iounmap, purges all levels of entries associated with purged address on x86. x86 still has memory leak. The patch changes the ioremap path to free unmapped page table(s) since doing so in the unmap path has the following issues: - The iounmap() path is shared with vunmap(). Since vmap() only supports pte mappings, making vunmap() to free a pte page is an overhead for regular vmap users as they do not need a pte page freed up. - Checking if all entries in a pte page are cleared in the unmap path is racy, and serializing this check is expensive. - The unmap path calls free_vmap_area_noflush() to do lazy TLB purges. 
Clearing a pud/pmd entry before the lazy TLB purges needs extra TLB purge. Add two interfaces, pud_free_pmd_page() and pmd_free_pte_page(), which clear a given pud/pmd entry and free up a page for the lower level entries. This patch implements their stub functions on x86 and arm64, which work as workaround. [akpm@linux-foundation.org: fix typo in pmd_free_pte_page() stub] Link: http://lkml.kernel.org/r/20180314180155.19492-2-toshi.kani@hpe.com Fixes: e61ce6ade404e ("mm: change ioremap to set up huge I/O mappings") Reported-by: Lei Li Signed-off-by: Toshi Kani Cc: Catalin Marinas Cc: Wang Xuefeng Cc: Will Deacon Cc: Hanjun Guo Cc: Michal Hocko Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Borislav Petkov Cc: Matthew Wilcox Cc: Chintan Pandya Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pgtable.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 004abf9ebf12..1eed7ed518e6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -702,4 +702,28 @@ int pmd_clear_huge(pmd_t *pmd) return 0; } + +/** + * pud_free_pmd_page - Clear pud entry and free pmd page. + * @pud: Pointer to a PUD. + * + * Context: The pud range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + */ +int pud_free_pmd_page(pud_t *pud) +{ + return pud_none(*pud); +} + +/** + * pmd_free_pte_page - Clear pmd entry and free pte page. + * @pmd: Pointer to a PMD. + * + * Context: The pmd range has been unmaped and TLB purged. + * Return: 1 if clearing the entry succeeded. 0 otherwise. + */ +int pmd_free_pte_page(pmd_t *pmd) +{ + return pmd_none(*pmd); +} #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ -- cgit v1.2.1 From 28ee90fe6048fa7b7ceaeb8831c0e4e454a4cf89 Mon Sep 17 00:00:00 2001 From: Toshi Kani Date: Thu, 22 Mar 2018 16:17:24 -0700 Subject: x86/mm: implement free pmd/pte page interfaces Implement pud_free_pmd_page() and pmd_free_pte_page() on x86, which clear a given pud/pmd entry and free up lower level page table(s). The address range associated with the pud/pmd entry must have been purged by INVLPG. Link: http://lkml.kernel.org/r/20180314180155.19492-3-toshi.kani@hpe.com Fixes: e61ce6ade404e ("mm: change ioremap to set up huge I/O mappings") Signed-off-by: Toshi Kani Reported-by: Lei Li Cc: Michal Hocko Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Borislav Petkov Cc: Matthew Wilcox Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/pgtable.c | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 1eed7ed518e6..34cda7e0551b 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c @@ -712,7 +712,22 @@ int pmd_clear_huge(pmd_t *pmd) */ int pud_free_pmd_page(pud_t *pud) { - return pud_none(*pud); + pmd_t *pmd; + int i; + + if (pud_none(*pud)) + return 1; + + pmd = (pmd_t *)pud_page_vaddr(*pud); + + for (i = 0; i < PTRS_PER_PMD; i++) + if (!pmd_free_pte_page(&pmd[i])) + return 0; + + pud_clear(pud); + free_page((unsigned long)pmd); + + return 1; } /** @@ -724,6 +739,15 @@ int pud_free_pmd_page(pud_t *pud) */ int pmd_free_pte_page(pmd_t *pmd) { - return pmd_none(*pmd); + pte_t *pte; + + if (pmd_none(*pmd)) + return 1; + + pte = (pte_t *)pmd_page_vaddr(*pmd); + pmd_clear(pmd); + free_page((unsigned long)pte); + + return 1; } #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ -- cgit v1.2.1 From 9d1887ef3252a45f4e692525d04e23ce8091c17c Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Mon, 5 Mar 2018 09:33:27 -0800 Subject: KVM: nVMX: sync vmcs02 segment regs prior to vmx_set_cr0 Segment registers must be synchronized prior to any code that may trigger a call to emulation_required()/guest_state_valid(), e.g. vmx_set_cr0(). Because preparing vmcs02 writes segmentation fields directly, i.e. doesn't use vmx_set_segment(), emulation_required will not be re-evaluated when synchronizing the segment registers, which can result in L0 incorrectly starting emulation of L2. Fixes: 8665c3f97320 ("KVM: nVMX: initialize descriptor cache fields in prepare_vmcs02_full") Signed-off-by: Sean Christopherson [Move all of prepare_vmcs02_full earlier, not just segment registers. - Paolo] Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 051dab74e4e9..f0fd50ba284c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10704,6 +10704,11 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control, vmcs12_exec_ctrl; + if (vmx->nested.dirty_vmcs12) { + prepare_vmcs02_full(vcpu, vmcs12, from_vmentry); + vmx->nested.dirty_vmcs12 = false; + } + /* * First, the fields that are shadowed. This must be kept in sync * with vmx_shadow_fields.h. @@ -10941,11 +10946,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, /* Note: modifies VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */ vmx_set_efer(vcpu, vcpu->arch.efer); - if (vmx->nested.dirty_vmcs12) { - prepare_vmcs02_full(vcpu, vmcs12, from_vmentry); - vmx->nested.dirty_vmcs12 = false; - } - /* Shadow page tables on either EPT or shadow page tables. */ if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) -- cgit v1.2.1 From ea89c065482179b2bf9f9b6788b06a6e0c68a73b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 23 Mar 2018 00:05:29 +0100 Subject: x86/tsc: Get rid of rdtscll() Commit 99770737ca7e ("x86/asm/tsc: Add rdtscll() merge helper") added rdtscll() in August 2015 along with the comment: /* Deprecated, keep it for a cycle for easier merging: */ 12 cycles later it's really overdue for removal. 
Signed-off-by: Thomas Gleixner --- arch/x86/events/msr.c | 3 ++- arch/x86/include/asm/msr.h | 3 --- arch/x86/kernel/cpu/mcheck/mce-inject.c | 2 +- 3 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c index 18e2628e2d8f..e7edf19e64c2 100644 --- a/arch/x86/events/msr.c +++ b/arch/x86/events/msr.c @@ -188,10 +188,11 @@ static inline u64 msr_read_counter(struct perf_event *event) if (event->hw.event_base) rdmsrl(event->hw.event_base, now); else - rdtscll(now); + now = rdtsc_ordered(); return now; } + static void msr_event_update(struct perf_event *event) { u64 prev, now; diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h index 30df295f6d94..392f05d57e78 100644 --- a/arch/x86/include/asm/msr.h +++ b/arch/x86/include/asm/msr.h @@ -218,9 +218,6 @@ static __always_inline unsigned long long rdtsc_ordered(void) return rdtsc(); } -/* Deprecated, keep it for a cycle for easier merging: */ -#define rdtscll(now) do { (now) = rdtsc_ordered(); } while (0) - static inline unsigned long long native_read_pmc(int counter) { DECLARE_ARGS(val, low, high); diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 231ad23b24a9..475cb4f5f14f 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c @@ -491,7 +491,7 @@ static void do_inject(void) unsigned int cpu = i_mce.extcpu; u8 b = i_mce.bank; - rdtscll(i_mce.tsc); + i_mce.tsc = rdtsc_ordered(); if (i_mce.misc) i_mce.status |= MCI_STATUS_MISCV; -- cgit v1.2.1 From e847f6aaf68f6156a5e9b26afe1a7316b9ab697e Mon Sep 17 00:00:00 2001 From: Sven Wegener Date: Sat, 24 Mar 2018 22:21:13 +0100 Subject: x86/purgatory: Avoid creating stray ..d files, remove -MD from KBUILD_CFLAGS The kernel build system already takes care of generating the dependency files. Having the additional -MD in KBUILD_CFLAGS leads to stray ..d files in the build directory when we call the cc-option macro. Signed-off-by: Sven Wegener Cc: H. Peter Anvin Cc: Kees Cook Cc: Linus Torvalds Cc: Matthias Kaehlcke Cc: Peter Zijlstra Cc: Sam Ravnborg Cc: Thomas Gleixner Cc: Vivek Goyal Link: http://lkml.kernel.org/r/alpine.LNX.2.21.1803242219380.30139@titan.int.lan.stealer.net Signed-off-by: Ingo Molnar --- arch/x86/purgatory/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 2f15a2ac4209..d70c15de417b 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -16,7 +16,7 @@ KCOV_INSTRUMENT := n # in turn leaves some undefined symbols like __fentry__ in purgatory and not # sure how to relocate those. Like kexec-tools, use custom flags. -KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large +KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -Os -mcmodel=large KBUILD_CFLAGS += -m$(BITS) KBUILD_CFLAGS += $(call cc-option,-fno-PIE) -- cgit v1.2.1 From e25283bf83bd97a61007ab383695f4872e2eb43f Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Sun, 25 Mar 2018 14:04:30 -0700 Subject: x86/apic: Finish removing unused callbacks The ->cpu_mask_to_apicid() and ->vector_allocation_domain() callbacks are now unused, so remove them. 
Signed-off-by: David Rientjes Cc: Juergen Gross Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: baab1e84b112 ("x86/apic: Remove unused callbacks") Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1803251403540.80485@chino.kir.corp.google.com Signed-off-by: Ingo Molnar --- arch/x86/include/asm/apic.h | 16 ---------------- 1 file changed, 16 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h index c6a32019ff54..40a3d3642f3a 100644 --- a/arch/x86/include/asm/apic.h +++ b/arch/x86/include/asm/apic.h @@ -304,12 +304,6 @@ struct apic { u32 irq_delivery_mode; u32 irq_dest_mode; - /* Functions and data related to vector allocation */ - void (*vector_allocation_domain)(int cpu, struct cpumask *retmask, - const struct cpumask *mask); - int (*cpu_mask_to_apicid)(const struct cpumask *cpumask, - struct irq_data *irqdata, - unsigned int *apicid); u32 (*calc_dest_apicid)(unsigned int cpu); /* ICR related functions */ @@ -499,17 +493,7 @@ extern void default_setup_apic_routing(void); extern u32 apic_default_calc_apicid(unsigned int cpu); extern u32 apic_flat_calc_apicid(unsigned int cpu); -extern int flat_cpu_mask_to_apicid(const struct cpumask *cpumask, - struct irq_data *irqdata, - unsigned int *apicid); -extern int default_cpu_mask_to_apicid(const struct cpumask *cpumask, - struct irq_data *irqdata, - unsigned int *apicid); extern bool default_check_apicid_used(physid_mask_t *map, int apicid); -extern void flat_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask); -extern void default_vector_allocation_domain(int cpu, struct cpumask *retmask, - const struct cpumask *mask); extern void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap); extern int default_cpu_present_to_apicid(int mps_cpu); extern int default_check_phys_apicid_present(int phys_apicid); -- cgit v1.2.1 From 4e07db9c8db87e3b71be4c3cd626ddc9bacd5a1e Mon Sep 17 00:00:00 2001 From: Ivan Gorinov Date: Thu, 22 Mar 2018 14:35:47 -0700 Subject: x86/devicetree: Use CPU description from Device Tree Current x86 Device Tree implementation does not support multiprocessing. Use new DT bindings to describe the processors. 
Signed-off-by: Ivan Gorinov Signed-off-by: Thomas Gleixner Reviewed-by: Andy Shevchenko Cc: Mark Rutland Cc: Rob Herring Cc: Frank Rowand Link: https://lkml.kernel.org/r/c291fb2cef51b730b59916d7745be0eaa4378c6c.1521753738.git.ivan.gorinov@intel.com --- arch/x86/kernel/devicetree.c | 45 ++++++++++++++++++++++++++++++++------------ 1 file changed, 33 insertions(+), 12 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 5cd387fcc777..c9d2b19749b8 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c @@ -131,34 +131,52 @@ static void __init dtb_setup_hpet(void) #endif } +#ifdef CONFIG_X86_LOCAL_APIC + +static void __init dtb_cpu_setup(void) +{ + struct device_node *dn; + u32 apic_id, version; + int ret; + + version = GET_APIC_VERSION(apic_read(APIC_LVR)); + for_each_node_by_type(dn, "cpu") { + ret = of_property_read_u32(dn, "reg", &apic_id); + if (ret < 0) { + pr_warn("%pOF: missing local APIC ID\n", dn); + continue; + } + generic_processor_info(apic_id, version); + } +} + static void __init dtb_lapic_setup(void) { -#ifdef CONFIG_X86_LOCAL_APIC struct device_node *dn; struct resource r; + unsigned long lapic_addr = APIC_DEFAULT_PHYS_BASE; int ret; dn = of_find_compatible_node(NULL, NULL, "intel,ce4100-lapic"); - if (!dn) - return; - - ret = of_address_to_resource(dn, 0, &r); - if (WARN_ON(ret)) - return; + if (dn) { + ret = of_address_to_resource(dn, 0, &r); + if (WARN_ON(ret)) + return; + lapic_addr = r.start; + } /* Did the boot loader setup the local APIC ? */ if (!boot_cpu_has(X86_FEATURE_APIC)) { - if (apic_force_enable(r.start)) + if (apic_force_enable(lapic_addr)) return; } smp_found_config = 1; pic_mode = 1; - register_lapic_address(r.start); - generic_processor_info(boot_cpu_physical_apicid, - GET_APIC_VERSION(apic_read(APIC_LVR))); -#endif + register_lapic_address(lapic_addr); } +#endif /* CONFIG_X86_LOCAL_APIC */ + #ifdef CONFIG_X86_IO_APIC static unsigned int ioapic_id; @@ -259,7 +277,10 @@ static void __init dtb_ioapic_setup(void) {} static void __init dtb_apic_setup(void) { +#ifdef CONFIG_X86_LOCAL_APIC dtb_lapic_setup(); + dtb_cpu_setup(); +#endif dtb_ioapic_setup(); } -- cgit v1.2.1 From 631fe154edb0a37308d0116a0f9b7bba9dca6218 Mon Sep 17 00:00:00 2001 From: Davidlohr Bueso Date: Mon, 26 Mar 2018 14:09:27 -0700 Subject: perf/x86: Update rdpmc_always_available static key to the modern API No changes in refcount semantics -- use DEFINE_STATIC_KEY_FALSE() for initialization and replace: static_key_slow_inc|dec() => static_branch_inc|dec() static_key_false() => static_branch_unlikely() Added a '_key' suffix to rdpmc_always_available, for better self-documentation. 
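As a quick illustration of the modern API named above (a minimal sketch with a made-up key and helper from <linux/jump_label.h>, not the perf code itself):

	DEFINE_STATIC_KEY_FALSE(my_key);

	static void fast_path(void)
	{
		/* patched to a plain fall-through while the key is disabled */
		if (static_branch_unlikely(&my_key))
			rare_extra_work();	/* placeholder helper */
	}

	/* reference-counted enable/disable from slow paths: */
	static_branch_inc(&my_key);
	static_branch_dec(&my_key);
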
Signed-off-by: Davidlohr Bueso Cc: Davidlohr Bueso Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Link: http://lkml.kernel.org/r/20180326210929.5244-5-dave@stgolabs.net Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 6 +++--- arch/x86/include/asm/mmu_context.h | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 4823695c459f..a6006e7bb729 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -48,7 +48,7 @@ DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; -struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE; +DEFINE_STATIC_KEY_FALSE(rdpmc_always_available_key); u64 __read_mostly hw_cache_event_ids [PERF_COUNT_HW_CACHE_MAX] @@ -2206,9 +2206,9 @@ static ssize_t set_attr_rdpmc(struct device *cdev, * but only root can trigger it, so it's okay. */ if (val == 2) - static_key_slow_inc(&rdpmc_always_available); + static_branch_inc(&rdpmc_always_available_key); else - static_key_slow_dec(&rdpmc_always_available); + static_branch_dec(&rdpmc_always_available_key); on_each_cpu(refresh_pce, NULL, 1); } diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h index 1de72ce514cd..57e3785d0d26 100644 --- a/arch/x86/include/asm/mmu_context.h +++ b/arch/x86/include/asm/mmu_context.h @@ -24,11 +24,12 @@ static inline void paravirt_activate_mm(struct mm_struct *prev, #endif /* !CONFIG_PARAVIRT */ #ifdef CONFIG_PERF_EVENTS -extern struct static_key rdpmc_always_available; + +DECLARE_STATIC_KEY_FALSE(rdpmc_always_available_key); static inline void load_mm_cr4(struct mm_struct *mm) { - if (static_key_false(&rdpmc_always_available) || + if (static_branch_unlikely(&rdpmc_always_available_key) || atomic_read(&mm->context.perf_rdpmc_allowed)) cr4_set_bits(X86_CR4_PCE); else -- cgit v1.2.1 From 71eb9ee9596d8df3d5723c3cfc18774c6235e8b1 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 23 Mar 2018 00:01:47 -0700 Subject: perf/x86/intel: Fix linear IP of PEBS real_ip on Haswell and later CPUs this patch fix a bug in how the pebs->real_ip is handled in the PEBS handler. real_ip only exists in Haswell and later processor. It is actually the eventing IP, i.e., where the event occurred. As opposed to the pebs->ip which is the PEBS interrupt IP which is always off by one. The problem is that the real_ip just like the IP needs to be fixed up because PEBS does not record all the machine state registers, and in particular the code segement (cs). This is why we have the set_linear_ip() function. The problem was that set_linear_ip() was only used on the pebs->ip and not the pebs->real_ip. We have profiles which ran into invalid callstacks because of this. Here is an example: ..... 0: ffffffffffffff80 recent entry, marker kernel v ..... 1: 000000000040044d <= user address in kernel space! ..... 2: fffffffffffffe00 marker enter user v ..... 3: 000000000040044d ..... 4: 00000000004004b6 oldest entry Debugging output in get_perf_callchain(): [ 857.769909] CALLCHAIN: CPU8 ip=40044d regs->cs=10 user_mode(regs)=0 The problem is that the kernel entry in 1: points to a user level address. How can that be? The reason is that with PEBS sampling the instruction that caused the event to occur and the instruction where the CPU was when the interrupt was posted may be far apart. And sometime during that time window, the privilege level may change. 
This happens, for instance, when the PEBS sample is taken close to a kernel entry point. Here PEBS, eventing IP (real_ip) captured a user level instruction. But by the time the PMU interrupt fired, the processor had already entered kernel space. This is why the debug output shows a user address with user_mode() false. The problem comes from PEBS not recording the code segment (cs) register. The register is used in x86_64 to determine if executing in kernel vs user space. This is okay because the kernel has a software workaround called set_linear_ip(). But the issue in setup_pebs_sample_data() is that set_linear_ip() is never called on the real_ip value when it is available (Haswell and later) and precise_ip > 1. This patch fixes this problem and eliminates the callchain discrepancy. The patch restructures the code around set_linear_ip() to minimize the number of times the IP has to be set. Signed-off-by: Stephane Eranian Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vince Weaver Cc: kan.liang@intel.com Link: http://lkml.kernel.org/r/1521788507-10231-1-git-send-email-eranian@google.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/ds.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c index d8015235ba76..5e526c54247e 100644 --- a/arch/x86/events/intel/ds.c +++ b/arch/x86/events/intel/ds.c @@ -1153,6 +1153,7 @@ static void setup_pebs_sample_data(struct perf_event *event, if (pebs == NULL) return; + regs->flags &= ~PERF_EFLAGS_EXACT; sample_type = event->attr.sample_type; dsrc = sample_type & PERF_SAMPLE_DATA_SRC; @@ -1197,7 +1198,6 @@ static void setup_pebs_sample_data(struct perf_event *event, */ *regs = *iregs; regs->flags = pebs->flags; - set_linear_ip(regs, pebs->ip); if (sample_type & PERF_SAMPLE_REGS_INTR) { regs->ax = pebs->ax; @@ -1233,13 +1233,22 @@ static void setup_pebs_sample_data(struct perf_event *event, #endif } - if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) { - regs->ip = pebs->real_ip; - regs->flags |= PERF_EFLAGS_EXACT; - } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs)) - regs->flags |= PERF_EFLAGS_EXACT; - else - regs->flags &= ~PERF_EFLAGS_EXACT; + if (event->attr.precise_ip > 1) { + /* Haswell and later have the eventing IP, so use it: */ + if (x86_pmu.intel_cap.pebs_format >= 2) { + set_linear_ip(regs, pebs->real_ip); + regs->flags |= PERF_EFLAGS_EXACT; + } else { + /* Otherwise use PEBS off-by-1 IP: */ + set_linear_ip(regs, pebs->ip); + + /* ... and try to fix it up using the LBR entries: */ + if (intel_pmu_pebs_fixup_ip(regs)) + regs->flags |= PERF_EFLAGS_EXACT; + } + } else + set_linear_ip(regs, pebs->ip); + if ((sample_type & (PERF_SAMPLE_ADDR | PERF_SAMPLE_PHYS_ADDR)) && x86_pmu.intel_cap.pebs_format >= 1) -- cgit v1.2.1 From fc5d1073cae299de4517755a910df4f12a6a438f Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Mon, 26 Mar 2018 23:27:21 -0700 Subject: x86/mm/32: Remove unused node_memmap_size_bytes() & CONFIG_NEED_NODE_MEMMAP_SIZE logic node_memmap_size_bytes() has been unused since the v3.9 kernel, so remove it. 
Signed-off-by: David Rientjes Cc: Dave Hansen Cc: Laura Abbott Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mm@kvack.org Fixes: f03574f2d5b2 ("x86-32, mm: Rip out x86_32 NUMA remapping code") Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1803262325540.256524@chino.kir.corp.google.com Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 4 ---- arch/x86/mm/numa_32.c | 11 ----------- 2 files changed, 15 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 18233e459bff..739aff253d17 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1608,10 +1608,6 @@ config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM -config NEED_NODE_MEMMAP_SIZE - def_bool y - depends on X86_32 && (DISCONTIGMEM || SPARSEMEM) - config ARCH_FLATMEM_ENABLE def_bool y depends on X86_32 && !NUMA diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index aca6295350f3..e8a4a09e20f1 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -60,17 +60,6 @@ void memory_present(int nid, unsigned long start, unsigned long end) } printk(KERN_CONT "\n"); } - -unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, - unsigned long end_pfn) -{ - unsigned long nr_pages = end_pfn - start_pfn; - - if (!nr_pages) - return 0; - - return (nr_pages + 1) * sizeof(struct page); -} #endif extern unsigned long highend_pfn, highstart_pfn; -- cgit v1.2.1 From 47a9973d3ed8994589c845c8b1a293a475a549a9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Tue, 20 Mar 2018 20:08:00 +0200 Subject: x86/PCI: Fix a potential regression when using dmi_get_bios_year() dmi_get_bios_year() may return 0 when it cannot parse the BIOS date string. Previously this has been checked in pci_acpi_crs_quirks(). Update the code to restore old behaviour. Reported-by: Jean Delvare Signed-off-by: Andy Shevchenko Reviewed-by: Jean Delvare Reviewed-by: Rafael J. Wysocki Acked-by: Thomas Gleixner Cc: Bjorn Helgaas Cc: Linus Torvalds Cc: Lukas Wunner Cc: Peter Zijlstra Cc: Rafael J . Wysocki Cc: linux-acpi@vger.kernel.org Cc: linux-pci@vger.kernel.org Fixes: 69c42d493db4 ("x86/pci: Simplify code by using the new dmi_get_bios_year() helper") Signed-off-by: Ingo Molnar --- arch/x86/pci/acpi.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 00e60de30328..5559dcaddd5e 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c @@ -140,7 +140,9 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = { void __init pci_acpi_crs_quirks(void) { - if ((dmi_get_bios_year() < 2008) && (iomem_resource.end <= 0xffffffff)) + int year = dmi_get_bios_year(); + + if (year >= 0 && year < 2008 && iomem_resource.end <= 0xffffffff) pci_use_crs = false; dmi_check_system(pci_crs_quirks); -- cgit v1.2.1 From 547edaca247abf910e32f0cd883ba83b8fc6d0ed Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 15 Mar 2018 16:49:06 +0300 Subject: x86/mm: Update comment in detect_tme() regarding x86_phys_bits As Kai pointed out, the primary reason for adjusting x86_phys_bits is to reflect that the the address space is reduced and not the ability to communicate the available physical address space to virtual machines. Suggested-by: Kai Huang Signed-off-by: Kirill A. 
Shutemov Signed-off-by: Thomas Gleixner Cc: Tom Lendacky Cc: Dave Hansen Cc: linux-mm@kvack.org Link: https://lkml.kernel.org/r/20180315134907.9311-2-kirill.shutemov@linux.intel.com --- arch/x86/kernel/cpu/intel.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 3f8d7a3b6447..6106d11ceb6b 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -587,11 +587,8 @@ detect_keyid_bits: } /* - * Exclude KeyID bits from physical address bits. - * - * We have to do this even if we are not going to use KeyID bits - * ourself. VM guests still have to know that these bits are not usable - * for physical address. + * KeyID bits effectively lower the number of physical address + * bits. Update cpuinfo_x86::x86_phys_bits accordingly. */ c->x86_phys_bits -= keyid_bits; } -- cgit v1.2.1 From 07cde313b2d21f728cec2836db7cdb55476f7a26 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 23 Mar 2018 14:58:17 -0700 Subject: x86/msr: Allow rdmsr_safe_on_cpu() to schedule High latencies can be observed caused by a daemon periodically reading various MSR on all cpus. On KASAN enabled kernels ~10ms latencies can be observed simply reading one MSR. Even without KASAN, sending an IPI to a CPU, which is in a deep sleep state or in a long hard IRQ disabled section, waiting for the answer can consume hundreds of microseconds. All usage sites are in preemptible context, convert rdmsr_safe_on_cpu() to use a completion instead of busy polling. Overall daemon cpu usage was reduced by 35 %, and latencies caused by msr_read() disappeared. Signed-off-by: Eric Dumazet Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Cc: Hugh Dickins Cc: Borislav Petkov Cc: Eric Dumazet Link: https://lkml.kernel.org/r/20180323215818.127774-1-edumazet@google.com --- arch/x86/lib/msr-smp.c | 32 ++++++++++++++++++++++++-------- 1 file changed, 24 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index 693cce0be82d..761ba062afda 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -2,6 +2,7 @@ #include #include #include +#include #include static void __rdmsr_on_cpu(void *info) @@ -143,13 +144,19 @@ void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs) } EXPORT_SYMBOL(wrmsr_on_cpus); +struct msr_info_completion { + struct msr_info msr; + struct completion done; +}; + /* These "safe" variants are slower and should be used when the target MSR may not actually exist. 
*/ static void __rdmsr_safe_on_cpu(void *info) { - struct msr_info *rv = info; + struct msr_info_completion *rv = info; - rv->err = rdmsr_safe(rv->msr_no, &rv->reg.l, &rv->reg.h); + rv->msr.err = rdmsr_safe(rv->msr.msr_no, &rv->msr.reg.l, &rv->msr.reg.h); + complete(&rv->done); } static void __wrmsr_safe_on_cpu(void *info) @@ -161,17 +168,26 @@ static void __wrmsr_safe_on_cpu(void *info) int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h) { + struct msr_info_completion rv; + call_single_data_t csd = { + .func = __rdmsr_safe_on_cpu, + .info = &rv, + }; int err; - struct msr_info rv; memset(&rv, 0, sizeof(rv)); + init_completion(&rv.done); + rv.msr.msr_no = msr_no; - rv.msr_no = msr_no; - err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); - *l = rv.reg.l; - *h = rv.reg.h; + err = smp_call_function_single_async(cpu, &csd); + if (!err) { + wait_for_completion(&rv.done); + err = rv.msr.err; + } + *l = rv.msr.reg.l; + *h = rv.msr.reg.h; - return err ? err : rv.err; + return err; } EXPORT_SYMBOL(rdmsr_safe_on_cpu); -- cgit v1.2.1 From 67bbd7a8d6bcdc44cc27105ae8c374e9176ceaf1 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 23 Mar 2018 14:58:18 -0700 Subject: x86/cpuid: Allow cpuid_read() to schedule High latencies can be observed caused by a daemon periodically reading CPUID on all cpus. On KASAN enabled kernels ~10ms latencies can be observed. Even without KASAN, sending an IPI to a CPU, which is in a deep sleep state or in a long hard IRQ disabled section, waiting for the answer can consume hundreds of microseconds. cpuid_read() is invoked in preemptible context, so it can be converted to sleep instead of busy wait. Switching to smp_call_function_single_async() and a completion allows to reschedule and reduces CPU usage and latencies. 
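The shape of the conversion, shared by this patch and the rdmsr_safe_on_cpu() change above, is roughly the following sketch (names are illustrative; the APIs are from <linux/smp.h> and <linux/completion.h>):

	struct my_request {
		struct completion done;
		/* per-call payload ... */
	};

	static void my_remote_func(void *info)
	{
		struct my_request *req = info;

		/* do the work on the target CPU ... */
		complete(&req->done);
	}

	static int my_call_on_cpu(unsigned int cpu)
	{
		struct my_request req;
		call_single_data_t csd = {
			.func	= my_remote_func,
			.info	= &req,
		};
		int err;

		init_completion(&req.done);
		/* queue the IPI without spinning for the result ... */
		err = smp_call_function_single_async(cpu, &csd);
		if (err)
			return err;
		/* ... and sleep until the remote CPU signals completion */
		wait_for_completion(&req.done);
		return 0;
	}
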
Signed-off-by: Eric Dumazet Signed-off-by: Thomas Gleixner Acked-by: Ingo Molnar Cc: Hugh Dickins Cc: Borislav Petkov Cc: Eric Dumazet Link: https://lkml.kernel.org/r/20180323215818.127774-2-edumazet@google.com --- arch/x86/kernel/cpuid.c | 34 ++++++++++++++++++++++++++-------- 1 file changed, 26 insertions(+), 8 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 0931a105ffe1..1d300f96df4b 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c @@ -40,6 +40,7 @@ #include #include #include +#include #include #include @@ -47,19 +48,27 @@ static struct class *cpuid_class; static enum cpuhp_state cpuhp_cpuid_state; +struct cpuid_regs_done { + struct cpuid_regs regs; + struct completion done; +}; + static void cpuid_smp_cpuid(void *cmd_block) { - struct cpuid_regs *cmd = (struct cpuid_regs *)cmd_block; + struct cpuid_regs_done *cmd = cmd_block; + + cpuid_count(cmd->regs.eax, cmd->regs.ecx, + &cmd->regs.eax, &cmd->regs.ebx, + &cmd->regs.ecx, &cmd->regs.edx); - cpuid_count(cmd->eax, cmd->ecx, - &cmd->eax, &cmd->ebx, &cmd->ecx, &cmd->edx); + complete(&cmd->done); } static ssize_t cpuid_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char __user *tmp = buf; - struct cpuid_regs cmd; + struct cpuid_regs_done cmd; int cpu = iminor(file_inode(file)); u64 pos = *ppos; ssize_t bytes = 0; @@ -68,19 +77,28 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, if (count % 16) return -EINVAL; /* Invalid chunk size */ + init_completion(&cmd.done); for (; count; count -= 16) { - cmd.eax = pos; - cmd.ecx = pos >> 32; - err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); + call_single_data_t csd = { + .func = cpuid_smp_cpuid, + .info = &cmd, + }; + + cmd.regs.eax = pos; + cmd.regs.ecx = pos >> 32; + + err = smp_call_function_single_async(cpu, &csd); if (err) break; - if (copy_to_user(tmp, &cmd, 16)) { + wait_for_completion(&cmd.done); + if (copy_to_user(tmp, &cmd.regs, 16)) { err = -EFAULT; break; } tmp += 16; bytes += 16; *ppos = ++pos; + reinit_completion(&cmd.done); } return bytes ? bytes : err; -- cgit v1.2.1 From 9b9a51354cae933f5640b5bb73bbcd32f989122f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Tue, 27 Mar 2018 20:22:33 -0700 Subject: x86/msr: Make rdmsrl_safe_on_cpu() scheduling safe as well When changing rdmsr_safe_on_cpu() to schedule, it was missed that __rdmsr_safe_on_cpu() was also used by rdmsrl_safe_on_cpu() Make rdmsrl_safe_on_cpu() a wrapper instead of copy/pasting the code which was added for the completion handling. Fixes: 07cde313b2d2 ("x86/msr: Allow rdmsr_safe_on_cpu() to schedule") Reported-by: kbuild test robot Signed-off-by: Eric Dumazet Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Eric Dumazet Cc: "H. Peter Anvin" Link: https://lkml.kernel.org/r/20180328032233.153055-1-edumazet@google.com --- arch/x86/lib/msr-smp.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c index 761ba062afda..fee8b9c0520c 100644 --- a/arch/x86/lib/msr-smp.c +++ b/arch/x86/lib/msr-smp.c @@ -225,16 +225,13 @@ EXPORT_SYMBOL(wrmsrl_safe_on_cpu); int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) { + u32 low, high; int err; - struct msr_info rv; - memset(&rv, 0, sizeof(rv)); + err = rdmsr_safe_on_cpu(cpu, msr_no, &low, &high); + *q = (u64)high << 32 | low; - rv.msr_no = msr_no; - err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); - *q = rv.reg.q; - - return err ? 
err : rv.err; + return err; } EXPORT_SYMBOL(rdmsrl_safe_on_cpu); -- cgit v1.2.1 From 151ad17fbe5e56afa59709f41980508672c777ce Mon Sep 17 00:00:00 2001 From: Andrew Banman Date: Tue, 27 Mar 2018 17:09:06 -0500 Subject: x86/platform/uv/BAU: Add APIC idt entry BAU uses the old alloc_initr_gate90 method to setup its interrupt. This fails silently as the BAU vector is in the range of APIC vectors that are registered to the spurious interrupt handler. As a consequence BAU broadcasts are not handled, and the broadcast source CPU hangs. Update BAU to use new idt structure. Fixes: dc20b2d52653 ("x86/idt: Move interrupt gate initialization to IDT code") Signed-off-by: Andrew Banman Signed-off-by: Thomas Gleixner Acked-by: Mike Travis Cc: Dimitri Sivanich Cc: Russ Anderson Cc: stable@vger.kernel.org Cc: "H. Peter Anvin" Link: https://lkml.kernel.org/r/1522188546-196177-1-git-send-email-abanman@hpe.com --- arch/x86/include/asm/hw_irq.h | 1 + arch/x86/kernel/idt.c | 3 +++ arch/x86/platform/uv/tlb_uv.c | 2 -- 3 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h index 2851077b6051..32e666e1231e 100644 --- a/arch/x86/include/asm/hw_irq.h +++ b/arch/x86/include/asm/hw_irq.h @@ -36,6 +36,7 @@ extern asmlinkage void kvm_posted_intr_wakeup_ipi(void); extern asmlinkage void kvm_posted_intr_nested_ipi(void); extern asmlinkage void error_interrupt(void); extern asmlinkage void irq_work_interrupt(void); +extern asmlinkage void uv_bau_message_intr1(void); extern asmlinkage void spurious_interrupt(void); extern asmlinkage void thermal_interrupt(void); diff --git a/arch/x86/kernel/idt.c b/arch/x86/kernel/idt.c index 56d99be3706a..5d039c848bd9 100644 --- a/arch/x86/kernel/idt.c +++ b/arch/x86/kernel/idt.c @@ -140,6 +140,9 @@ static const __initconst struct idt_data apic_idts[] = { # ifdef CONFIG_IRQ_WORK INTG(IRQ_WORK_VECTOR, irq_work_interrupt), # endif +#ifdef CONFIG_X86_UV + INTG(UV_BAU_MESSAGE, uv_bau_message_intr1), +#endif INTG(SPURIOUS_APIC_VECTOR, spurious_interrupt), INTG(ERROR_APIC_VECTOR, error_interrupt), #endif diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index db77e087adaf..b36caae0fb2f 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -2255,8 +2255,6 @@ static int __init uv_bau_init(void) init_uvhub(uvhub, vector, uv_base_pnode); } - alloc_intr_gate(vector, uv_bau_message_intr1); - for_each_possible_blade(uvhub) { if (uv_blade_nr_possible_cpus(uvhub)) { unsigned long val; -- cgit v1.2.1 From 07344b15a994acadbe439aa4e75127ed1ccca099 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Tue, 27 Mar 2018 17:07:11 -0500 Subject: x86/boot: Fix SEV boot failure from change to __PHYSICAL_MASK_SHIFT In arch/x86/boot/compressed/kaslr_64.c, CONFIG_AMD_MEM_ENCRYPT support was initially #undef'd to support SME with minimal effort. When support for SEV was added, the #undef remained and some minimal support for setting the encryption bit was added for building identity mapped pagetable entries. Commit b83ce5ee9147 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52") changed __PHYSICAL_MASK_SHIFT from 46 to 52 in support of 5-level paging. This change resulted in SEV guests failing to boot because the encryption bit was no longer being automatically masked out. 
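(For orientation only: the masking in question is of the form

	#define __sme_clr(x)	((x) & ~sme_me_mask)

i.e. the C-bit is stripped from a page table entry's physical-address bits before it is evaluated; the exact kernel definitions differ in detail, this is just a sketch of why an undefined sme_me_mask in the compressed stub leaves the bit set.)
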
The compressed boot path now requires sme_me_mask to be defined in order for the pagetable functions, such as pud_present(), to properly mask out the encryption bit (currently bit 47) when evaluating pagetable entries. Add an sme_me_mask variable in arch/x86/boot/compressed/mem_encrypt.S, which is set when SEV is active, delete the #undef CONFIG_AMD_MEM_ENCRYPT from arch/x86/boot/compressed/kaslr_64.c and use sme_me_mask when building the identify mapped pagetable entries. Fixes: b83ce5ee9147 ("x86/mm/64: Make __PHYSICAL_MASK_SHIFT always 52") Signed-off-by: Tom Lendacky Signed-off-by: Thomas Gleixner Cc: Borislav Petkov Cc: Brijesh Singh Cc: "Kirill A. Shutemov" Cc: "H. Peter Anvin" Link: https://lkml.kernel.org/r/20180327220711.8702.55842.stgit@tlendack-t1.amdoffice.net --- arch/x86/boot/compressed/kaslr_64.c | 14 ++++---------- arch/x86/boot/compressed/mem_encrypt.S | 17 ++++++++++------- arch/x86/boot/compressed/misc.h | 2 +- 3 files changed, 15 insertions(+), 18 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c index b5e5e02f8cde..522d11431433 100644 --- a/arch/x86/boot/compressed/kaslr_64.c +++ b/arch/x86/boot/compressed/kaslr_64.c @@ -16,13 +16,6 @@ #define __pa(x) ((unsigned long)(x)) #define __va(x) ((void *)((unsigned long)(x))) -/* - * The pgtable.h and mm/ident_map.c includes make use of the SME related - * information which is not used in the compressed image support. Un-define - * the SME support to avoid any compile and link errors. - */ -#undef CONFIG_AMD_MEM_ENCRYPT - /* No PAGE_TABLE_ISOLATION support needed either: */ #undef CONFIG_PAGE_TABLE_ISOLATION @@ -85,13 +78,14 @@ static struct x86_mapping_info mapping_info; /* Locates and clears a region for a new top level page table. */ void initialize_identity_maps(void) { - unsigned long sev_me_mask = get_sev_encryption_mask(); + /* If running as an SEV guest, the encryption mask is required. */ + set_sev_encryption_mask(); /* Init mapping_info with run-time function/buffer pointers. */ mapping_info.alloc_pgt_page = alloc_pgt_page; mapping_info.context = &pgt_data; - mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask; - mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask; + mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sme_me_mask; + mapping_info.kernpg_flag = _KERNPG_TABLE; /* * It should be impossible for this not to already be true, diff --git a/arch/x86/boot/compressed/mem_encrypt.S b/arch/x86/boot/compressed/mem_encrypt.S index 54f5f6625a73..eaa843a52907 100644 --- a/arch/x86/boot/compressed/mem_encrypt.S +++ b/arch/x86/boot/compressed/mem_encrypt.S @@ -88,9 +88,7 @@ ENTRY(get_sev_encryption_bit) ENDPROC(get_sev_encryption_bit) .code64 -ENTRY(get_sev_encryption_mask) - xor %rax, %rax - +ENTRY(set_sev_encryption_mask) #ifdef CONFIG_AMD_MEM_ENCRYPT push %rbp push %rdx @@ -101,9 +99,7 @@ ENTRY(get_sev_encryption_mask) testl %eax, %eax jz .Lno_sev_mask - xor %rdx, %rdx - bts %rax, %rdx /* Create the encryption mask */ - mov %rdx, %rax /* ... 
and return it */ + bts %rax, sme_me_mask(%rip) /* Create the encryption mask */ .Lno_sev_mask: movq %rbp, %rsp /* Restore original stack pointer */ @@ -112,9 +108,16 @@ ENTRY(get_sev_encryption_mask) pop %rbp #endif + xor %rax, %rax ret -ENDPROC(get_sev_encryption_mask) +ENDPROC(set_sev_encryption_mask) .data enc_bit: .int 0xffffffff + +#ifdef CONFIG_AMD_MEM_ENCRYPT + .balign 8 +GLOBAL(sme_me_mask) + .quad 0 +#endif diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h index 4d369c308ed7..9e11be4cae19 100644 --- a/arch/x86/boot/compressed/misc.h +++ b/arch/x86/boot/compressed/misc.h @@ -114,6 +114,6 @@ static inline void console_init(void) { } #endif -unsigned long get_sev_encryption_mask(void); +void set_sev_encryption_mask(void); #endif -- cgit v1.2.1 From 17a1079d9c63579d6e392a526405b8888e9ede37 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Sat, 24 Mar 2018 21:18:35 -0700 Subject: KVM: x86: Fix pv tlb flush dependencies MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PV TLB FLUSH can only be turned on when steal time is enabled. The condition got reversed during conflict resolution. Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Fixes: 4f2f61fc5071 ("KVM: X86: Avoid traversing all the cpus for pv tlb flush when steal time is disabled") [Rebased on top of kvm/master and reworded the commit message. - Radim] Signed-off-by: Radim Krčmář --- arch/x86/kernel/kvm.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index bc1a27280c4b..fae86e36e399 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -546,7 +546,7 @@ static void __init kvm_guest_init(void) } if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && - !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) + kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) pv_mmu_ops.flush_tlb_others = kvm_flush_tlb_others; if (kvm_para_has_feature(KVM_FEATURE_PV_EOI)) @@ -635,7 +635,7 @@ static __init int kvm_setup_pv_tlb_flush(void) int cpu; if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) && - !kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { + kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) { for_each_possible_cpu(cpu) { zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu), GFP_KERNEL, cpu_to_node(cpu)); -- cgit v1.2.1 From bd47a85acd727e27b7283daff557865ad04c59f6 Mon Sep 17 00:00:00 2001 From: "mike.travis@hpe.com" Date: Wed, 28 Mar 2018 12:40:11 -0500 Subject: x86/platform/UV: Fix critical UV MMR address error A critical error was found testing the fixed UV4 HUB in that an MMR address was found to be incorrect. This causes the virtual address space for accessing the MMIOH1 region to be allocated with the incorrect size. 
Fixes: 673aa20c55a1 ("x86/platform/UV: Update uv_mmrs.h to prepare for UV4A fixes") Signed-off-by: Mike Travis Signed-off-by: Thomas Gleixner Cc: Dimitri Sivanich Cc: Russ Anderson Cc: Andrew Banman Link: https://lkml.kernel.org/r/20180328174011.041801248@stormcage.americas.sgi.com --- arch/x86/include/asm/uv/uv_mmrs.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/include/asm/uv/uv_mmrs.h b/arch/x86/include/asm/uv/uv_mmrs.h index ecb9ddef128f..62c79e26a59a 100644 --- a/arch/x86/include/asm/uv/uv_mmrs.h +++ b/arch/x86/include/asm/uv/uv_mmrs.h @@ -3833,7 +3833,7 @@ union uvh_rh_gam_mmioh_overlay_config0_mmr_u { #define UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR") #define UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR uv_undefined("UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR") #define UV3H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x1603000UL -#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x483000UL +#define UV4H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR 0x484000UL #define UVH_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR ( \ is_uv1_hub() ? UV1H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \ is_uv2_hub() ? UV2H_RH_GAM_MMIOH_OVERLAY_CONFIG1_MMR : \ -- cgit v1.2.1 From e2efacb6a54ab54626da3507be1008d0040492cc Mon Sep 17 00:00:00 2001 From: Yazen Ghannam Date: Mon, 26 Mar 2018 14:15:25 -0500 Subject: Revert "x86/mce/AMD: Collect error info even if valid bits are not set" This reverts commit 4b1e84276a6172980c5bf39aa091ba13e90d6dad. Software uses the valid bits to decide if the values can be used for further processing or other actions. So setting the valid bits will have software act on values that it shouldn't be acting on. The recommendation to save all the register values does not mean that the values are always valid. Signed-off-by: Yazen Ghannam Signed-off-by: Thomas Gleixner Cc: tony.luck@intel.com Cc: Yazen Ghannam Cc: bp@suse.de Cc: linux-edac@vger.kernel.org Link: https://lkml.kernel.org/r/20180326191526.64314-1-Yazen.Ghannam@amd.com --- arch/x86/kernel/cpu/mcheck/mce.c | 14 -------------- 1 file changed, 14 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 21962c48dad7..3c1eec17312b 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -446,20 +446,6 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs) if (mca_cfg.rip_msr) m->ip = mce_rdmsrl(mca_cfg.rip_msr); } - - /* - * Error handlers should save the values in MCA_ADDR, MCA_MISC0, and - * MCA_SYND even if MCA_STATUS[AddrV], MCA_STATUS[MiscV], and - * MCA_STATUS[SyndV] are zero. - */ - if (m->cpuvendor == X86_VENDOR_AMD) { - u64 status = MCI_STATUS_ADDRV | MCI_STATUS_MISCV; - - if (mce_flags.smca) - status |= MCI_STATUS_SYNDV; - - m->status |= status; - } } int mce_available(struct cpuinfo_x86 *c) -- cgit v1.2.1 From 6ed70cf342de03c7b11cd4eb032705faeb29d284 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Thu, 29 Mar 2018 15:06:48 +0300 Subject: perf/x86/pt, coresight: Clean up address filter structure This is a cosmetic patch that deals with the address filter structure's ambiguous fields 'filter' and 'range'. The former stands to mean that the filter's *action* should be to filter the traces to its address range if it's set or stop tracing if it's unset. This is confusing and hard on the eyes, so this patch replaces it with 'action' enum. 
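(For reference, the resulting generic-side description is roughly an enum along the lines of

	enum perf_addr_filter_action_t {
		PERF_ADDR_FILTER_ACTION_STOP,
		PERF_ADDR_FILTER_ACTION_START,
		PERF_ADDR_FILTER_ACTION_FILTER,
	};

stored in the address filter; only the START and FILTER values are visible in the x86/PT hunk below, so the STOP member and exact layout are assumed here.)
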
The 'range' field is completely redundant (meaning that the filter is an address range as opposed to a single address trigger), as we can use zero size to mean the same thing. Signed-off-by: Alexander Shishkin Acked-by: Mathieu Poirier Acked-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: Will Deacon Link: http://lkml.kernel.org/r/20180329120648.11902-1-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 81fd41d5a0d9..3b993942a0e4 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1186,8 +1186,12 @@ static int pt_event_addr_filters_validate(struct list_head *filters) int range = 0; list_for_each_entry(filter, filters, entry) { - /* PT doesn't support single address triggers */ - if (!filter->range || !filter->size) + /* + * PT doesn't support single address triggers and + * 'start' filters. + */ + if (!filter->size || + filter->action == PERF_ADDR_FILTER_ACTION_START) return -EOPNOTSUPP; if (!filter->inode) { @@ -1227,7 +1231,10 @@ static void pt_event_addr_filters_sync(struct perf_event *event) filters->filter[range].msr_a = msr_a; filters->filter[range].msr_b = msr_b; - filters->filter[range].config = filter->filter ? 1 : 2; + if (filter->action == PERF_ADDR_FILTER_ACTION_FILTER) + filters->filter[range].config = 1; + else + filters->filter[range].config = 2; range++; } -- cgit v1.2.1 From d6289f36aa7d5893d091a7a0c67eee7798719f03 Mon Sep 17 00:00:00 2001 From: Cao jin Date: Fri, 16 Mar 2018 16:49:44 +0800 Subject: x86/build: Don't pass in -D__KERNEL__ multiple times Some ..cmd files under arch/x86 are showing two instances of -D__KERNEL__, like arch/x86/boot/ and arch/x86/realmode/rm/. __KERNEL__ is already defined in KBUILD_CPPFLAGS in the top Makefile, so it can be dropped safely. Signed-off-by: Cao jin Cc: H. 
Peter Anvin Cc: Linus Torvalds Cc: Masahiro Yamada Cc: Michal Marek Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/20180316084944.3997-1-caoj.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/Makefile | 3 +-- arch/x86/boot/compressed/Makefile | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/Makefile b/arch/x86/Makefile index d798e36d103c..a517852dad55 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -31,8 +31,7 @@ endif CODE16GCC_CFLAGS := -m32 -Wa,$(srctree)/arch/x86/boot/code16gcc.h M16_CFLAGS := $(call cc-option, -m16, $(CODE16GCC_CFLAGS)) -REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -D__KERNEL__ \ - -DDISABLE_BRANCH_PROFILING \ +REALMODE_CFLAGS := $(M16_CFLAGS) -g -Os -DDISABLE_BRANCH_PROFILING \ -Wall -Wstrict-prototypes -march=i386 -mregparm=3 \ -fno-strict-aliasing -fomit-frame-pointer -fno-pic \ -mno-mmx -mno-sse diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile index f25e1530e064..f484ae0ece93 100644 --- a/arch/x86/boot/compressed/Makefile +++ b/arch/x86/boot/compressed/Makefile @@ -26,7 +26,7 @@ KCOV_INSTRUMENT := n targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma \ vmlinux.bin.xz vmlinux.bin.lzo vmlinux.bin.lz4 -KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ -O2 +KBUILD_CFLAGS := -m$(BITS) -O2 KBUILD_CFLAGS += -fno-strict-aliasing $(call cc-option, -fPIE, -fPIC) KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING cflags-$(CONFIG_X86_32) := -march=i386 -- cgit v1.2.1 From eaeb8e76cd5751e805f6e4a3fcec91d283e3b0c2 Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 13 Mar 2018 15:47:09 +0000 Subject: x86/cpu/tme: Fix spelling: "configuation" -> "configuration" Trivial fix to spelling mistake in the pr_err_once() error message text. Signed-off-by: Colin Ian King Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-janitors@vger.kernel.org Link: http://lkml.kernel.org/r/20180313154709.1015-1-colin.king@canonical.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/intel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86') diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 6106d11ceb6b..b9693b80fc21 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -540,7 +540,7 @@ static void detect_tme(struct cpuinfo_x86 *c) if (mktme_status != MKTME_UNINITIALIZED) { if (tme_activate != tme_activate_cpu0) { /* Broken BIOS? */ - pr_err_once("x86/tme: configuation is inconsistent between CPUs\n"); + pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); pr_err_once("x86/tme: MKTME is not usable\n"); mktme_status = MKTME_DISABLED; -- cgit v1.2.1 From 490d03e83da2a5e9d7db84b1ec30a9c95415787e Mon Sep 17 00:00:00 2001 From: Harry Pan Date: Fri, 9 Mar 2018 20:15:47 +0800 Subject: perf/x86/intel: Add Cannon Lake support for RAPL profiling This patch enables RAPL counters (energy consumption counters) support for Cannon Lake processors. ( ESU and power domains refer to Intel Software Developers' Manual, Vol. 4, Order No. 335592. 
) Usage example: $ perf list $ perf stat -a -e power/energy-cores/,power/energy-pkg/ sleep 10 Tested-by: Puthikorn Voravootivat Signed-off-by: Harry Pan Reviewed-by: Benson Leung Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: colin.king@canonical.com Cc: gs0622@gmail.com Cc: kan.liang@linux.intel.com Link: http://lkml.kernel.org/r/20180309121549.630-2-harry.pan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/rapl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index a2efb490f743..32f3e9423e99 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -774,6 +774,8 @@ static const struct x86_cpu_id rapl_cpu_match[] __initconst = { X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_MOBILE, skl_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_KABYLAKE_DESKTOP, skl_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_CANNONLAKE_MOBILE, skl_rapl_init), + X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_GOLDMONT, hsw_rapl_init), X86_RAPL_MODEL_MATCH(INTEL_FAM6_ATOM_DENVERTON, hsw_rapl_init), -- cgit v1.2.1 From 1159e09476536250c2a0173d4298d15114df7a89 Mon Sep 17 00:00:00 2001 From: Harry Pan Date: Fri, 9 Mar 2018 20:15:48 +0800 Subject: perf/x86/intel: Enable C-state residency events for Cannon Lake Cannon Lake supports C1/C3/C6/C7, PC2/PC3/PC6/PC7/PC8/PC9/PC10 state residency counters, this patch enables those counters. ( The MSR information is based on Intel Software Developers' Manual, Vol. 4, Order No. 335592. ) Tested-by: Puthikorn Voravootivat Signed-off-by: Harry Pan Reviewed-by: Benson Leung Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Kan.liang@intel.com Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: gs0622@gmail.com Link: http://lkml.kernel.org/r/20180309121549.630-3-harry.pan@intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/cstate.c | 44 +++++++++++++++++++++++++++++------------- 1 file changed, 31 insertions(+), 13 deletions(-) (limited to 'arch/x86') diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 72db0664a53d..9aca448bb8e6 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -40,50 +40,51 @@ * Model specific counters: * MSR_CORE_C1_RES: CORE C1 Residency Counter * perf code: 0x00 - * Available model: SLM,AMT,GLM + * Available model: SLM,AMT,GLM,CNL * Scope: Core (each processor core has a MSR) * MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter * perf code: 0x01 - * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM, + CNL * Scope: Core * MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter * perf code: 0x02 - * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW - * SKL,KNL,GLM + * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW, + * SKL,KNL,GLM,CNL * Scope: Core * MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter * perf code: 0x03 - * Available model: SNB,IVB,HSW,BDW,SKL + * Available model: SNB,IVB,HSW,BDW,SKL,CNL * Scope: Core * MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter. * perf code: 0x00 - * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM + * Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL * Scope: Package (physical package) * MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter. 
* perf code: 0x01 - * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL - * GLM + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL, + * GLM,CNL * Scope: Package (physical package) * MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter. * perf code: 0x02 * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW - * SKL,KNL,GLM + * SKL,KNL,GLM,CNL * Scope: Package (physical package) * MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter. * perf code: 0x03 - * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL + * Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,CNL * Scope: Package (physical package) * MSR_PKG_C8_RESIDENCY: Package C8 Residency Counter. * perf code: 0x04 - * Available model: HSW ULT only + * Available model: HSW ULT,CNL * Scope: Package (physical package) * MSR_PKG_C9_RESIDENCY: Package C9 Residency Counter. * perf code: 0x05 - * Available model: HSW ULT only + * Available model: HSW ULT,CNL * Scope: Package (physical package) * MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter. * perf code: 0x06 - * Available model: HSW ULT, GLM + * Available model: HSW ULT,GLM,CNL * Scope: Package (physical package) * */ @@ -486,6 +487,21 @@ static const struct cstate_model hswult_cstates __initconst = { BIT(PERF_CSTATE_PKG_C10_RES), }; +static const struct cstate_model cnl_cstates __initconst = { + .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | + BIT(PERF_CSTATE_CORE_C3_RES) | + BIT(PERF_CSTATE_CORE_C6_RES) | + BIT(PERF_CSTATE_CORE_C7_RES), + + .pkg_events = BIT(PERF_CSTATE_PKG_C2_RES) | + BIT(PERF_CSTATE_PKG_C3_RES) | + BIT(PERF_CSTATE_PKG_C6_RES) | + BIT(PERF_CSTATE_PKG_C7_RES) | + BIT(PERF_CSTATE_PKG_C8_RES) | + BIT(PERF_CSTATE_PKG_C9_RES) | + BIT(PERF_CSTATE_PKG_C10_RES), +}; + static const struct cstate_model slm_cstates __initconst = { .core_events = BIT(PERF_CSTATE_CORE_C1_RES) | BIT(PERF_CSTATE_CORE_C6_RES), @@ -557,6 +573,8 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = { X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_MOBILE, snb_cstates), X86_CSTATES_MODEL(INTEL_FAM6_KABYLAKE_DESKTOP, snb_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_CANNONLAKE_MOBILE, cnl_cstates), + X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNL, knl_cstates), X86_CSTATES_MODEL(INTEL_FAM6_XEON_PHI_KNM, knl_cstates), -- cgit v1.2.1
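For reference, a minimal user-space sketch of how an address filter reaches the PT code changed in the pt.c patch above: a filter string set with PERF_EVENT_IOC_SET_FILTER is parsed by the perf core and then handed to pt_event_addr_filters_validate(). This is not part of the patch; the sysfs path, the 0x1000/0x200 range and the /usr/bin/ls object are purely illustrative, and error handling is minimal. Per the new checks, a 'start' filter or a zero-size (single-address) filter is rejected with EOPNOTSUPP; only 'filter' ranges (trace-enable, config = 1) and 'stop' ranges (config = 2) are accepted.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <stdio.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr = { 0 };
	FILE *f;
	int type, fd;

	/* Dynamic PMU type: /sys/bus/event_source/devices/intel_pt/type */
	f = fopen("/sys/bus/event_source/devices/intel_pt/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1) {
		fprintf(stderr, "intel_pt PMU not available\n");
		return 1;
	}
	fclose(f);

	attr.size = sizeof(attr);
	attr.type = type;
	attr.exclude_kernel = 1;

	fd = perf_event_open(&attr, 0, -1, -1, 0);	/* trace this task */
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/*
	 * Address-range filter: 0x200 bytes at offset 0x1000 of the object.
	 * A "start ..." filter or a zero-size single address would be
	 * rejected with EOPNOTSUPP by pt_event_addr_filters_validate().
	 */
	if (ioctl(fd, PERF_EVENT_IOC_SET_FILTER,
		  "filter 0x1000/0x200@/usr/bin/ls") < 0)
		perror("PERF_EVENT_IOC_SET_FILTER");

	close(fd);
	return 0;
}

The perf tool drives the same interface with something like: perf record -e intel_pt// --filter 'filter 0x1000/0x200@/usr/bin/ls' -- ls (again illustrative; see the perf-record man page for the exact filter syntax).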
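Similarly, the RAPL events added above for Cannon Lake can be read through the "power" PMU without the perf tool. The sketch below is not from the patch: it assumes the usual sysfs layout under /sys/bus/event_source/devices/power/, hard-codes config = 0x02 for energy-pkg (which is what the energy-pkg sysfs entry typically reports; a robust program would parse that file instead), and needs the usual perf privileges for a system-wide event. Opening the event on a single CPU is enough because RAPL counters are package-scoped.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr = { 0 };
	unsigned long long count;
	double scale;
	FILE *f;
	int type, fd;

	f = fopen("/sys/bus/event_source/devices/power/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	/* Joules per raw count, e.g. 2.3283064365386962890625e-10 */
	f = fopen("/sys/bus/event_source/devices/power/events/energy-pkg.scale", "r");
	if (!f || fscanf(f, "%lf", &scale) != 1)
		return 1;
	fclose(f);

	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x02;	/* energy-pkg, as reported by sysfs */

	/* RAPL is package-scoped: open system-wide on one CPU of the package. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	sleep(10);	/* the counter accumulates from open */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("energy-pkg: %.2f Joules\n", count * scale);

	close(fd);
	return 0;
}

This is the programmatic equivalent of the perf stat invocation quoted in the RAPL changelog above.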
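The C-state residency counters enabled in the last patch follow the same pattern through the cstate_core and cstate_pkg PMUs, so a separate sketch would largely repeat the one above. Assuming the event names exposed under /sys/bus/event_source/devices/cstate_*/events (typically cX-residency; treat the exact names as an assumption and check sysfs), something like: perf stat -a -e cstate_core/c6-residency/,cstate_pkg/c10-residency/ sleep 10 reads the new Cannon Lake counters. Core residencies are per-core scope and the PC-state residencies are package scope, matching the Scope annotations in the comment block updated above.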