From f79b1c573cb4dc551919f81ed5797419f6fc1f3a Mon Sep 17 00:00:00 2001
From: Rajneesh Bhardwaj
Date: Thu, 29 Mar 2018 20:36:55 +0530
Subject: x86/i8237: Register device based on FADT legacy boot flag
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

From Skylake onwards, the platform controller hub (Sunrisepoint PCH) does
not support legacy DMA operations to IO ports 81h-83h, 87h, 89h-8Bh, 8Fh.
Currently this driver registers as syscore ops and its resume function is
called on every resume from S3. On Skylake and Kabylake, this causes a
resume delay of around 100ms due to port IO operations, which is a
problem.

This change loads the driver only when the platform BIOS explicitly
supports such devices or has a cut-off date earlier than 2017, for the
following reasons:

- Platforms released before 2017 support the 8237 (except the
  Sunrisepoint PCH, e.g. Skylake).
- Some BIOSes released for these platforms (Skylake, Kabylake) during
  2016-17 are buggy: they do not set or clear the
  ACPI_FADT_LEGACY_DEVICES flag in the FADT table correctly based on the
  presence or absence of the DMA device.

Very recently, open source system firmware like coreboot started clearing
the ACPI_FADT_LEGACY_DEVICES flag in the FADT table if the 8237 DMA
device is not present on the PCH.

Please refer to chapter 21 of the 6th Generation Intel® Core™ Processor
Platform Controller Hub Family: BIOS Specification.

Signed-off-by: Rajneesh Bhardwaj
Signed-off-by: Anshuman Gupta
Signed-off-by: Thomas Gleixner
Reviewed-by: Andy Shevchenko
Cc: rjw@rjwysocki.net
Cc: hpa@zytor.com
Cc: Alan Cox
Link: https://lkml.kernel.org/r/1522336015-22994-1-git-send-email-anshuman.gupta@intel.com
---
 arch/x86/include/asm/x86_init.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index ce8b4da07e35..db98e3ab3295 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -301,5 +301,6 @@ extern struct x86_apic_ops x86_apic_ops;
 extern void x86_early_init_platform_quirks(void);
 extern void x86_init_noop(void);
 extern void x86_init_uint_noop(unsigned int unused);
+extern bool x86_pnpbios_disabled(void);
 
 #endif
--
cgit v1.2.1
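
For illustration, the load gate described in the changelog above could look
roughly like the sketch below. It is not the patch body (the hunk above only
adds the x86_pnpbios_disabled() declaration); the initcall, the placeholder
syscore ops and the use of dmi_get_bios_year() for the 2017 cut-off are
assumptions based on the text of the changelog.

#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/errno.h>
#include <linux/syscore_ops.h>
#include <asm/x86_init.h>

/* Placeholder resume hook standing in for the real 8237A register restore. */
static void i8237A_resume(void) { }

static struct syscore_ops i8237_syscore_ops = {
	.resume	= i8237A_resume,
};

static int __init i8237A_init_ops(void)
{
	/*
	 * Skip registration when the firmware does not announce legacy
	 * devices (ACPI_FADT_LEGACY_DEVICES clear) and the BIOS is newer
	 * than the 2017 cut-off, i.e. the 8237 can be assumed absent.
	 */
	if (x86_pnpbios_disabled() && dmi_get_bios_year() >= 2017)
		return -ENODEV;

	register_syscore_ops(&i8237_syscore_ops);
	return 0;
}
device_initcall(i8237A_init_ops);
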

From 046c0dbec0238c25b7526c26c9a9687664229ce2 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Tue, 5 Jun 2018 13:35:15 +0200
Subject: x86: Mark native_set_p4d() as __always_inline

When CONFIG_OPTIMIZE_INLINING is enabled, the function native_set_p4d()
may not be fully inlined into the caller, resulting in a false-positive
warning about an access to the __pgtable_l5_enabled variable from a
non-__init function, despite the original caller being an __init
function:

WARNING: vmlinux.o(.text.unlikely+0x1429): Section mismatch in reference from the function native_set_p4d() to the variable .init.data:__pgtable_l5_enabled
WARNING: vmlinux.o(.text.unlikely+0x1429): Section mismatch in reference from the function native_p4d_clear() to the variable .init.data:__pgtable_l5_enabled
The function native_set_p4d() references the variable __initdata
__pgtable_l5_enabled. This is often because native_set_p4d lacks a
__initdata annotation or the annotation of __pgtable_l5_enabled is wrong.

Marking native_set_p4d() and its caller native_p4d_clear() as
__always_inline avoids this problem.

I did not bisect the original cause, but I assume this is related to the
recent rework that turned pgtable_l5_enabled() into an inline function,
which in turn caused the compiler to make different inlining decisions.

Fixes: ad3fe525b950 ("x86/mm: Unify pgtable_l5_enabled usage in early boot code")
Signed-off-by: Arnd Bergmann
Signed-off-by: Thomas Gleixner
Acked-by: Kirill A. Shutemov
Cc: Greg Kroah-Hartman
Cc: Dave Hansen
Cc: "H. Peter Anvin"
Cc: Andrew Morton
Cc: Zi Yan
Cc: Naoya Horiguchi
Link: https://lkml.kernel.org/r/20180605113715.1133726-1-arnd@arndb.de
---
 arch/x86/include/asm/pgtable_64.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 877bc27718ae..c750112cb416 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
 }
 #endif
 
-static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 {
 	pgd_t pgd;
 
@@ -230,7 +230,7 @@ static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
 	*p4dp = native_make_p4d(native_pgd_val(pgd));
 }
 
-static inline void native_p4d_clear(p4d_t *p4d)
+static __always_inline void native_p4d_clear(p4d_t *p4d)
 {
 	native_set_p4d(p4d, native_make_p4d(0));
 }
--
cgit v1.2.1
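
A minimal, self-contained illustration of the pattern behind the warning,
using hypothetical names rather than the kernel source: an inline helper
that reads an __initdata variable is only safe while it stays inlined into
__init callers; once the compiler emits it out of line into regular .text,
modpost flags the .text -> .init.data reference.

#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Hypothetical boot-time flag that lives in .init.data. */
static bool __initdata feature_enabled;

/*
 * Safe only as long as every caller is __init code *and* the compiler
 * really inlines it; forcing the inlining keeps the .init.data access
 * inside the __init caller, so modpost sees no section mismatch.
 */
static __always_inline bool feature_is_enabled(void)
{
	return feature_enabled;
}

static int __init feature_setup(void)
{
	return feature_is_enabled() ? 0 : -ENODEV;
}
early_initcall(feature_setup);
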
Peter Anvin" Link: https://lkml.kernel.org/r/20180518113028.79825-1-kirill.shutemov@linux.intel.com --- arch/x86/include/asm/page_types.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h index 1e53560a84bb..c85e15010f48 100644 --- a/arch/x86/include/asm/page_types.h +++ b/arch/x86/include/asm/page_types.h @@ -17,7 +17,6 @@ #define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) #define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) -#define __PHYSICAL_MASK ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1))) #define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1) /* Cast *PAGE_MASK to a signed type so that it is sign-extended if @@ -55,6 +54,13 @@ #ifndef __ASSEMBLY__ +#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK +extern phys_addr_t physical_mask; +#define __PHYSICAL_MASK physical_mask +#else +#define __PHYSICAL_MASK ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1)) +#endif + extern int devmem_is_allowed(unsigned long pagenr); extern unsigned long max_low_pfn_mapped; -- cgit v1.2.1 From 838d76d63ec4eaeaa12bedfa50f261480f615200 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 1 Jun 2018 14:50:31 +0800 Subject: x86/vector: Fix the args of vector_alloc tracepoint The vector_alloc tracepont reversed the reserved and ret aggs, that made the trace print wrong. Exchange them. Fixes: 8d1e3dca7de6 ("x86/vector: Add tracepoints for vector management") Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: hpa@zytor.com Cc: stable@vger.kernel.org Link: https://lkml.kernel.org/r/20180601065031.21872-1-douly.fnst@cn.fujitsu.com --- arch/x86/include/asm/trace/irq_vectors.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch/x86/include') diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h index 22647a642e98..0af81b590a0c 100644 --- a/arch/x86/include/asm/trace/irq_vectors.h +++ b/arch/x86/include/asm/trace/irq_vectors.h @@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc, TP_PROTO(unsigned int irq, unsigned int vector, bool reserved, int ret), - TP_ARGS(irq, vector, ret, reserved), + TP_ARGS(irq, vector, reserved, ret), TP_STRUCT__entry( __field( unsigned int, irq ) -- cgit v1.2.1 From c0255770ccdc77ef2184d2a0a2e0cde09d2b44a4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Mon, 4 Jun 2018 17:33:55 +0200 Subject: x86/apic: Provide apic_ack_irq() apic_ack_edge() is explicitely for handling interrupt affinity cleanup when interrupt remapping is not available or disable. Remapped interrupts and also some of the platform specific special interrupts, e.g. UV, invoke ack_APIC_irq() directly. To address the issue of failing an affinity update with -EBUSY the delayed affinity mechanism can be reused, but ack_APIC_irq() does not handle that. Adding this to ack_APIC_irq() is not possible, because that function is also used for exceptions and directly handled interrupts like IPIs. Create a new function, which just contains the conditional invocation of irq_move_irq() and the final ack_APIC_irq(). Reuse the new function in apic_ack_edge(). Preparatory change for the real fix. 

From 838d76d63ec4eaeaa12bedfa50f261480f615200 Mon Sep 17 00:00:00 2001
From: Dou Liyang
Date: Fri, 1 Jun 2018 14:50:31 +0800
Subject: x86/vector: Fix the args of vector_alloc tracepoint

The vector_alloc tracepoint reversed the reserved and ret args, which
made the trace output wrong. Exchange them.

Fixes: 8d1e3dca7de6 ("x86/vector: Add tracepoints for vector management")
Signed-off-by: Dou Liyang
Signed-off-by: Thomas Gleixner
Cc: hpa@zytor.com
Cc: stable@vger.kernel.org
Link: https://lkml.kernel.org/r/20180601065031.21872-1-douly.fnst@cn.fujitsu.com
---
 arch/x86/include/asm/trace/irq_vectors.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/trace/irq_vectors.h b/arch/x86/include/asm/trace/irq_vectors.h
index 22647a642e98..0af81b590a0c 100644
--- a/arch/x86/include/asm/trace/irq_vectors.h
+++ b/arch/x86/include/asm/trace/irq_vectors.h
@@ -236,7 +236,7 @@ TRACE_EVENT(vector_alloc,
 	TP_PROTO(unsigned int irq, unsigned int vector, bool reserved,
 		 int ret),
 
-	TP_ARGS(irq, vector, ret, reserved),
+	TP_ARGS(irq, vector, reserved, ret),
 
 	TP_STRUCT__entry(
 		__field(	unsigned int,	irq		)
--
cgit v1.2.1


From c0255770ccdc77ef2184d2a0a2e0cde09d2b44a4 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner
Date: Mon, 4 Jun 2018 17:33:55 +0200
Subject: x86/apic: Provide apic_ack_irq()

apic_ack_edge() is explicitly for handling interrupt affinity cleanup
when interrupt remapping is not available or disabled. Remapped
interrupts and also some of the platform specific special interrupts,
e.g. UV, invoke ack_APIC_irq() directly.

To address the issue of failing an affinity update with -EBUSY, the
delayed affinity mechanism can be reused, but ack_APIC_irq() does not
handle that. Adding this to ack_APIC_irq() is not possible, because that
function is also used for exceptions and directly handled interrupts
like IPIs.

Create a new function, which just contains the conditional invocation of
irq_move_irq() and the final ack_APIC_irq().

Reuse the new function in apic_ack_edge().

Preparatory change for the real fix.

Fixes: dccfe3147b42 ("x86/vector: Simplify vector move cleanup")
Signed-off-by: Thomas Gleixner
Tested-by: Song Liu
Cc: Joerg Roedel
Cc: Peter Zijlstra
Cc: Song Liu
Cc: Dmitry Safonov <0x7f454c46@gmail.com>
Cc: stable@vger.kernel.org
Cc: Mike Travis
Cc: Borislav Petkov
Cc: Tariq Toukan
Link: https://lkml.kernel.org/r/20180604162224.471925894@linutronix.de
---
 arch/x86/include/asm/apic.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'arch/x86/include')

diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 08acd954f00e..74a9e06b6cfd 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -436,6 +436,8 @@ static inline void apic_set_eoi_write(void (*eoi_write)(u32 reg, u32 v)) {}
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+extern void apic_ack_irq(struct irq_data *data);
+
 static inline void ack_APIC_irq(void)
 {
 	/*
--
cgit v1.2.1
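
Based on the changelog, the new helper and its reuse in apic_ack_edge(),
which live in arch/x86/kernel/apic/vector.c rather than in the header hunk
above, would look roughly like the sketch below; treat it as an illustration
of the description, not a verbatim copy of the patch.

#include <linux/irq.h>
#include <asm/apic.h>
#include <asm/hw_irq.h>

void apic_ack_irq(struct irq_data *irqd)
{
	/* Performs a pending (delayed) affinity move, if any ... */
	irq_move_irq(irqd);
	/* ... and then issues the final EOI to the local APIC. */
	ack_APIC_irq();
}

void apic_ack_edge(struct irq_data *irqd)
{
	/* Old-style vector cleanup for non-remapped interrupts ... */
	irq_complete_move(irqd_cfg(irqd));
	/* ... followed by the shared move + ack sequence. */
	apic_ack_irq(irqd);
}
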