author     Tom Rini <trini@konsulko.com>  2019-10-03 18:50:01 -0400
committer  Tom Rini <trini@konsulko.com>  2019-10-03 18:50:01 -0400
commit     207b3825d9262e2420db91a5e3403c6dc86c93f1
tree       5ed1c9b31e921e14c02fb586941d9894d650f73f
parent     ea4316cdb340ce1c05db5d6a869d1840dad68508
branch     WIP/KLUDGE-update-compiler-headers

compiler*h: Sync with v5.4-rc1
Signed-off-by: Tom Rini <trini@konsulko.com>
Diffstat:
 arch/arc/include/asm/barrier.h        |  51
 arch/arm/include/asm/barrier.h        | 103
 arch/m68k/include/asm/barrier.h       |   1
 arch/microblaze/include/asm/barrier.h |   1
 arch/mips/include/asm/barrier.h       | 294
 arch/nds32/include/asm/barrier.h      |  15
 arch/nios2/include/asm/barrier.h      |   1
 arch/powerpc/include/asm/barrier.h    | 104
 arch/riscv/include/asm/barrier.h      |   9
 arch/sandbox/include/asm/barrier.h    |   1
 arch/sh/include/asm/barrier.h         |  45
 arch/x86/include/asm/barrier.h        |  87
 arch/xtensa/include/asm/barrier.h     |  25
 include/asm-generic/barrier.h         | 261
 include/linux/compiler-clang.h        |  44
 include/linux/compiler-gcc.h          | 259
 include/linux/compiler-intel.h        |  17
 include/linux/compiler.h              | 504
 include/linux/compiler_attributes.h   | 256
 include/linux/compiler_types.h        | 246
 include/linux/kasan-checks.h          |  44
 lib/vsprintf.c                        |   2
 22 files changed, 1807 insertions, 563 deletions
diff --git a/arch/arc/include/asm/barrier.h b/arch/arc/include/asm/barrier.h
new file mode 100644
index 0000000000..7823811e7c
--- /dev/null
+++ b/arch/arc/include/asm/barrier.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
+ */
+
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#ifdef CONFIG_ISA_ARCV2
+
+/*
+ * ARCv2 based HS38 cores are in-order issue, but still weakly ordered
+ * due to micro-arch buffering/queuing of load/store, cache hit vs. miss ...
+ *
+ * Explicit barrier provided by DMB instruction
+ *  - Operand supports fine grained load/store/load+store semantics
+ *  - Ensures that selected memory operation issued before it will complete
+ *    before any subsequent memory operation of same type
+ *  - DMB guarantees SMP as well as local barrier semantics
+ *    (asm-generic/barrier.h ensures sane smp_*mb if not defined here, i.e.
+ *    UP: barrier(), SMP: smp_*mb == *mb)
+ *  - DSYNC provides DMB+completion_of_cache_bpu_maintenance_ops hence not needed
+ *    in the general case. Plus it only provides full barrier.
+ */
+
+#define mb()	asm volatile("dmb 3\n" : : : "memory")
+#define rmb()	asm volatile("dmb 1\n" : : : "memory")
+#define wmb()	asm volatile("dmb 2\n" : : : "memory")
+
+#elif !defined(CONFIG_ARC_PLAT_EZNPS)	/* CONFIG_ISA_ARCOMPACT */
+
+/*
+ * ARCompact based cores (ARC700) only have SYNC instruction which is super
+ * heavy weight as it flushes the pipeline as well.
+ * There are no real SMP implementations of such cores.
+ */
+
+#define mb()	asm volatile("sync\n" : : : "memory")
+
+#else	/* CONFIG_ARC_PLAT_EZNPS */
+
+#include <plat/ctop.h>
+
+#define mb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RW) : "memory")
+#define rmb()	asm volatile (".word %0" : : "i"(CTOP_INST_SCHD_RD) : "memory")
+
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif
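As a usage sketch, this is the pattern the *mb() macros above exist for (hypothetical DMA descriptor and doorbell register, not part of this diff; writel() as in U-Boot's asm/io.h):

    /* Publish a DMA descriptor, then ring the doorbell. The wmb() keeps the
     * descriptor stores from being reordered after the MMIO write that makes
     * the device look at them. */
    struct dma_desc { u32 addr; u32 len; u32 flags; };

    static void dma_kick(struct dma_desc *d, u32 buf, u32 len, void *doorbell)
    {
        d->addr = buf;
        d->len = len;
        d->flags = DESC_OWNED_BY_HW;    /* hypothetical ownership flag */
        wmb();                          /* order descriptor writes before the kick */
        writel(1, doorbell);
    }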
"memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#else +#define isb(x) __asm__ __volatile__ ("" : : : "memory") +#define dsb(x) __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 4" \ + : : "r" (0) : "memory") +#define dmb(x) __asm__ __volatile__ ("" : : : "memory") +#endif + +#ifndef CSDB +#define CSDB +#endif +#ifndef csdb +#define csdb() +#endif + +#ifdef CONFIG_ARM_HEAVY_MB +extern void (*soc_mb)(void); +extern void arm_heavy_mb(void); +#define __arm_heavy_mb(x...) do { dsb(x); arm_heavy_mb(); } while (0) +#else +#define __arm_heavy_mb(x...) dsb(x) +#endif + +#if defined(CONFIG_ARM_DMA_MEM_BUFFERABLE) || defined(CONFIG_SMP) +#define mb() __arm_heavy_mb() +#define rmb() dsb() +#define wmb() __arm_heavy_mb(st) +#define dma_rmb() dmb(osh) +#define dma_wmb() dmb(oshst) +#else +#define mb() barrier() +#define rmb() barrier() +#define wmb() barrier() +#define dma_rmb() barrier() +#define dma_wmb() barrier() +#endif + +#define __smp_mb() dmb(ish) +#define __smp_rmb() __smp_mb() +#define __smp_wmb() dmb(ishst) + +#ifdef CONFIG_CPU_SPECTRE +static inline unsigned long array_index_mask_nospec(unsigned long idx, + unsigned long sz) +{ + unsigned long mask; + + asm volatile( + "cmp %1, %2\n" + " sbc %0, %1, %1\n" + CSDB + : "=r" (mask) + : "r" (idx), "Ir" (sz) + : "cc"); + + return mask; +} +#define array_index_mask_nospec array_index_mask_nospec +#endif + +#include <asm-generic/barrier.h> + +#endif /* !__ASSEMBLY__ */ +#endif /* __ASM_BARRIER_H */ diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h new file mode 100644 index 0000000000..918df5c298 --- /dev/null +++ b/arch/m68k/include/asm/barrier.h @@ -0,0 +1 @@ +#include <asm-generic/barrier.h> diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h new file mode 100644 index 0000000000..918df5c298 --- /dev/null +++ b/arch/microblaze/include/asm/barrier.h @@ -0,0 +1 @@ +#include <asm-generic/barrier.h> diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h new file mode 100644 index 0000000000..9228f73862 --- /dev/null +++ b/arch/mips/include/asm/barrier.h @@ -0,0 +1,294 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org) + */ +#ifndef __ASM_BARRIER_H +#define __ASM_BARRIER_H + +#include <asm/addrspace.h> + +/* + * Sync types defined by the MIPS architecture (document MD00087 table 6.5) + * These values are used with the sync instruction to perform memory barriers. + * Types of ordering guarantees available through the SYNC instruction: + * - Completion Barriers + * - Ordering Barriers + * As compared to the completion barrier, the ordering barrier is a + * lighter-weight operation as it does not require the specified instructions + * before the SYNC to be already completed. Instead it only requires that those + * specified instructions which are subsequent to the SYNC in the instruction + * stream are never re-ordered for processing ahead of the specified + * instructions which are before the SYNC in the instruction stream. + * This potentially reduces how many cycles the barrier instruction must stall + * before it completes. + * Implementations that do not use any of the non-zero values of stype to define + * different barriers, such as ordering barriers, must make those stype values + * act the same as stype zero. 
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
new file mode 100644
index 0000000000..918df5c298
--- /dev/null
+++ b/arch/m68k/include/asm/barrier.h
@@ -0,0 +1 @@
+#include <asm-generic/barrier.h>
diff --git a/arch/microblaze/include/asm/barrier.h b/arch/microblaze/include/asm/barrier.h
new file mode 100644
index 0000000000..918df5c298
--- /dev/null
+++ b/arch/microblaze/include/asm/barrier.h
@@ -0,0 +1 @@
+#include <asm-generic/barrier.h>
diff --git a/arch/mips/include/asm/barrier.h b/arch/mips/include/asm/barrier.h
new file mode 100644
index 0000000000..9228f73862
--- /dev/null
+++ b/arch/mips/include/asm/barrier.h
@@ -0,0 +1,294 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ */
+#ifndef __ASM_BARRIER_H
+#define __ASM_BARRIER_H
+
+#include <asm/addrspace.h>
+
+/*
+ * Sync types defined by the MIPS architecture (document MD00087 table 6.5)
+ * These values are used with the sync instruction to perform memory barriers.
+ * Types of ordering guarantees available through the SYNC instruction:
+ * - Completion Barriers
+ * - Ordering Barriers
+ * As compared to the completion barrier, the ordering barrier is a
+ * lighter-weight operation as it does not require the specified instructions
+ * before the SYNC to be already completed. Instead it only requires that those
+ * specified instructions which are subsequent to the SYNC in the instruction
+ * stream are never re-ordered for processing ahead of the specified
+ * instructions which are before the SYNC in the instruction stream.
+ * This potentially reduces how many cycles the barrier instruction must stall
+ * before it completes.
+ * Implementations that do not use any of the non-zero values of stype to define
+ * different barriers, such as ordering barriers, must make those stype values
+ * act the same as stype zero.
+ */
+
+/*
+ * Completion barriers:
+ * - Every synchronizable specified memory instruction (loads or stores or both)
+ *   that occurs in the instruction stream before the SYNC instruction must be
+ *   already globally performed before any synchronizable specified memory
+ *   instructions that occur after the SYNC are allowed to be performed, with
+ *   respect to any other processor or coherent I/O module.
+ *
+ * - The barrier does not guarantee the order in which instruction fetches are
+ *   performed.
+ *
+ * - A stype value of zero will always be defined such that it performs the most
+ *   complete set of synchronization operations that are defined.This means
+ *   stype zero always does a completion barrier that affects both loads and
+ *   stores preceding the SYNC instruction and both loads and stores that are
+ *   subsequent to the SYNC instruction. Non-zero values of stype may be defined
+ *   by the architecture or specific implementations to perform synchronization
+ *   behaviors that are less complete than that of stype zero. If an
+ *   implementation does not use one of these non-zero values to define a
+ *   different synchronization behavior, then that non-zero value of stype must
+ *   act the same as stype zero completion barrier. This allows software written
+ *   for an implementation with a lighter-weight barrier to work on another
+ *   implementation which only implements the stype zero completion barrier.
+ *
+ * - A completion barrier is required, potentially in conjunction with SSNOP (in
+ *   Release 1 of the Architecture) or EHB (in Release 2 of the Architecture),
+ *   to guarantee that memory reference results are visible across operating
+ *   mode changes. For example, a completion barrier is required on some
+ *   implementations on entry to and exit from Debug Mode to guarantee that
+ *   memory effects are handled correctly.
+ */
+
+/*
+ * stype 0 - A completion barrier that affects preceding loads and stores and
+ * subsequent loads and stores.
+ * Older instructions which must reach the load/store ordering point before the + * SYNC instruction completes: Loads, Stores + * Younger instructions which must reach the load/store ordering point only + * after the SYNC instruction completes: Loads, Stores + * Older instructions which must be globally performed when the SYNC instruction + * completes: N/A + */ +#define STYPE_SYNC_MB 0x10 + +/* + * stype 0x14 - A completion barrier specific to global invalidations + * + * When a sync instruction of this type completes any preceding GINVI or GINVT + * operation has been globalized & completed on all coherent CPUs. Anything + * that the GINV* instruction should invalidate will have been invalidated on + * all coherent CPUs when this instruction completes. It is implementation + * specific whether the GINV* instructions themselves will ensure completion, + * or this sync type will. + * + * In systems implementing global invalidates (ie. with Config5.GI == 2 or 3) + * this sync type also requires that previous SYNCI operations have completed. + */ +#define STYPE_GINV 0x14 + +#ifdef CONFIG_CPU_HAS_SYNC +#define __sync() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + ".set mips2\n\t" \ + "sync\n\t" \ + ".set pop" \ + : /* no output */ \ + : /* no input */ \ + : "memory") +#else +#define __sync() do { } while(0) +#endif + +#define __fast_iob() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "lw $0,%0\n\t" \ + "nop\n\t" \ + ".set pop" \ + : /* no output */ \ + : "m" (*(int *)CKSEG1) \ + : "memory") +#ifdef CONFIG_CPU_CAVIUM_OCTEON +# define OCTEON_SYNCW_STR ".set push\n.set arch=octeon\nsyncw\nsyncw\n.set pop\n" +# define __syncw() __asm__ __volatile__(OCTEON_SYNCW_STR : : : "memory") + +# define fast_wmb() __syncw() +# define fast_rmb() barrier() +# define fast_mb() __sync() +# define fast_iob() do { } while (0) +#else /* ! CONFIG_CPU_CAVIUM_OCTEON */ +# define fast_wmb() __sync() +# define fast_rmb() __sync() +# define fast_mb() __sync() +# ifdef CONFIG_SGI_IP28 +# define fast_iob() \ + __asm__ __volatile__( \ + ".set push\n\t" \ + ".set noreorder\n\t" \ + "lw $0,%0\n\t" \ + "sync\n\t" \ + "lw $0,%0\n\t" \ + ".set pop" \ + : /* no output */ \ + : "m" (*(int *)CKSEG1ADDR(0x1fa00004)) \ + : "memory") +# else +# define fast_iob() \ + do { \ + __sync(); \ + __fast_iob(); \ + } while (0) +# endif +#endif /* CONFIG_CPU_CAVIUM_OCTEON */ + +#ifdef CONFIG_CPU_HAS_WB + +#include <asm/wbflush.h> + +#define mb() wbflush() +#define iob() wbflush() + +#else /* !CONFIG_CPU_HAS_WB */ + +#define mb() fast_mb() +#define iob() fast_iob() + +#endif /* !CONFIG_CPU_HAS_WB */ + +#define wmb() fast_wmb() +#define rmb() fast_rmb() + +#if defined(CONFIG_WEAK_ORDERING) +# ifdef CONFIG_CPU_CAVIUM_OCTEON +# define __smp_mb() __sync() +# define __smp_rmb() barrier() +# define __smp_wmb() __syncw() +# else +# define __smp_mb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_rmb() __asm__ __volatile__("sync" : : :"memory") +# define __smp_wmb() __asm__ __volatile__("sync" : : :"memory") +# endif +#else +#define __smp_mb() barrier() +#define __smp_rmb() barrier() +#define __smp_wmb() barrier() +#endif + +/* + * When LL/SC does imply order, it must also be a compiler barrier to avoid the + * compiler from reordering where the CPU will not. When it does not imply + * order, the compiler is also free to reorder across the LL/SC loop and + * ordering will be done by smp_llsc_mb() and friends. 
+ */ +#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP) +#define __WEAK_LLSC_MB " sync \n" +#define smp_llsc_mb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory") +#define __LLSC_CLOBBER +#else +#define __WEAK_LLSC_MB " \n" +#define smp_llsc_mb() do { } while (0) +#define __LLSC_CLOBBER "memory" +#endif + +#ifdef CONFIG_CPU_CAVIUM_OCTEON +#define smp_mb__before_llsc() smp_wmb() +#define __smp_mb__before_llsc() __smp_wmb() +/* Cause previous writes to become visible on all CPUs as soon as possible */ +#define nudge_writes() __asm__ __volatile__(".set push\n\t" \ + ".set arch=octeon\n\t" \ + "syncw\n\t" \ + ".set pop" : : : "memory") +#else +#define smp_mb__before_llsc() smp_llsc_mb() +#define __smp_mb__before_llsc() smp_llsc_mb() +#define nudge_writes() mb() +#endif + +#define __smp_mb__before_atomic() __smp_mb__before_llsc() +#define __smp_mb__after_atomic() smp_llsc_mb() + +/* + * Some Loongson 3 CPUs have a bug wherein execution of a memory access (load, + * store or prefetch) in between an LL & SC can cause the SC instruction to + * erroneously succeed, breaking atomicity. Whilst it's unusual to write code + * containing such sequences, this bug bites harder than we might otherwise + * expect due to reordering & speculation: + * + * 1) A memory access appearing prior to the LL in program order may actually + * be executed after the LL - this is the reordering case. + * + * In order to avoid this we need to place a memory barrier (ie. a SYNC + * instruction) prior to every LL instruction, in between it and any earlier + * memory access instructions. + * + * This reordering case is fixed by 3A R2 CPUs, ie. 3A2000 models and later. + * + * 2) If a conditional branch exists between an LL & SC with a target outside + * of the LL-SC loop, for example an exit upon value mismatch in cmpxchg() + * or similar, then misprediction of the branch may allow speculative + * execution of memory accesses from outside of the LL-SC loop. + * + * In order to avoid this we need a memory barrier (ie. a SYNC instruction) + * at each affected branch target, for which we also use loongson_llsc_mb() + * defined below. + * + * This case affects all current Loongson 3 CPUs. + * + * The above described cases cause an error in the cache coherence protocol; + * such that the Invalidate of a competing LL-SC goes 'missing' and SC + * erroneously observes its core still has Exclusive state and lets the SC + * proceed. + * + * Therefore the error only occurs on SMP systems. 
+ */ +#ifdef CONFIG_CPU_LOONGSON3_WORKAROUNDS /* Loongson-3's LLSC workaround */ +#define loongson_llsc_mb() __asm__ __volatile__("sync" : : :"memory") +#else +#define loongson_llsc_mb() do { } while (0) +#endif + +static inline void sync_ginv(void) +{ + asm volatile("sync\t%0" :: "i"(STYPE_GINV)); +} + +#include <asm-generic/barrier.h> + +#endif /* __ASM_BARRIER_H */ diff --git a/arch/nds32/include/asm/barrier.h b/arch/nds32/include/asm/barrier.h new file mode 100644 index 0000000000..16413172fd --- /dev/null +++ b/arch/nds32/include/asm/barrier.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +// Copyright (C) 2005-2017 Andes Technology Corporation + +#ifndef __NDS32_ASM_BARRIER_H +#define __NDS32_ASM_BARRIER_H + +#ifndef __ASSEMBLY__ +#define mb() asm volatile("msync all":::"memory") +#define rmb() asm volatile("msync all":::"memory") +#define wmb() asm volatile("msync store":::"memory") +#include <asm-generic/barrier.h> + +#endif /* __ASSEMBLY__ */ + +#endif /* __NDS32_ASM_BARRIER_H */ diff --git a/arch/nios2/include/asm/barrier.h b/arch/nios2/include/asm/barrier.h new file mode 100644 index 0000000000..918df5c298 --- /dev/null +++ b/arch/nios2/include/asm/barrier.h @@ -0,0 +1 @@ +#include <asm-generic/barrier.h> diff --git a/arch/powerpc/include/asm/barrier.h b/arch/powerpc/include/asm/barrier.h new file mode 100644 index 0000000000..fbe8df4330 --- /dev/null +++ b/arch/powerpc/include/asm/barrier.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> + */ +#ifndef _ASM_POWERPC_BARRIER_H +#define _ASM_POWERPC_BARRIER_H + +#include <asm/asm-const.h> + +/* + * Memory barrier. + * The sync instruction guarantees that all memory accesses initiated + * by this processor have been performed (with respect to all other + * mechanisms that access memory). The eieio instruction is a barrier + * providing an ordering (separately) for (a) cacheable stores and (b) + * loads and stores to non-cacheable memory (e.g. I/O devices). + * + * mb() prevents loads and stores being reordered across this point. + * rmb() prevents loads being reordered across this point. + * wmb() prevents stores being reordered across this point. + * read_barrier_depends() prevents data-dependent loads being reordered + * across this point (nop on PPC). + * + * *mb() variants without smp_ prefix must order all types of memory + * operations with one another. sync is the only instruction sufficient + * to do this. + * + * For the smp_ barriers, ordering is for cacheable memory operations + * only. We have to use the sync instruction for smp_mb(), since lwsync + * doesn't order loads with respect to previous stores. Lwsync can be + * used for smp_rmb() and smp_wmb(). + * + * However, on CPUs that don't support lwsync, lwsync actually maps to a + * heavy-weight sync, so smp_wmb() can be a lighter-weight eieio. 
+ */ +#define mb() __asm__ __volatile__ ("sync" : : : "memory") +#define rmb() __asm__ __volatile__ ("sync" : : : "memory") +#define wmb() __asm__ __volatile__ ("sync" : : : "memory") + +/* The sub-arch has lwsync */ +#if defined(__powerpc64__) || defined(CONFIG_PPC_E500MC) +# define SMPWMB LWSYNC +#else +# define SMPWMB eieio +#endif + +#define __lwsync() __asm__ __volatile__ (stringify_in_c(LWSYNC) : : :"memory") +#define dma_rmb() __lwsync() +#define dma_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") + +#define __smp_lwsync() __lwsync() + +#define __smp_mb() mb() +#define __smp_rmb() __lwsync() +#define __smp_wmb() __asm__ __volatile__ (stringify_in_c(SMPWMB) : : :"memory") + +/* + * This is a barrier which prevents following instructions from being + * started until the value of the argument x is known. For example, if + * x is a variable loaded from memory, this prevents following + * instructions from being executed until the load has been performed. + */ +#define data_barrier(x) \ + asm volatile("twi 0,%0,0; isync" : : "r" (x) : "memory"); + +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + __smp_lwsync(); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + __smp_lwsync(); \ + ___p1; \ +}) + +#ifdef CONFIG_PPC_BOOK3S_64 +#define NOSPEC_BARRIER_SLOT nop +#elif defined(CONFIG_PPC_FSL_BOOK3E) +#define NOSPEC_BARRIER_SLOT nop; nop +#endif + +#ifdef CONFIG_PPC_BARRIER_NOSPEC +/* + * Prevent execution of subsequent instructions until preceding branches have + * been fully resolved and are no longer executing speculatively. + */ +#define barrier_nospec_asm NOSPEC_BARRIER_FIXUP_SECTION; NOSPEC_BARRIER_SLOT + +// This also acts as a compiler barrier due to the memory clobber. +#define barrier_nospec() asm (stringify_in_c(barrier_nospec_asm) ::: "memory") + +#else /* !CONFIG_PPC_BARRIER_NOSPEC */ +#define barrier_nospec_asm +#define barrier_nospec() +#endif /* CONFIG_PPC_BARRIER_NOSPEC */ + +#include <asm-generic/barrier.h> + +#endif /* _ASM_POWERPC_BARRIER_H */ diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h index a3f60a8458..3f1737f301 100644 --- a/arch/riscv/include/asm/barrier.h +++ b/arch/riscv/include/asm/barrier.h @@ -1,11 +1,10 @@ -/* SPDX-License-Identifier: GPL-2.0 */ +/* SPDX-License-Identifier: GPL-2.0-only */ /* + * Based on arch/arm/include/asm/barrier.h + * * Copyright (C) 2012 ARM Ltd. 
* Copyright (C) 2013 Regents of the University of California * Copyright (C) 2017 SiFive - * - * Taken from Linux arch/riscv/include/asm/barrier.h, which is based on - * arch/arm/include/asm/barrier.h */ #ifndef _ASM_RISCV_BARRIER_H @@ -62,6 +61,8 @@ do { \ */ #define smp_mb__after_spinlock() RISCV_FENCE(rw,rw) +#include <asm-generic/barrier.h> + #endif /* __ASSEMBLY__ */ #endif /* _ASM_RISCV_BARRIER_H */ diff --git a/arch/sandbox/include/asm/barrier.h b/arch/sandbox/include/asm/barrier.h new file mode 100644 index 0000000000..918df5c298 --- /dev/null +++ b/arch/sandbox/include/asm/barrier.h @@ -0,0 +1 @@ +#include <asm-generic/barrier.h> diff --git a/arch/sh/include/asm/barrier.h b/arch/sh/include/asm/barrier.h new file mode 100644 index 0000000000..66faae19d2 --- /dev/null +++ b/arch/sh/include/asm/barrier.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 1999, 2000 Niibe Yutaka & Kaz Kojima + * Copyright (C) 2002 Paul Mundt + */ +#ifndef __ASM_SH_BARRIER_H +#define __ASM_SH_BARRIER_H + +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) +#include <asm/cache_insns.h> +#endif + +/* + * A brief note on ctrl_barrier(), the control register write barrier. + * + * Legacy SH cores typically require a sequence of 8 nops after + * modification of a control register in order for the changes to take + * effect. On newer cores (like the sh4a and sh5) this is accomplished + * with icbi. + * + * Also note that on sh4a in the icbi case we can forego a synco for the + * write barrier, as it's not necessary for control registers. + * + * Historically we have only done this type of barrier for the MMUCR, but + * it's also necessary for the CCR, so we make it generic here instead. + */ +#if defined(CONFIG_CPU_SH4A) || defined(CONFIG_CPU_SH5) +#define mb() __asm__ __volatile__ ("synco": : :"memory") +#define rmb() mb() +#define wmb() mb() +#define ctrl_barrier() __icbi(PAGE_OFFSET) +#else +#if defined(CONFIG_CPU_J2) && defined(CONFIG_SMP) +#define __smp_mb() do { int tmp = 0; __asm__ __volatile__ ("cas.l %0,%0,@%1" : "+r"(tmp) : "z"(&tmp) : "memory", "t"); } while(0) +#define __smp_rmb() __smp_mb() +#define __smp_wmb() __smp_mb() +#endif +#define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop") +#endif + +#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0) + +#include <asm-generic/barrier.h> + +#endif /* __ASM_SH_BARRIER_H */ diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h new file mode 100644 index 0000000000..7f828fe497 --- /dev/null +++ b/arch/x86/include/asm/barrier.h @@ -0,0 +1,87 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_X86_BARRIER_H +#define _ASM_X86_BARRIER_H + +#include <asm/alternative.h> +#include <asm/nops.h> + +/* + * Force strict CPU ordering. + * And yes, this might be required on UP too when we're talking + * to devices. 
+ */
+
+#ifdef CONFIG_X86_32
+#define mb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "mfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define rmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "lfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#define wmb() asm volatile(ALTERNATIVE("lock; addl $0,-4(%%esp)", "sfence", \
+				      X86_FEATURE_XMM2) ::: "memory", "cc")
+#else
+#define mb()	asm volatile("mfence":::"memory")
+#define rmb()	asm volatile("lfence":::"memory")
+#define wmb()	asm volatile("sfence" ::: "memory")
+#endif
+
+/**
+ * array_index_mask_nospec() - generate a mask that is ~0UL when the
+ *	bounds check succeeds and 0 otherwise
+ * @index: array element index
+ * @size: number of elements in array
+ *
+ * Returns:
+ *     0 - (index < size)
+ */
+static inline unsigned long array_index_mask_nospec(unsigned long index,
+		unsigned long size)
+{
+	unsigned long mask;
+
+	asm volatile ("cmp %1,%2; sbb %0,%0;"
+			:"=r" (mask)
+			:"g"(size),"r" (index)
+			:"cc");
+	return mask;
+}
+
+/* Override the default implementation from linux/nospec.h. */
+#define array_index_mask_nospec array_index_mask_nospec
+
+/* Prevent speculative execution past this barrier. */
+#define barrier_nospec() alternative("", "lfence", X86_FEATURE_LFENCE_RDTSC)
+
+#define dma_rmb()	barrier()
+#define dma_wmb()	barrier()
+
+#ifdef CONFIG_X86_32
+#define __smp_mb()	asm volatile("lock; addl $0,-4(%%esp)" ::: "memory", "cc")
+#else
+#define __smp_mb()	asm volatile("lock; addl $0,-4(%%rsp)" ::: "memory", "cc")
+#endif
+#define __smp_rmb()	dma_rmb()
+#define __smp_wmb()	barrier()
+#define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
+
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+
+/* Atomic operations are already serializing on x86 */
+#define __smp_mb__before_atomic()	do { } while (0)
+#define __smp_mb__after_atomic()	do { } while (0)
+
+#include <asm-generic/barrier.h>
+
+#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/xtensa/include/asm/barrier.h b/arch/xtensa/include/asm/barrier.h
new file mode 100644
index 0000000000..d6f8d4ddc2
--- /dev/null
+++ b/arch/xtensa/include/asm/barrier.h
@@ -0,0 +1,25 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _XTENSA_SYSTEM_H
+#define _XTENSA_SYSTEM_H
+
+#include <asm/core.h>
+
+#define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#if XCHAL_HAVE_S32C1I
+#define __smp_mb__before_atomic()	barrier()
+#define __smp_mb__after_atomic()	barrier()
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* _XTENSA_SYSTEM_H */
diff --git a/include/asm-generic/barrier.h b/include/asm-generic/barrier.h
new file mode 100644
index 0000000000..85b28eb80b
--- /dev/null
+++ b/include/asm-generic/barrier.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Generic barrier definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but it serves more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+#ifndef __ASM_GENERIC_BARRIER_H
+#define __ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+#ifndef nop
+#define nop()	asm volatile ("nop")
+#endif
+
+/*
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
+ *
+ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+#ifndef mb
+#define mb()	barrier()
+#endif
+
+#ifndef rmb
+#define rmb()	mb()
+#endif
+
+#ifndef wmb
+#define wmb()	mb()
+#endif
+
+#ifndef dma_rmb
+#define dma_rmb()	rmb()
+#endif
+
+#ifndef dma_wmb
+#define dma_wmb()	wmb()
+#endif
+
+#ifndef read_barrier_depends
+#define read_barrier_depends()	do { } while (0)
+#endif
+
+#ifndef __smp_mb
+#define __smp_mb()	mb()
+#endif
+
+#ifndef __smp_rmb
+#define __smp_rmb()	rmb()
+#endif
+
+#ifndef __smp_wmb
+#define __smp_wmb()	wmb()
+#endif
+
+#ifndef __smp_read_barrier_depends
+#define __smp_read_barrier_depends()	read_barrier_depends()
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_mb
+#define smp_mb()	__smp_mb()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb()	__smp_rmb()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb()	__smp_wmb()
+#endif
+
+#ifndef smp_read_barrier_depends
+#define smp_read_barrier_depends()	__smp_read_barrier_depends()
+#endif
+
+#else	/* !CONFIG_SMP */
+
+#ifndef smp_mb
+#define smp_mb()	barrier()
+#endif
+
+#ifndef smp_rmb
+#define smp_rmb()	barrier()
+#endif
+
+#ifndef smp_wmb
+#define smp_wmb()	barrier()
+#endif
+
+#ifndef smp_read_barrier_depends
+#define smp_read_barrier_depends()	do { } while (0)
+#endif
+
+#endif	/* CONFIG_SMP */
+
+#ifndef __smp_store_mb
+#define __smp_store_mb(var, value)  do { WRITE_ONCE(var, value); __smp_mb(); } while (0)
+#endif
+
+#ifndef __smp_mb__before_atomic
+#define __smp_mb__before_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_mb__after_atomic
+#define __smp_mb__after_atomic()	__smp_mb()
+#endif
+
+#ifndef __smp_store_release
+#define __smp_store_release(p, v)					\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef __smp_load_acquire
+#define __smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	__smp_mb();							\
+	___p1;								\
+})
+#endif
+
+#ifdef CONFIG_SMP
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  __smp_store_mb(var, value)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	__smp_mb__before_atomic()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	__smp_mb__after_atomic()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v) __smp_store_release(p, v)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p) __smp_load_acquire(p)
+#endif
+
+#else	/* !CONFIG_SMP */
+
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); barrier(); } while (0)
+#endif
+
+#ifndef smp_mb__before_atomic
+#define smp_mb__before_atomic()	barrier()
+#endif
+
+#ifndef smp_mb__after_atomic
+#define smp_mb__after_atomic()	barrier()
+#endif
+
+#ifndef smp_store_release
+#define smp_store_release(p, v)						\
+do {									\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	WRITE_ONCE(*p, v);						\
+} while (0)
+#endif
+
+#ifndef smp_load_acquire
+#define smp_load_acquire(p)						\
+({									\
+	typeof(*p) ___p1 = READ_ONCE(*p);				\
+	compiletime_assert_atomic_type(*p);				\
+	barrier();							\
+	___p1;								\
+})
+#endif
+
+#endif	/* CONFIG_SMP */
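The store_release/load_acquire pair above is the canonical publish/consume pattern. A minimal sketch (hypothetical producer and consumer, not part of this diff):

    /* Producer: fill the payload, then publish the flag with release
     * semantics so the payload writes cannot be reordered after it. */
    struct message { int payload; int ready; };
    static struct message msg;

    void producer(void)
    {
        msg.payload = 42;
        smp_store_release(&msg.ready, 1);   /* barrier + WRITE_ONCE */
    }

    /* Consumer: read the flag with acquire semantics; if it observes
     * ready == 1, the payload written before the release is visible. */
    int consumer(void)
    {
        if (smp_load_acquire(&msg.ready))   /* READ_ONCE + barrier */
            return msg.payload;
        return -1;
    }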
+/* Barriers for virtual machine guests when talking to an SMP host */
+#define virt_mb() __smp_mb()
+#define virt_rmb() __smp_rmb()
+#define virt_wmb() __smp_wmb()
+#define virt_read_barrier_depends() __smp_read_barrier_depends()
+#define virt_store_mb(var, value) __smp_store_mb(var, value)
+#define virt_mb__before_atomic() __smp_mb__before_atomic()
+#define virt_mb__after_atomic() __smp_mb__after_atomic()
+#define virt_store_release(p, v) __smp_store_release(p, v)
+#define virt_load_acquire(p) __smp_load_acquire(p)
+
+/**
+ * smp_acquire__after_ctrl_dep() - Provide ACQUIRE ordering after a control dependency
+ *
+ * A control dependency provides a LOAD->STORE order, the additional RMB
+ * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order,
+ * aka. (load)-ACQUIRE.
+ *
+ * Architectures that do not do load speculation can have this be barrier().
+ */
+#ifndef smp_acquire__after_ctrl_dep
+#define smp_acquire__after_ctrl_dep()	smp_rmb()
+#endif
+
+/**
+ * smp_cond_load_relaxed() - (Spin) wait for cond with no ordering guarantees
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using READ_ONCE() on the condition variable.
+ *
+ * Due to C lacking lambda expressions we load the value of *ptr into a
+ * pre-named variable @VAL to be used in @cond.
+ */
+#ifndef smp_cond_load_relaxed
+#define smp_cond_load_relaxed(ptr, cond_expr) ({		\
+	typeof(ptr) __PTR = (ptr);				\
+	typeof(*ptr) VAL;					\
+	for (;;) {						\
+		VAL = READ_ONCE(*__PTR);			\
+		if (cond_expr)					\
+			break;					\
+		cpu_relax();					\
+	}							\
+	VAL;							\
+})
+#endif
+
+/**
+ * smp_cond_load_acquire() - (Spin) wait for cond with ACQUIRE ordering
+ * @ptr: pointer to the variable to wait on
+ * @cond: boolean expression to wait for
+ *
+ * Equivalent to using smp_load_acquire() on the condition variable but employs
+ * the control dependency of the wait to reduce the barrier on many platforms.
+ */
+#ifndef smp_cond_load_acquire
+#define smp_cond_load_acquire(ptr, cond_expr) ({		\
+	typeof(*ptr) _val;					\
+	_val = smp_cond_load_relaxed(ptr, cond_expr);		\
+	smp_acquire__after_ctrl_dep();				\
+	_val;							\
+})
+#endif
+
+#endif	/* !__ASSEMBLY__ */
+#endif	/* __ASM_GENERIC_BARRIER_H */
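The cond-load macros above are what spin-wait loops are built from. A small sketch (hypothetical handshake variable and STATE_READY value, not part of this diff):

    /* Spin until another CPU publishes a state change. smp_cond_load_acquire()
     * re-reads *ptr with READ_ONCE() each iteration, exposes it to the
     * condition as VAL, and adds ACQUIRE ordering once the condition holds. */
    void wait_for_state(unsigned int *state)
    {
        smp_cond_load_acquire(state, VAL == STATE_READY);
    }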
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index d1e49d52b6..333a6695a9 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -1,12 +1,44 @@
-#ifndef __LINUX_COMPILER_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
 #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead."
 #endif
 
-/* Some compiler specific definitions are overwritten here
- * for Clang compiler
- */
+/* Compiler specific definitions for Clang compiler */
 
-#ifdef uninitialized_var
-#undef uninitialized_var
 #define uninitialized_var(x) x = *(&(x))
+
+/* same as gcc, this was present in clang-2.6 so we can assume it works
+ * with any version that can compile the kernel
+ */
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+/* all clang versions usable with the kernel support KASAN ABI version 5 */
+#define KASAN_ABI_VERSION 5
+
+#if __has_feature(address_sanitizer) || __has_feature(hwaddress_sanitizer)
+/* emulate gcc's __SANITIZE_ADDRESS__ flag */
+#define __SANITIZE_ADDRESS__
+#define __no_sanitize_address \
+		__attribute__((no_sanitize("address", "hwaddress")))
+#else
+#define __no_sanitize_address
 #endif
+
+/*
+ * Not all versions of clang implement the the type-generic versions
+ * of the builtin overflow checkers. Fortunately, clang implements
+ * __has_builtin allowing us to avoid awkward version
+ * checks. Unfortunately, we don't know which version of gcc clang
+ * pretends to be, so the macro may or may not be defined.
+ */
+#if __has_builtin(__builtin_mul_overflow) && \
+    __has_builtin(__builtin_add_overflow) && \
+    __has_builtin(__builtin_sub_overflow)
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+#endif
+
+/* The following are for compatibility with GCC, from compiler-gcc.h,
+ * and may be redefined here because they should not be shared with other
+ * compilers, like ICC.
+ */
+#define barrier() __asm__ __volatile__("" : : : "memory")
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 8d9e079435..d7ee4c6bad 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -1,4 +1,5 @@
-#ifndef __LINUX_COMPILER_H
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_COMPILER_TYPES_H
 #error "Please don't include <linux/compiler-gcc.h> directly, include <linux/compiler.h> instead."
 #endif
 
@@ -9,11 +10,14 @@
 	     + __GNUC_MINOR__ * 100	\
 	     + __GNUC_PATCHLEVEL__)
 
+#if GCC_VERSION < 40600
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
 /* Optimization barrier */
 
 /* The "volatile" is due to gcc bugs */
-#define barrier() \
-	__asm__ __volatile__("": : :"memory")
+#define barrier() __asm__ __volatile__("": : :"memory")
 /*
  * This version is i.e. to prevent dead stores elimination on @ptr
  * where gcc and llvm may behave differently when otherwise using
@@ -22,13 +26,12 @@
  * clobbered. The issue is as follows: while the inline asm might
  * access any memory it wants, the compiler could have fit all of
  * @ptr into memory registers instead, and since @ptr never escaped
- * from that, it proofed that the inline asm wasn't touching any of
+ * from that, it proved that the inline asm wasn't touching any of
 * it. This version works well with both compilers, i.e. we're telling
 * the compiler that the inline asm absolutely may see the contents
 * of @ptr. See also: https://llvm.org/bugs/show_bug.cgi?id=15495
 */
-#define barrier_data(ptr) \
-	__asm__ __volatile__("": :"r"(ptr) :"memory")
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
 
 /*
  * This macro obfuscates arithmetic on a variable address so that gcc
@@ -55,181 +58,54 @@
 	(typeof(ptr)) (__ptr + (off));					\
 })
 
-/* Make the optimizer believe the variable can be manipulated arbitrarily.
*/ -#define OPTIMIZER_HIDE_VAR(var) \ - __asm__ ("" : "=r" (var) : "0" (var)) - -#ifdef __CHECKER__ -#define __must_be_array(a) 0 -#else -/* &a[0] degrades to a pointer: a different type from an array */ -#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) -#endif - -/* - * Force always-inline if the user requests it so via the .config, - * or if gcc is too old: - */ -#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ - !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) -#define inline inline __attribute__((always_inline)) notrace -#define __inline__ __inline__ __attribute__((always_inline)) notrace -#define __inline __inline __attribute__((always_inline)) notrace -#else -/* A lot of inline functions can cause havoc with function tracing */ -#define inline inline notrace -#define __inline__ __inline__ notrace -#define __inline __inline notrace -#endif - -#define __always_inline inline __attribute__((always_inline)) -#define noinline __attribute__((noinline)) - -#define __deprecated __attribute__((deprecated)) -#define __packed __attribute__((packed)) -#define __weak __attribute__((weak)) -#define __alias(symbol) __attribute__((alias(#symbol))) - /* - * it doesn't make sense on ARM (currently the only user of __naked) - * to trace naked functions because then mcount is called without - * stack and frame pointer being set up and there is no chance to - * restore the lr register to the value before mcount was called. - * - * The asm() bodies of naked functions often depend on standard calling - * conventions, therefore they must be noinline and noclone. - * - * GCC 4.[56] currently fail to enforce this, so we must do so ourselves. - * See GCC PR44290. - */ -#define __naked __attribute__((naked)) noinline __noclone notrace - -#define __noreturn __attribute__((noreturn)) - -/* - * From the GCC manual: - * - * Many functions have no effects except the return value and their - * return value depends only on the parameters and/or global - * variables. Such a function can be subject to common subexpression - * elimination and loop optimization just as an arithmetic operator - * would be. - * [...] + * A trick to suppress uninitialized variable warning without generating any + * code */ -#define __pure __attribute__((pure)) -#define __aligned(x) __attribute__((aligned(x))) -#define __printf(a, b) __attribute__((format(printf, a, b))) -#define __scanf(a, b) __attribute__((format(scanf, a, b))) -#define __attribute_const__ __attribute__((__const__)) -#define __maybe_unused __attribute__((unused)) -#define __always_unused __attribute__((unused)) - -/* gcc version specific checks */ - -#if GCC_VERSION < 30200 -# error Sorry, your compiler is too old - please upgrade it. 
-#endif - -#if GCC_VERSION < 30300 -# define __used __attribute__((__unused__)) -#else -# define __used __attribute__((__used__)) -#endif - -#ifdef CONFIG_GCOV_KERNEL -# if GCC_VERSION < 30400 -# error "GCOV profiling support for gcc versions below 3.4 not included" -# endif /* __GNUC_MINOR__ */ -#endif /* CONFIG_GCOV_KERNEL */ +#define uninitialized_var(x) x = x -#if GCC_VERSION >= 30400 -#define __must_check __attribute__((warn_unused_result)) +#ifdef CONFIG_RETPOLINE +#define __noretpoline __attribute__((__indirect_branch__("keep"))) #endif -#if GCC_VERSION >= 40000 +#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) -/* GCC 4.1.[01] miscompiles __weak */ -#ifdef __KERNEL__ -# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101 -# error Your version of gcc miscompiles the __weak directive -# endif -#endif +#define __compiletime_object_size(obj) __builtin_object_size(obj, 0) -#define __used __attribute__((__used__)) -#define __compiler_offsetof(a, b) \ - __builtin_offsetof(a, b) +#define __compiletime_warning(message) __attribute__((__warning__(message))) +#define __compiletime_error(message) __attribute__((__error__(message))) -#if GCC_VERSION >= 40100 && GCC_VERSION < 40600 -# define __compiletime_object_size(obj) __builtin_object_size(obj, 0) +#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) +#define __latent_entropy __attribute__((latent_entropy)) #endif -#if GCC_VERSION >= 40300 -/* Mark functions as cold. gcc will assume any path leading to a call - * to them will be unlikely. This means a lot of manual unlikely()s - * are unnecessary now for any paths leading to the usual suspects - * like BUG(), printk(), panic() etc. [but let's keep them for now for - * older compilers] - * - * Early snapshots of gcc 4.3 don't support this and we can't detect this - * in the preprocessor, but we can live with this because they're unreleased. - * Maketime probing would be overkill here. +/* + * calling noreturn functions, __builtin_unreachable() and __builtin_trap() + * confuse the stack allocation in gcc, leading to overly large stack + * frames, see https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82365 * - * gcc also has a __attribute__((__hot__)) to move hot functions into - * a special section, but I don't see any sense in this right now in - * the kernel context + * Adding an empty inline assembly before it works around the problem */ -#define __cold __attribute__((__cold__)) - -#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__) +#define barrier_before_unreachable() asm volatile("") -#ifndef __CHECKER__ -# define __compiletime_warning(message) __attribute__((warning(message))) -# define __compiletime_error(message) __attribute__((error(message))) -#endif /* __CHECKER__ */ -#endif /* GCC_VERSION >= 40300 */ - -#if GCC_VERSION >= 40500 /* * Mark a position in code as unreachable. This can be used to * suppress control flow warnings after asm blocks that transfer * control elsewhere. - * - * Early snapshots of gcc 4.5 don't support this and we can't detect - * this in the preprocessor, but we can live with this because they're - * unreleased. Really, we need to have autoconf for the kernel. - */ -#define unreachable() __builtin_unreachable() - -/* Mark a function definition as prohibited from being cloned. 
*/ -#define __noclone __attribute__((__noclone__)) - -#endif /* GCC_VERSION >= 40500 */ - -#if GCC_VERSION >= 40600 -/* - * When used with Link Time Optimization, gcc can optimize away C functions or - * variables which are referenced only from assembly code. __visible tells the - * optimizer that something else uses this function or variable, thus preventing - * this. */ -#define __visible __attribute__((externally_visible)) -#endif - +#define unreachable() \ + do { \ + annotate_unreachable(); \ + barrier_before_unreachable(); \ + __builtin_unreachable(); \ + } while (0) -#if GCC_VERSION >= 40900 && !defined(__CHECKER__) -/* - * __assume_aligned(n, k): Tell the optimizer that the returned - * pointer can be assumed to be k modulo n. The second argument is - * optional (default 0), so we use a variadic macro to make the - * shorthand. - * - * Beware: Do not apply this to functions which may return - * ERR_PTRs. Also, it is probably unwise to apply it to functions - * returning extra information in the low bits (but in that case the - * compiler should see some alignment anyway, when the return value is - * massaged by 'flags = ptr & 3; ptr &= ~3;'). - */ -#define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#if defined(RANDSTRUCT_PLUGIN) && !defined(__CHECKER__) +#define __randomize_layout __attribute__((randomize_layout)) +#define __no_randomize_layout __attribute__((no_randomize_layout)) +/* This anon struct can add padding, so only enable it under randstruct. */ +#define randomized_struct_fields_start struct { +#define randomized_struct_fields_end } __randomize_layout; #endif /* @@ -243,43 +119,56 @@ */ #define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0) -#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP -#if GCC_VERSION >= 40400 +/* + * sparse (__CHECKER__) pretends to be gcc, but can't do constant + * folding in __builtin_bswap*() (yet), so don't set these for it. + */ +#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__) #define __HAVE_BUILTIN_BSWAP32__ #define __HAVE_BUILTIN_BSWAP64__ -#endif -#if GCC_VERSION >= 40800 || (defined(__powerpc__) && GCC_VERSION >= 40600) +#if GCC_VERSION >= 40800 #define __HAVE_BUILTIN_BSWAP16__ #endif -#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */ +#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP && !__CHECKER__ */ -#if GCC_VERSION >= 50000 +#if GCC_VERSION >= 70000 +#define KASAN_ABI_VERSION 5 +#elif GCC_VERSION >= 50000 #define KASAN_ABI_VERSION 4 #elif GCC_VERSION >= 40902 #define KASAN_ABI_VERSION 3 #endif -#if GCC_VERSION >= 40902 -/* - * Tell the compiler that address safety instrumentation (KASAN) - * should not be applied to that function. - * Conflicts with inlining: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 - */ +#if __has_attribute(__no_sanitize_address__) #define __no_sanitize_address __attribute__((no_sanitize_address)) +#else +#define __no_sanitize_address #endif -#endif /* gcc version >= 40000 specific checks */ - -#if !defined(__noclone) -#define __noclone /* not needed */ -#endif - -#if !defined(__no_sanitize_address) -#define __no_sanitize_address +#if GCC_VERSION >= 50100 +#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif /* - * A trick to suppress uninitialized variable warning without generating any - * code + * Turn individual warnings and errors on and off locally, depending + * on version. 
*/ -#define uninitialized_var(x) x = x +#define __diag_GCC(version, severity, s) \ + __diag_GCC_ ## version(__diag_GCC_ ## severity s) + +/* Severity used in pragma directives */ +#define __diag_GCC_ignore ignored +#define __diag_GCC_warn warning +#define __diag_GCC_error error + +#define __diag_str1(s) #s +#define __diag_str(s) __diag_str1(s) +#define __diag(s) _Pragma(__diag_str(GCC diagnostic s)) + +#if GCC_VERSION >= 80000 +#define __diag_GCC_8(s) __diag(s) +#else +#define __diag_GCC_8(s) +#endif + +#define __no_fgcse __attribute__((optimize("-fno-gcse"))) diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index d4c71132d0..b17f3cd183 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h @@ -1,22 +1,17 @@ -#ifndef __LINUX_COMPILER_H +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_TYPES_H #error "Please don't include <linux/compiler-intel.h> directly, include <linux/compiler.h> instead." #endif #ifdef __ECC -/* Some compiler specific definitions are overwritten here - * for Intel ECC compiler - */ +/* Compiler specific definitions for Intel ECC compiler */ #include <asm/intrinsics.h> /* Intel ECC compiler doesn't support gcc specific asm stmts. * It uses intrinsics to do the equivalent things. */ -#undef barrier -#undef barrier_data -#undef RELOC_HIDE -#undef OPTIMIZER_HIDE_VAR #define barrier() __memory_barrier() #define barrier_data(ptr) barrier() @@ -32,14 +27,8 @@ */ #define OPTIMIZER_HIDE_VAR(var) barrier() -/* Intel ECC compiler doesn't support __builtin_types_compatible_p() */ -#define __must_be_array(a) 0 - #endif -#ifndef __HAVE_BUILTIN_BSWAP16__ /* icc has this, but it's called _bswap16 */ #define __HAVE_BUILTIN_BSWAP16__ #define __builtin_bswap16 _bswap16 -#endif - diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 0ea6c8fcca..6f5f8df9f9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -1,127 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __LINUX_COMPILER_H #define __LINUX_COMPILER_H -#ifndef __ASSEMBLY__ +#include <linux/compiler_types.h> -#ifdef __CHECKER__ -# define __user __attribute__((noderef, address_space(1))) -# define __kernel __attribute__((address_space(0))) -# define __safe __attribute__((safe)) -# define __force __attribute__((force)) -# define __nocast __attribute__((nocast)) -# define __iomem __attribute__((noderef, address_space(2))) -# define __must_hold(x) __attribute__((context(x,1,1))) -# define __acquires(x) __attribute__((context(x,0,1))) -# define __releases(x) __attribute__((context(x,1,0))) -# define __acquire(x) __context__(x,1) -# define __release(x) __context__(x,-1) -# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) -# define __percpu __attribute__((noderef, address_space(3))) -# define __pmem __attribute__((noderef, address_space(5))) -#ifdef CONFIG_SPARSE_RCU_POINTER -# define __rcu __attribute__((noderef, address_space(4))) -#else -# define __rcu -#endif -extern void __chk_user_ptr(const volatile void __user *); -extern void __chk_io_ptr(const volatile void __iomem *); -#else -# define __user -# define __kernel -# define __safe -# define __force -# define __nocast -# define __iomem -# define __chk_user_ptr(x) (void)0 -# define __chk_io_ptr(x) (void)0 -# define __builtin_warning(x, y...) 
(1) -# define __must_hold(x) -# define __acquires(x) -# define __releases(x) -# define __acquire(x) (void)0 -# define __release(x) (void)0 -# define __cond_lock(x,c) (c) -# define __percpu -# define __rcu -# define __pmem -#endif - -/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ -#define ___PASTE(a,b) a##b -#define __PASTE(a,b) ___PASTE(a,b) +#ifndef __ASSEMBLY__ #ifdef __KERNEL__ -#ifdef __GNUC__ -#include <linux/compiler-gcc.h> -#endif - -#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__) -#define notrace __attribute__((hotpatch(0,0))) -#else -#define notrace __attribute__((no_instrument_function)) -#endif - -/* Intel compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __INTEL_COMPILER -# include <linux/compiler-intel.h> -#endif - -/* Clang compiler defines __GNUC__. So we will overwrite implementations - * coming from above header files here - */ -#ifdef __clang__ -#include <linux/compiler-clang.h> -#endif - -/* - * Generic compiler-dependent macros required for kernel - * build go below this comment. Actual compiler/compiler version - * specific implementations come from the above header files - */ - -struct ftrace_branch_data { - const char *func; - const char *file; - unsigned line; - union { - struct { - unsigned long correct; - unsigned long incorrect; - }; - struct { - unsigned long miss; - unsigned long hit; - }; - unsigned long miss_hit[2]; - }; -}; - /* * Note: DISABLE_BRANCH_PROFILING can be used by special lowlevel code * to disable branch tracing on a per file basis. */ #if defined(CONFIG_TRACE_BRANCH_PROFILING) \ && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__) -void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); +void ftrace_likely_update(struct ftrace_likely_data *f, int val, + int expect, int is_constant); #define likely_notrace(x) __builtin_expect(!!(x), 1) #define unlikely_notrace(x) __builtin_expect(!!(x), 0) -#define __branch_check__(x, expect) ({ \ - int ______r; \ - static struct ftrace_branch_data \ - __attribute__((__aligned__(4))) \ - __attribute__((section("_ftrace_annotated_branch"))) \ +#define __branch_check__(x, expect, is_constant) ({ \ + long ______r; \ + static struct ftrace_likely_data \ + __aligned(4) \ + __section(_ftrace_annotated_branch) \ ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ + .data.func = __func__, \ + .data.file = __FILE__, \ + .data.line = __LINE__, \ }; \ - ______r = likely_notrace(x); \ - ftrace_likely_update(&______f, ______r, expect); \ + ______r = __builtin_expect(!!(x), expect); \ + ftrace_likely_update(&______f, ______r, \ + expect, is_constant); \ ______r; \ }) @@ -131,10 +42,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * written by Daniel Walker. */ # ifndef likely -# define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1)) +# define likely(x) (__branch_check__(x, 1, __builtin_constant_p(x))) # endif # ifndef unlikely -# define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0)) +# define unlikely(x) (__branch_check__(x, 0, __builtin_constant_p(x))) # endif #ifdef CONFIG_PROFILE_ALL_BRANCHES @@ -142,23 +53,24 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); * "Define 'is'", Bill Clinton * "Define 'if'", Steven Rostedt */ -#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) ) -#define __trace_if(cond) \ - if (__builtin_constant_p(!!(cond)) ? 
!!(cond) : \ - ({ \ - int ______r; \ - static struct ftrace_branch_data \ - __attribute__((__aligned__(4))) \ - __attribute__((section("_ftrace_branch"))) \ - ______f = { \ - .func = __func__, \ - .file = __FILE__, \ - .line = __LINE__, \ - }; \ - ______r = !!(cond); \ - ______f.miss_hit[______r]++; \ - ______r; \ - })) +#define if(cond, ...) if ( __trace_if_var( !!(cond , ## __VA_ARGS__) ) ) + +#define __trace_if_var(cond) (__builtin_constant_p(cond) ? (cond) : __trace_if_value(cond)) + +#define __trace_if_value(cond) ({ \ + static struct ftrace_branch_data \ + __aligned(4) \ + __section(_ftrace_branch) \ + __if_trace = { \ + .func = __func__, \ + .file = __FILE__, \ + .line = __LINE__, \ + }; \ + (cond) ? \ + (__if_trace.miss_hit[1]++,1) : \ + (__if_trace.miss_hit[0]++,0); \ +}) + #endif /* CONFIG_PROFILE_ALL_BRANCHES */ #else @@ -175,9 +87,76 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); # define barrier_data(ptr) barrier() #endif +/* workaround for GCC PR82365 if needed */ +#ifndef barrier_before_unreachable +# define barrier_before_unreachable() do { } while (0) +#endif + /* Unreachable code */ +#ifdef CONFIG_STACK_VALIDATION +/* + * These macros help objtool understand GCC code flow for unreachable code. + * The __COUNTER__ based labels are a hack to make each instance of the macros + * unique, to convince GCC not to merge duplicate inline asm statements. + */ +#define annotate_reachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.reachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define annotate_unreachable() ({ \ + asm volatile("%c0:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long %c0b - .\n\t" \ + ".popsection\n\t" : : "i" (__COUNTER__)); \ +}) +#define ASM_UNREACHABLE \ + "999:\n\t" \ + ".pushsection .discard.unreachable\n\t" \ + ".long 999b - .\n\t" \ + ".popsection\n\t" + +/* Annotate a C jump table to allow objtool to follow the code flow */ +#define __annotate_jump_table __section(.rodata..c_jump_table) + +#else +#define annotate_reachable() +#define annotate_unreachable() +#define __annotate_jump_table +#endif + +#ifndef ASM_UNREACHABLE +# define ASM_UNREACHABLE +#endif #ifndef unreachable -# define unreachable() do { } while (1) +# define unreachable() do { \ + annotate_unreachable(); \ + __builtin_unreachable(); \ +} while (0) +#endif + +/* + * KENTRY - kernel entry point + * This can be used to annotate symbols (functions or data) that are used + * without their linker symbol being referenced explicitly. For example, + * interrupt vector handlers, or functions in the kernel image that are found + * programatically. + * + * Not required for symbols exported with EXPORT_SYMBOL, or initcalls. Those + * are handled in their own way (with KEEP() in linker scripts). + * + * KENTRY can be avoided if the symbols in question are marked as KEEP() in the + * linker script. For example an architecture could KEEP() its entire + * boot/exception vector code rather than annotate each function and data. + */ +#ifndef KENTRY +# define KENTRY(sym) \ + extern typeof(sym) sym; \ + static const unsigned long __kentry_##sym \ + __used \ + __section("___kentry" "+" #sym ) \ + = (unsigned long)&sym; #endif #ifndef RELOC_HIDE @@ -188,7 +167,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); #endif #ifndef OPTIMIZER_HIDE_VAR -#define OPTIMIZER_HIDE_VAR(var) barrier() +/* Make the optimizer believe the variable can be manipulated arbitrarily. 
+ */
+#define OPTIMIZER_HIDE_VAR(var)						\
+	__asm__ ("" : "=r" (var) : "0" (var))
 #endif

 /* Not-quite-unique ID. */
@@ -220,23 +201,21 @@ void __read_once_size(const volatile void *p, void *res, int size)

 #ifdef CONFIG_KASAN
 /*
- * This function is not 'inline' because __no_sanitize_address confilcts
+ * We can't declare function 'inline' because __no_sanitize_address conflicts
  * with inlining. Attempt to inline it may cause a build failure.
  * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368
  * '__maybe_unused' allows us to avoid defined-but-not-used warnings.
  */
-static __no_sanitize_address __maybe_unused
-void __read_once_size_nocheck(const volatile void *p, void *res, int size)
-{
-	__READ_ONCE_SIZE;
-}
+# define __no_kasan_or_inline __no_sanitize_address notrace __maybe_unused
 #else
-static __always_inline
+# define __no_kasan_or_inline __always_inline
+#endif
+
+static __no_kasan_or_inline
 void __read_once_size_nocheck(const volatile void *p, void *res, int size)
 {
 	__READ_ONCE_SIZE;
 }
-#endif

 static __always_inline void __write_once_size(volatile void *p, void *res, int size)
 {
@@ -255,24 +234,27 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
 /*
  * Prevent the compiler from merging or refetching reads or writes. The
  * compiler is also forbidden from reordering successive instances of
- * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
- * compiler is aware of some particular ordering. One way to make the
- * compiler aware of ordering is to put the two invocations of READ_ONCE,
- * WRITE_ONCE or ACCESS_ONCE() in different C statements.
+ * READ_ONCE and WRITE_ONCE, but only when the compiler is aware of some
+ * particular ordering. One way to make the compiler aware of ordering is to
+ * put the two invocations of READ_ONCE or WRITE_ONCE in different C
+ * statements.
  *
- * In contrast to ACCESS_ONCE these two macros will also work on aggregate
- * data types like structs or unions. If the size of the accessed data
- * type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
- * compile-time warning.
+ * These two macros will also work on aggregate data types like structs or
+ * unions. If the size of the accessed data type exceeds the word size of
+ * the machine (e.g., 32 bits or 64 bits) READ_ONCE() and WRITE_ONCE() will
+ * fall back to memcpy(). There are at least two memcpy()s: one for the
+ * __builtin_memcpy() and then one for the macro doing the copy of variable
+ * - '__u' allocated on the stack.
  *
  * Their two major use cases are: (1) Mediating communication between
  * process-level code and irq/NMI handlers, all running on the same CPU,
- * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
+ * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
  * mutilate accesses that either do not require ordering or that interact
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
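 *
 * A minimal illustrative sketch (the 'ready' flag and the two contexts are
 * assumptions, not taken from this patch):
 *
 *	static int ready;
 *
 *	IRQ handler:		WRITE_ONCE(ready, 1);
 *	process context:	while (!READ_ONCE(ready))
 *					cpu_relax();
 *
 * Without READ_ONCE() the compiler may legally hoist the load out of the
 * loop and spin forever on a stale register copy.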
*/ +#include <asm/barrier.h> +#include <linux/kasan-checks.h> #define __READ_ONCE(x, check) \ ({ \ @@ -281,6 +263,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __read_once_size(&(x), __u.__c, sizeof(x)); \ else \ __read_once_size_nocheck(&(x), __u.__c, sizeof(x)); \ + smp_read_barrier_depends(); /* Enforce dependency ordering from x */ \ __u.__val; \ }) #define READ_ONCE(x) __READ_ONCE(x, 1) @@ -291,6 +274,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s */ #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) +static __no_kasan_or_inline +unsigned long read_word_at_a_time(const void *addr) +{ + kasan_check_read(addr, 1); + return *(unsigned long *)addr; +} + #define WRITE_ONCE(x, val) \ ({ \ union { typeof(x) __val; char __c[1]; } __u = \ @@ -299,158 +289,28 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s __u.__val; \ }) -/** - * smp_cond_acquire() - Spin wait for cond with ACQUIRE ordering - * @cond: boolean expression to wait for - * - * Equivalent to using smp_load_acquire() on the condition variable but employs - * the control dependency of the wait to reduce the barrier on many platforms. - * - * The control dependency provides a LOAD->STORE order, the additional RMB - * provides LOAD->LOAD order, together they provide LOAD->{LOAD,STORE} order, - * aka. ACQUIRE. - */ -#define smp_cond_acquire(cond) do { \ - while (!(cond)) \ - cpu_relax(); \ - smp_rmb(); /* ctrl + rmb := acquire */ \ -} while (0) - -#endif /* __KERNEL__ */ - -#endif /* __ASSEMBLY__ */ - -#ifdef __KERNEL__ -/* - * Allow us to mark functions as 'deprecated' and have gcc emit a nice - * warning for each use, in hopes of speeding the functions removal. - * Usage is: - * int __deprecated foo(void) - */ -#ifndef __deprecated -# define __deprecated /* unimplemented */ -#endif - -#ifdef MODULE -#define __deprecated_for_modules __deprecated -#else -#define __deprecated_for_modules -#endif - -#ifndef __must_check -#define __must_check -#endif - -#ifndef CONFIG_ENABLE_MUST_CHECK -#undef __must_check -#define __must_check -#endif -#ifndef CONFIG_ENABLE_WARN_DEPRECATED -#undef __deprecated -#undef __deprecated_for_modules -#define __deprecated -#define __deprecated_for_modules -#endif - -/* - * Allow us to avoid 'defined but not used' warnings on functions and data, - * as well as force them to be emitted to the assembly file. - * - * As of gcc 3.4, static functions that are not marked with attribute((used)) - * may be elided from the assembly file. As of gcc 3.4, static data not so - * marked will not be elided, but this may change in a future gcc version. - * - * NOTE: Because distributions shipped with a backported unit-at-a-time - * compiler in gcc 3.3, we must define __used to be __attribute__((used)) - * for gcc >=3.3 instead of 3.4. - * - * In prior versions of gcc, such functions and data would be emitted, but - * would be warned about except with attribute((unused)). - * - * Mark functions that are referenced only in inline assembly as __used so - * the code is emitted even though it appears to be unreferenced. - */ -#ifndef __used -# define __used /* unimplemented */ -#endif - -#ifndef __maybe_unused -# define __maybe_unused /* unimplemented */ -#endif - -#ifndef __always_unused -# define __always_unused /* unimplemented */ -#endif - -#ifndef noinline -#define noinline -#endif - -/* - * Rather then using noinline to prevent stack consumption, use - * noinline_for_stack instead. For documentation reasons. 
- */ -#define noinline_for_stack noinline - -#ifndef __always_inline -#define __always_inline inline -#endif - #endif /* __KERNEL__ */ /* - * From the GCC manual: - * - * Many functions do not examine any values except their arguments, - * and have no effects except the return value. Basically this is - * just slightly more strict class than the `pure' attribute above, - * since function is not allowed to read global memory. - * - * Note that a function that has pointer arguments and examines the - * data pointed to must _not_ be declared `const'. Likewise, a - * function that calls a non-`const' function usually must not be - * `const'. It does not make sense for a `const' function to return - * `void'. - */ -#ifndef __attribute_const__ -# define __attribute_const__ /* unimplemented */ -#endif - -/* - * Tell gcc if a function is cold. The compiler will assume any path - * directly leading to the call is unlikely. + * Force the compiler to emit 'sym' as a symbol, so that we can reference + * it from inline assembler. Necessary in case 'sym' could be inlined + * otherwise, or eliminated entirely due to lack of references that are + * visible to the compiler. */ +#define __ADDRESSABLE(sym) \ + static void * __section(.discard.addressable) __used \ + __PASTE(__addressable_##sym, __LINE__) = (void *)&sym; -#ifndef __cold -#define __cold -#endif - -/* Simple shorthand for a section definition */ -#ifndef __section -# define __section(S) __attribute__ ((__section__(#S))) -#endif - -#ifndef __visible -#define __visible -#endif - -/* - * Assume alignment of return value. +/** + * offset_to_ptr - convert a relative memory offset to an absolute pointer + * @off: the address of the 32-bit offset value */ -#ifndef __assume_aligned -#define __assume_aligned(a, ...) -#endif - - -/* Are two types/vars the same type (ignoring qualifiers)? */ -#ifndef __same_type -# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) -#endif +static inline void *offset_to_ptr(const int *off) +{ + return (void *)((unsigned long)off + *off); +} -/* Is this type a native word size -- useful for atomic operations */ -#ifndef __native_word -# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) -#endif +#endif /* __ASSEMBLY__ */ /* Compile time object size, -1 for unknown */ #ifndef __compiletime_object_size @@ -461,29 +321,14 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s #endif #ifndef __compiletime_error # define __compiletime_error(message) -/* - * Sparse complains of variable sized arrays due to the temporary variable in - * __compiletime_assert. Unfortunately we can't just expand it out to make - * sparse see a constant array size without breaking compiletime_assert on old - * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether. 
- */ -# ifndef __CHECKER__ -# define __compiletime_error_fallback(condition) \ - do { ((void)sizeof(char[1 - 2 * condition])); } while (0) -# endif -#endif -#ifndef __compiletime_error_fallback -# define __compiletime_error_fallback(condition) do { } while (0) #endif #ifdef __OPTIMIZE__ # define __compiletime_assert(condition, msg, prefix, suffix) \ do { \ - bool __cond = !(condition); \ extern void prefix ## suffix(void) __compiletime_error(msg); \ - if (__cond) \ + if (!(condition)) \ prefix ## suffix(); \ - __compiletime_error_fallback(__cond); \ } while (0) #else # define __compiletime_assert(condition, msg, prefix, suffix) do { } while (0) @@ -508,52 +353,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s compiletime_assert(__native_word(t), \ "Need native word sized stores/loads for atomicity.") -/* - * Prevent the compiler from merging or refetching accesses. The compiler - * is also forbidden from reordering successive instances of ACCESS_ONCE(), - * but only when the compiler is aware of some particular ordering. One way - * to make the compiler aware of ordering is to put the two invocations of - * ACCESS_ONCE() in different C statements. - * - * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE - * on a union member will work as long as the size of the member matches the - * size of the union and the size is smaller than word size. - * - * The major use cases of ACCESS_ONCE used to be (1) Mediating communication - * between process-level code and irq/NMI handlers, all running on the same CPU, - * and (2) Ensuring that the compiler does not fold, spindle, or otherwise - * mutilate accesses that either do not require ordering or that interact - * with an explicit memory barrier or atomic instruction that provides the - * required ordering. - * - * If possible use READ_ONCE()/WRITE_ONCE() instead. - */ -#define __ACCESS_ONCE(x) ({ \ - __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \ - (volatile typeof(x) *)&(x); }) -#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x)) - -/** - * lockless_dereference() - safely load a pointer for later dereference - * @p: The pointer to load - * - * Similar to rcu_dereference(), but for situations where the pointed-to - * object's lifetime is managed by something other than RCU. That - * "something other" might be reference counting or simple immortality. - */ -#define lockless_dereference(p) \ -({ \ - typeof(p) _________p1 = READ_ONCE(p); \ - smp_read_barrier_depends(); /* Dependency order vs. p above. */ \ - (_________p1); \ -}) +/* &a[0] degrades to a pointer: a different type from an array */ +#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) -/* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ -#ifdef CONFIG_KPROBES -# define __kprobes __attribute__((__section__(".kprobes.text"))) -# define nokprobe_inline __always_inline -#else -# define __kprobes -# define nokprobe_inline inline -#endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h new file mode 100644 index 0000000000..6b318efd8a --- /dev/null +++ b/include/linux/compiler_attributes.h @@ -0,0 +1,256 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_COMPILER_ATTRIBUTES_H +#define __LINUX_COMPILER_ATTRIBUTES_H + +/* + * The attributes in this file are unconditionally defined and they directly + * map to compiler attribute(s), unless one of the compilers does not support + * the attribute. 
In that case, __has_attribute is used to check for support + * and the reason is stated in its comment ("Optional: ..."). + * + * Any other "attributes" (i.e. those that depend on a configuration option, + * on a compiler, on an architecture, on plugins, on other attributes...) + * should be defined elsewhere (e.g. compiler_types.h or compiler-*.h). + * The intention is to keep this file as simple as possible, as well as + * compiler- and version-agnostic (e.g. avoiding GCC_VERSION checks). + * + * This file is meant to be sorted (by actual attribute name, + * not by #define identifier). Use the __attribute__((__name__)) syntax + * (i.e. with underscores) to avoid future collisions with other macros. + * Provide links to the documentation of each supported compiler, if it exists. + */ + +/* + * __has_attribute is supported on gcc >= 5, clang >= 2.9 and icc >= 17. + * In the meantime, to support 4.6 <= gcc < 5, we implement __has_attribute + * by hand. + * + * sparse does not support __has_attribute (yet) and defines __GNUC_MINOR__ + * depending on the compiler used to build it; however, these attributes have + * no semantic effects for sparse, so it does not matter. Also note that, + * in order to avoid sparse's warnings, even the unsupported ones must be + * defined to 0. + */ +#ifndef __has_attribute +# define __has_attribute(x) __GCC4_has_attribute_##x +# define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) +# define __GCC4_has_attribute___copy__ 0 +# define __GCC4_has_attribute___designated_init__ 0 +# define __GCC4_has_attribute___externally_visible__ 1 +# define __GCC4_has_attribute___noclone__ 1 +# define __GCC4_has_attribute___nonstring__ 0 +# define __GCC4_has_attribute___no_sanitize_address__ (__GNUC_MINOR__ >= 8) +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-alias-function-attribute + */ +#define __alias(symbol) __attribute__((__alias__(#symbol))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-aligned-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-aligned-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-aligned-variable-attribute + */ +#define __aligned(x) __attribute__((__aligned__(x))) +#define __aligned_largest __attribute__((__aligned__)) + +/* + * Note: users of __always_inline currently do not write "inline" themselves, + * which seems to be required by gcc to apply the attribute according + * to its docs (and also "warning: always_inline function might not be + * inlinable [-Wattributes]" is emitted). + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-always_005finline-function-attribute + * clang: mentioned + */ +#define __always_inline inline __attribute__((__always_inline__)) + +/* + * The second argument is optional (default 0), so we use a variadic macro + * to make the shorthand. + * + * Beware: Do not apply this to functions which may return + * ERR_PTRs. Also, it is probably unwise to apply it to functions + * returning extra information in the low bits (but in that case the + * compiler should see some alignment anyway, when the return value is + * massaged by 'flags = ptr & 3; ptr &= ~3;'). 
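+ *
+ * An illustrative (assumed) declaration:
+ *
+ *	void *alloc_block(unsigned int n) __assume_aligned(64);
+ *
+ * tells the compiler that every pointer alloc_block() returns is 64-byte
+ * aligned, which can enable wider, vectorized accesses at the call sites.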
+ * + * Optional: only supported since gcc >= 4.9 + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-assume_005faligned-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#assume-aligned + */ +#if __has_attribute(__assume_aligned__) +# define __assume_aligned(a, ...) __attribute__((__assume_aligned__(a, ## __VA_ARGS__))) +#else +# define __assume_aligned(a, ...) +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-cold-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-cold-label-attribute + */ +#define __cold __attribute__((__cold__)) + +/* + * Note the long name. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-const-function-attribute + */ +#define __attribute_const__ __attribute__((__const__)) + +/* + * Optional: only supported since gcc >= 9 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute + */ +#if __has_attribute(__copy__) +# define __copy(symbol) __attribute__((__copy__(symbol))) +#else +# define __copy(symbol) +#endif + +/* + * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' + * attribute warnings entirely and for good") for more information. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-deprecated-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-deprecated-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-deprecated-variable-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Enumerator-Attributes.html#index-deprecated-enumerator-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#deprecated + */ +#define __deprecated + +/* + * Optional: only supported since gcc >= 5.1 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-designated_005finit-type-attribute + */ +#if __has_attribute(__designated_init__) +# define __designated_init __attribute__((__designated_init__)) +#else +# define __designated_init +#endif + +/* + * Optional: not supported by clang + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-externally_005fvisible-function-attribute + */ +#if __has_attribute(__externally_visible__) +# define __visible __attribute__((__externally_visible__)) +#else +# define __visible +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-format-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#format + */ +#define __printf(a, b) __attribute__((__format__(printf, a, b))) +#define __scanf(a, b) __attribute__((__format__(scanf, a, b))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-gnu_005finline-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#gnu-inline + */ +#define __gnu_inline __attribute__((__gnu_inline__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-malloc-function-attribute + */ +#define __malloc __attribute__((__malloc__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-mode-type-attribute + * gcc: 
https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-mode-variable-attribute + */ +#define __mode(x) __attribute__((__mode__(x))) + +/* + * Optional: not supported by clang + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noclone-function-attribute + */ +#if __has_attribute(__noclone__) +# define __noclone __attribute__((__noclone__)) +#else +# define __noclone +#endif + +/* + * Note the missing underscores. + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noinline-function-attribute + * clang: mentioned + */ +#define noinline __attribute__((__noinline__)) + +/* + * Optional: only supported since gcc >= 8 + * Optional: not supported by clang + * Optional: not supported by icc + * + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-nonstring-variable-attribute + */ +#if __has_attribute(__nonstring__) +# define __nonstring __attribute__((__nonstring__)) +#else +# define __nonstring +#endif + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-noreturn-function-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#noreturn + * clang: https://clang.llvm.org/docs/AttributeReference.html#id1 + */ +#define __noreturn __attribute__((__noreturn__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-packed-type-attribute + * clang: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-packed-variable-attribute + */ +#define __packed __attribute__((__packed__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-pure-function-attribute + */ +#define __pure __attribute__((__pure__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-section-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-section-variable-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#section-declspec-allocate + */ +#define __section(S) __attribute__((__section__(#S))) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-unused-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Type-Attributes.html#index-unused-type-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-unused-variable-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Label-Attributes.html#index-unused-label-attribute + * clang: https://clang.llvm.org/docs/AttributeReference.html#maybe-unused-unused + */ +#define __always_unused __attribute__((__unused__)) +#define __maybe_unused __attribute__((__unused__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-used-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-used-variable-attribute + */ +#define __used __attribute__((__used__)) + +/* + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute + * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute + */ +#define __weak __attribute__((__weak__)) + +#endif /* __LINUX_COMPILER_ATTRIBUTES_H */ diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h new file mode 100644 index 0000000000..72393a8c1a --- /dev/null +++ b/include/linux/compiler_types.h @@ -0,0 +1,246 @@ +/* SPDX-License-Identifier: GPL-2.0 
*/ +#ifndef __LINUX_COMPILER_TYPES_H +#define __LINUX_COMPILER_TYPES_H + +#ifndef __ASSEMBLY__ + +#ifdef __CHECKER__ +# define __user __attribute__((noderef, address_space(1))) +# define __kernel __attribute__((address_space(0))) +# define __safe __attribute__((safe)) +# define __force __attribute__((force)) +# define __nocast __attribute__((nocast)) +# define __iomem __attribute__((noderef, address_space(2))) +# define __must_hold(x) __attribute__((context(x,1,1))) +# define __acquires(x) __attribute__((context(x,0,1))) +# define __releases(x) __attribute__((context(x,1,0))) +# define __acquire(x) __context__(x,1) +# define __release(x) __context__(x,-1) +# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) +# define __percpu __attribute__((noderef, address_space(3))) +# define __rcu __attribute__((noderef, address_space(4))) +# define __private __attribute__((noderef)) +extern void __chk_user_ptr(const volatile void __user *); +extern void __chk_io_ptr(const volatile void __iomem *); +# define ACCESS_PRIVATE(p, member) (*((typeof((p)->member) __force *) &(p)->member)) +#else /* __CHECKER__ */ +# ifdef STRUCTLEAK_PLUGIN +# define __user __attribute__((user)) +# else +# define __user +# endif +# define __kernel +# define __safe +# define __force +# define __nocast +# define __iomem +# define __chk_user_ptr(x) (void)0 +# define __chk_io_ptr(x) (void)0 +# define __builtin_warning(x, y...) (1) +# define __must_hold(x) +# define __acquires(x) +# define __releases(x) +# define __acquire(x) (void)0 +# define __release(x) (void)0 +# define __cond_lock(x,c) (c) +# define __percpu +# define __rcu +# define __private +# define ACCESS_PRIVATE(p, member) ((p)->member) +#endif /* __CHECKER__ */ + +/* Indirect macros required for expanded argument pasting, eg. __LINE__. */ +#define ___PASTE(a,b) a##b +#define __PASTE(a,b) ___PASTE(a,b) + +#ifdef __KERNEL__ + +/* Attributes */ +#include <linux/compiler_attributes.h> + +/* Compiler specific macros. */ +#ifdef __clang__ +#include <linux/compiler-clang.h> +#elif defined(__INTEL_COMPILER) +#include <linux/compiler-intel.h> +#elif defined(__GNUC__) +/* The above compilers also define __GNUC__, so order is important here. */ +#include <linux/compiler-gcc.h> +#else +#error "Unknown compiler" +#endif + +/* + * Some architectures need to provide custom definitions of macros provided + * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that + * conditionally rather than using an asm-generic wrapper in order to avoid + * build failures if any C compilation, which will include this file via an + * -include argument in c_flags, occurs prior to the asm-generic wrappers being + * generated. 
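+ *
+ * For example (illustrative, not part of this diff), an architecture would
+ * select HAVE_ARCH_COMPILER_H in its Kconfig and ship its overrides in
+ * arch/<arch>/include/asm/compiler.h.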
+ */
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
+#endif
+
+struct ftrace_branch_data {
+	const char *func;
+	const char *file;
+	unsigned line;
+	union {
+		struct {
+			unsigned long correct;
+			unsigned long incorrect;
+		};
+		struct {
+			unsigned long miss;
+			unsigned long hit;
+		};
+		unsigned long miss_hit[2];
+	};
+};
+
+struct ftrace_likely_data {
+	struct ftrace_branch_data	data;
+	unsigned long			constant;
+};
+
+#ifdef CONFIG_ENABLE_MUST_CHECK
+#define __must_check __attribute__((__warn_unused_result__))
+#else
+#define __must_check
+#endif
+
+#if defined(CC_USING_HOTPATCH)
+#define notrace __attribute__((hotpatch(0, 0)))
+#elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
+#define notrace __attribute__((patchable_function_entry(0, 0)))
+#else
+#define notrace __attribute__((__no_instrument_function__))
+#endif
+
+/*
+ * it doesn't make sense on ARM (currently the only user of __naked)
+ * to trace naked functions because then mcount is called without
+ * stack and frame pointer being set up and there is no chance to
+ * restore the lr register to the value before mcount was called.
+ */
+#define __naked __attribute__((__naked__)) notrace
+
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/*
+ * Force always-inline if the user requests it so via the .config.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
+ * Do not use __always_inline here, since currently it expands to inline again
+ * (which would break users of __always_inline).
+ */
+#if !defined(CONFIG_OPTIMIZE_INLINING)
+#define inline inline __attribute__((__always_inline__)) __gnu_inline \
+	__inline_maybe_unused notrace
+#else
+#define inline inline                                    __gnu_inline \
+	__inline_maybe_unused notrace
+#endif
+
+/*
+ * gcc provides both __inline__ and __inline as alternate spellings of
+ * the inline keyword, though the latter is undocumented. New kernel
+ * code should only use the inline spelling, but some existing code
+ * uses __inline__. Since we #define inline above, to ensure
+ * __inline__ has the same semantics, we need this #define.
+ *
+ * However, the spelling __inline is strictly reserved for referring
+ * to the bare keyword.
+ */
+#define __inline__ inline
+
+/*
+ * GCC does not warn about unused static inline functions for -Wunused-function.
+ * Suppress the warning in clang as well by using __maybe_unused, but enable it
+ * for W=1 build. This will allow clang to find unused functions. Remove the
+ * __inline_maybe_unused entirely after fixing most of -Wunused-function warnings.
+ */
+#ifdef KBUILD_EXTRA_WARN1
+#define __inline_maybe_unused
+#else
+#define __inline_maybe_unused __maybe_unused
+#endif
+
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentation reasons.
+ */
+#define noinline_for_stack noinline
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler specific plugins.
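+ * (__latent_entropy just below is one such case: compiler-gcc.h defines it
+ * only when the latent_entropy gcc plugin is enabled, so an empty fallback
+ * is provided here.)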
+ */
+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
+#ifndef __randomize_layout
+# define __randomize_layout __designated_init
+#endif
+
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
+#endif
+
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
+#endif
+
+#ifndef asm_volatile_goto
+#define asm_volatile_goto(x...) asm goto(x)
+#endif
+
+#ifdef CONFIG_CC_HAS_ASM_INLINE
+#define asm_inline asm __inline
+#else
+#define asm_inline asm
+#endif
+
+#ifndef __no_fgcse
+# define __no_fgcse
+#endif
+
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+	(sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+	 sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
+
+/* Helpers for emitting diagnostics in pragmas. */
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push()	__diag(push)
+#define __diag_pop()	__diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+	__diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+	__diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+	__diag_ ## compiler(version, error, option)
+
+#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h
new file mode 100644
index 0000000000..ac6aba632f
--- /dev/null
+++ b/include/linux/kasan-checks.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_CHECKS_H
+#define _LINUX_KASAN_CHECKS_H
+
+#include <linux/types.h>
+
+/*
+ * __kasan_check_*: Always available when KASAN is enabled. This may be used
+ * even in compilation units that selectively disable KASAN, but must use KASAN
+ * to validate access to an address. Never use these in header files!
+ */
+#ifdef CONFIG_KASAN
+bool __kasan_check_read(const volatile void *p, unsigned int size);
+bool __kasan_check_write(const volatile void *p, unsigned int size);
+#else
+static inline bool __kasan_check_read(const volatile void *p, unsigned int size)
+{
+	return true;
+}
+static inline bool __kasan_check_write(const volatile void *p, unsigned int size)
+{
+	return true;
+}
+#endif
+
+/*
+ * kasan_check_*: Only available when the particular compilation unit has KASAN
+ * instrumentation enabled. May be used in header files.
+ */
+#ifdef __SANITIZE_ADDRESS__
+#define kasan_check_read __kasan_check_read
+#define kasan_check_write __kasan_check_write
+#else
+static inline bool kasan_check_read(const volatile void *p, unsigned int size)
+{
+	return true;
+}
+static inline bool kasan_check_write(const volatile void *p, unsigned int size)
+{
+	return true;
+}
+#endif
+
+#endif
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 425f2f53f7..20ae30d0e1 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -22,8 +22,6 @@
 #include <linux/types.h>
 #include <linux/string.h>

-#define noinline __attribute__((noinline))
-
 /* we use this so that we can do without the ctype library */
 #define is_digit(c)	((c) >= '0' && (c) <= '9')
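As an illustrative aside, not part of the synced headers themselves: with
compiler.h now providing __must_be_array(), the conventional kernel-style
ARRAY_SIZE() macro can be built on top of it so that passing a plain pointer
fails at compile time. The ARRAY_SIZE() definition and the availability of
BUILD_BUG_ON_ZERO() below are assumptions for this sketch, not something this
diff adds.

	#include <linux/compiler.h>

	/* Conventional definition, assuming BUILD_BUG_ON_ZERO() is available. */
	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))

	static int sum_table(void)
	{
		static const int table[4] = { 1, 2, 3, 4 };
		unsigned int i;
		int sum = 0;

		for (i = 0; i < ARRAY_SIZE(table); i++)
			sum += table[i];

		return sum;	/* ARRAY_SIZE() on an 'int *' would not build */
	}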