Diffstat (limited to 'arch')
41 files changed, 109 insertions, 99 deletions
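Most hunks in this series apply one mechanical substitution: CONFIG_PREEMPTION is selected by both CONFIG_PREEMPT and CONFIG_PREEMPT_RT, so code that must cope with involuntary preemption (entry paths, cache maintenance, lock inlining) now tests CONFIG_PREEMPTION rather than CONFIG_PREEMPT. As rough orientation only, and not code from this commit, the preemption check that the various entry.S hunks below open-code in assembly corresponds to C along these lines, using the generic preempt_count()/TIF_NEED_RESCHED helpers (the function name is hypothetical):

/*
 * Illustrative sketch, not part of this commit: the return-to-kernel
 * preemption check that the entry.S hunks below implement in assembly.
 * CONFIG_PREEMPTION covers both CONFIG_PREEMPT and CONFIG_PREEMPT_RT.
 */
#include <linux/preempt.h>	/* preempt_count() */
#include <linux/sched.h>	/* preempt_schedule_irq() */
#include <linux/thread_info.h>	/* test_thread_flag(), TIF_NEED_RESCHED */

#ifdef CONFIG_PREEMPTION
static void resume_kernel_check(void)	/* hypothetical helper name */
{
	/* Interrupts are already disabled on the return-from-IRQ path. */
	if (!preempt_count() && test_thread_flag(TIF_NEED_RESCHED))
		preempt_schedule_irq();	/* enables/disables IRQs internally */
}
#endif

The assembly versions read thread_info->preempt_count and the TIF_NEED_RESCHED flag directly before branching to preempt_schedule_irq(), which is why each of them now has to be built for CONFIG_PREEMPT_RT kernels as well.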
diff --git a/arch/arc/kernel/entry.S b/arch/arc/kernel/entry.S index 72be01270e24..1f6bb184a44d 100644 --- a/arch/arc/kernel/entry.S +++ b/arch/arc/kernel/entry.S @@ -337,11 +337,11 @@ resume_user_mode_begin: resume_kernel_mode: ; Disable Interrupts from this point on - ; CONFIG_PREEMPT: This is a must for preempt_schedule_irq() - ; !CONFIG_PREEMPT: To ensure restore_regs is intr safe + ; CONFIG_PREEMPTION: This is a must for preempt_schedule_irq() + ; !CONFIG_PREEMPTION: To ensure restore_regs is intr safe IRQ_DISABLE r9 -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ; Can't preempt if preemption disabled GET_CURR_THR_INFO_FROM_SP r10 diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h index d3e937dcee4d..007d8fea7157 100644 --- a/arch/arm/include/asm/switch_to.h +++ b/arch/arm/include/asm/switch_to.h @@ -10,7 +10,7 @@ * to ensure that the maintenance completes in case we migrate to another * CPU. */ -#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) +#if defined(CONFIG_PREEMPTION) && defined(CONFIG_SMP) && defined(CONFIG_CPU_V7) #define __complete_pending_tlbi() dsb(ish) #else #define __complete_pending_tlbi() diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 858d4e541532..77f54830554c 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -211,7 +211,7 @@ __irq_svc: svc_entry irq_handler -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldr r8, [tsk, #TI_PREEMPT] @ get preempt count ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if preempt count != 0 @@ -226,7 +226,7 @@ ENDPROC(__irq_svc) .ltorg -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION svc_preempt: mov r8, lr 1: bl preempt_schedule_irq @ irq en/disable is done inside diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index c053abd1fb53..abb7dd7e656f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -248,6 +248,8 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 0ee8fc4b4672..dc8f152f3556 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -135,13 +135,13 @@ flush_levels: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr isb @ isb to sych the new cssr&csidr mrc p15, 1, r1, c0, c0, 0 @ read the new csidr -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index a0035c426ce6..1bc3a0a50753 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S @@ -183,13 +183,13 @@ flush_levels: and r1, r1, #7 @ mask of the bits for current cache only cmp r1, #2 @ see what cache we have at this level blt skip @ skip if no cache, or just i-cache -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic #endif write_csselr r10, r1 @ set current cache level isb @ isb to sych the new cssr&csidr read_ccsidr r1 @ read the new csidr -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION 
restore_irqs_notrace r9 #endif and r2, r1, #7 @ extract the length of the cache lines diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index b1b4476ddb83..3ab05857ca8f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -34,32 +34,32 @@ config ARM64 select ARCH_HAS_TEARDOWN_DMA_OPS if IOMMU_SUPPORT select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_INLINE_READ_LOCK if !PREEMPT - select ARCH_INLINE_READ_LOCK_BH if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_READ_UNLOCK if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_WRITE_LOCK if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPT - select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPT - select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPT + select ARCH_INLINE_READ_LOCK if !PREEMPTION + select ARCH_INLINE_READ_LOCK_BH if !PREEMPTION + select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPTION + select ARCH_INLINE_SPIN_TRYLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_TRYLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_LOCK_IRQSAVE if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION + select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION select ARCH_KEEP_MEMBLOCK select ARCH_USE_CMPXCHG_LOCKREF select ARCH_USE_QUEUED_RWLOCKS diff --git a/arch/arm64/crypto/sha256-glue.c b/arch/arm64/crypto/sha256-glue.c index e273faca924f..999da59f03a9 100644 --- a/arch/arm64/crypto/sha256-glue.c +++ b/arch/arm64/crypto/sha256-glue.c @@ -97,7 +97,7 @@ static int sha256_update_neon(struct shash_desc *desc, const u8 *data, * input when running on a preemptible kernel, but process the * data block by block instead. 
*/ - if (IS_ENABLED(CONFIG_PREEMPT) && + if (IS_ENABLED(CONFIG_PREEMPTION) && chunk + sctx->count % SHA256_BLOCK_SIZE > SHA256_BLOCK_SIZE) chunk = SHA256_BLOCK_SIZE - sctx->count % SHA256_BLOCK_SIZE; diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index b8cf7c85ffa2..2cc0dd8bd9f7 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -699,8 +699,8 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * where <label> is optional, and marks the point where execution will resume * after a yield has been performed. If omitted, execution resumes right after * the endif_yield_neon invocation. Note that the entire sequence, including - * the provided patchup code, will be omitted from the image if CONFIG_PREEMPT - * is not defined. + * the provided patchup code, will be omitted from the image if + * CONFIG_PREEMPTION is not defined. * * As a convenience, in the case where no patchup code is required, the above * sequence may be abbreviated to @@ -728,7 +728,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU .endm .macro if_will_cond_yield_neon -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION get_current_task x0 ldr x0, [x0, #TSK_TI_PREEMPT] sub x0, x0, #PREEMPT_DISABLE_OFFSET diff --git a/arch/arm64/include/asm/preempt.h b/arch/arm64/include/asm/preempt.h index d49951647014..80e946b2abee 100644 --- a/arch/arm64/include/asm/preempt.h +++ b/arch/arm64/include/asm/preempt.h @@ -79,11 +79,11 @@ static inline bool should_resched(int preempt_offset) return pc == preempt_offset; } -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 7c6a0a41676f..099497b4bc09 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -605,7 +605,7 @@ el1_irq: irq_handler -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldr x24, [tsk, #TSK_TI_PREEMPT] // get preempt count alternative_if ARM64_HAS_IRQ_PRIO_MASKING /* diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 73caf35c2262..cf402be5c573 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -144,9 +144,12 @@ void show_stack(struct task_struct *tsk, unsigned long *sp) #ifdef CONFIG_PREEMPT #define S_PREEMPT " PREEMPT" +#elif defined(CONFIG_PREEMPT_RT) +#define S_PREEMPT " PREEMPT_RT" #else #define S_PREEMPT "" #endif + #define S_SMP " SMP" static int __die(const char *str, int err, struct pt_regs *regs) diff --git a/arch/c6x/kernel/entry.S b/arch/c6x/kernel/entry.S index 4332a10aec6c..fb154d19625b 100644 --- a/arch/c6x/kernel/entry.S +++ b/arch/c6x/kernel/entry.S @@ -18,7 +18,7 @@ #define DP B14 #define SP B15 -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #endif @@ -287,7 +287,7 @@ work_notifysig: ;; is a little bit different ;; ENTRY(ret_from_exception) -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION MASK_INT B2 #endif @@ -557,7 +557,7 @@ ENDPROC(_nmi_handler) ;; ;; Jump to schedule() then return to ret_from_isr ;; -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: GET_THREAD_INFO A12 LDW .D1T1 *+A12(THREAD_INFO_PREEMPT_COUNT),A1 @@ -582,7 +582,7 @@ preempt_schedule: B .S2 preempt_schedule_irq #endif ADDKPC .S2 preempt_schedule,B3,4 -#endif /* 
CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ ENTRY(enable_exception) DINT diff --git a/arch/csky/kernel/entry.S b/arch/csky/kernel/entry.S index a7a5b67df898..007706328000 100644 --- a/arch/csky/kernel/entry.S +++ b/arch/csky/kernel/entry.S @@ -277,7 +277,7 @@ ENTRY(csky_irq) zero_fp psrset ee -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION mov r9, sp /* Get current stack pointer */ bmaski r10, THREAD_SHIFT andn r9, r10 /* Get thread_info */ @@ -294,7 +294,7 @@ ENTRY(csky_irq) mov a0, sp jbsr csky_do_IRQ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION subi r12, 1 stw r12, (r9, TINFO_PREEMPT) cmpnei r12, 0 diff --git a/arch/h8300/kernel/entry.S b/arch/h8300/kernel/entry.S index 4ade5f8299ba..c6e289b5f1f2 100644 --- a/arch/h8300/kernel/entry.S +++ b/arch/h8300/kernel/entry.S @@ -284,12 +284,12 @@ badsys: mov.l er0,@(LER0:16,sp) bra resume_userspace -#if !defined(CONFIG_PREEMPT) +#if !defined(CONFIG_PREEMPTION) #define resume_kernel restore_all #endif ret_from_exception: -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) orc #0xc0,ccr #endif ret_from_interrupt: @@ -319,7 +319,7 @@ work_resched: restore_all: RESTORE_ALL /* Does RTE */ -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) resume_kernel: mov.l @(TI_PRE_COUNT:16,er4),er0 bne restore_all:8 diff --git a/arch/hexagon/kernel/vm_entry.S b/arch/hexagon/kernel/vm_entry.S index 12242c27e2df..65a1ea0eed2f 100644 --- a/arch/hexagon/kernel/vm_entry.S +++ b/arch/hexagon/kernel/vm_entry.S @@ -265,12 +265,12 @@ event_dispatch: * should be in the designated register (usually R19) * * If we were in kernel mode, we don't need to check scheduler - * or signals if CONFIG_PREEMPT is not set. If set, then it has + * or signals if CONFIG_PREEMPTION is not set. If set, then it has * to jump to a need_resched kind of block. - * BTW, CONFIG_PREEMPT is not supported yet. + * BTW, CONFIG_PREEMPTION is not supported yet. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION R0 = #VM_INT_DISABLE trap1(#HVM_TRAP1_VMSETIE) #endif diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index a9992be5718b..2ac926331500 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -670,12 +670,12 @@ GLOBAL_ENTRY(ia64_leave_syscall) * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. - * With CONFIG_PREEMPT, we also check for extra work when the preempt_count + * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at ia64_work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r2, r18) // disable interrupts cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 @@ -685,7 +685,7 @@ GLOBAL_ENTRY(ia64_leave_syscall) (pUStk) mov r21=0 // r21 <- 0 ;; cmp.eq p6,p0=r21,r0 // p6 <- pUStk || (preempt_count == 0) -#else /* !CONFIG_PREEMPT */ +#else /* !CONFIG_PREEMPTION */ RSM_PSR_I(pUStk, r2, r18) cmp.eq pLvSys,p0=r0,r0 // pLvSys=1: leave from syscall (pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk @@ -814,12 +814,12 @@ GLOBAL_ENTRY(ia64_leave_kernel) * * p6 controls whether current_thread_info()->flags needs to be check for * extra work. We always check for extra work when returning to user-level. 
- * With CONFIG_PREEMPT, we also check for extra work when the preempt_count + * With CONFIG_PREEMPTION, we also check for extra work when the preempt_count * is 0. After extra work processing has been completed, execution * resumes at .work_processed_syscall with p6 set to 1 if the extra-work-check * needs to be redone. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION RSM_PSR_I(p0, r17, r31) // disable interrupts cmp.eq p0,pLvSys=r0,r0 // pLvSys=0: leave from kernel (pKStk) adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13 @@ -1120,7 +1120,7 @@ skip_rbs_switch: /* * On entry: - * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPT) + * r20 = &current->thread_info->pre_count (if CONFIG_PREEMPTION) * r31 = current->thread_info->flags * On exit: * p6 = TRUE if work-pending-check needs to be redone diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index b8356edbde65..a6d6a0556f08 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -841,7 +841,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args) return 1; } -#if !defined(CONFIG_PREEMPT) +#if !defined(CONFIG_PREEMPTION) if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { /* Boost up -- we can execute copied instructions directly */ ia64_psr(regs)->ri = p->ainsn.slot; diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S index de7083bd1d24..f6ded356394a 100644 --- a/arch/microblaze/kernel/entry.S +++ b/arch/microblaze/kernel/entry.S @@ -728,7 +728,7 @@ no_intr_resched: bri 6f; /* MS: Return to kernel state. */ 2: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get preempt_count from thread info */ lwi r5, r11, TI_PREEMPT_COUNT; diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h index feb069cbf44e..655f40ddb6d1 100644 --- a/arch/mips/include/asm/asmmacro.h +++ b/arch/mips/include/asm/asmmacro.h @@ -63,7 +63,7 @@ .endm .macro local_irq_disable reg=t0 -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lw \reg, TI_PRE_COUNT($28) addi \reg, \reg, 1 sw \reg, TI_PRE_COUNT($28) @@ -73,7 +73,7 @@ xori \reg, \reg, 1 mtc0 \reg, CP0_STATUS irq_disable_hazard -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION lw \reg, TI_PRE_COUNT($28) addi \reg, \reg, -1 sw \reg, TI_PRE_COUNT($28) diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S index 5469d43b6966..4849a48afc0f 100644 --- a/arch/mips/kernel/entry.S +++ b/arch/mips/kernel/entry.S @@ -19,7 +19,7 @@ #include <asm/thread_info.h> #include <asm/war.h> -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION #define resume_kernel restore_all #else #define __ret_from_irq ret_from_exception @@ -27,7 +27,7 @@ .text .align 5 -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION FEXPORT(ret_from_exception) local_irq_disable # preempt stop b __ret_from_irq @@ -53,7 +53,7 @@ resume_userspace: bnez t0, work_pending j restore_all -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: local_irq_disable lw t0, TI_PRE_COUNT($28) diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig index 12c06a833b7c..e30298e99e1b 100644 --- a/arch/nds32/Kconfig +++ b/arch/nds32/Kconfig @@ -62,7 +62,7 @@ config GENERIC_HWEIGHT config GENERIC_LOCKBREAK def_bool y - depends on PREEMPT + depends on PREEMPTION config TRACE_IRQFLAGS_SUPPORT def_bool y diff --git a/arch/nds32/kernel/ex-exit.S b/arch/nds32/kernel/ex-exit.S index 1df02a793364..6a2966c2d8c8 100644 --- a/arch/nds32/kernel/ex-exit.S +++ b/arch/nds32/kernel/ex-exit.S @@ -72,7 +72,7 @@ restore_user_regs_last .endm
-#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION .macro preempt_stop .endm #else @@ -158,7 +158,7 @@ no_work_pending: /* * preemptive kernel */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION resume_kernel: gie_disable lwi $t0, [tsk+#TSK_TI_PREEMPT] diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S index 1e515ccd698e..3d8d1d0bcb64 100644 --- a/arch/nios2/kernel/entry.S +++ b/arch/nios2/kernel/entry.S @@ -365,7 +365,7 @@ ENTRY(ret_from_interrupt) ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */ TSTBNZ r1, r1, ESTATUS_EU, Luser_return -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION GET_THREAD_INFO r1 ldw r4, TI_PREEMPT_COUNT(r1) bne r4, r0, restore_all diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index b16237c95ea3..593e4014cef8 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -81,7 +81,7 @@ config STACK_GROWSUP config GENERIC_LOCKBREAK bool default y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config ARCH_HAS_ILOG2_U32 bool diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index b96d74496977..9a03e29c8733 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -940,14 +940,14 @@ intr_restore: rfi nop -#ifndef CONFIG_PREEMPT +#ifndef CONFIG_PREEMPTION # define intr_do_preempt intr_restore -#endif /* !CONFIG_PREEMPT */ +#endif /* !CONFIG_PREEMPTION */ .import schedule,code intr_do_resched: /* Only call schedule on return to userspace. If we're returning - * to kernel space, we may schedule if CONFIG_PREEMPT, otherwise + * to kernel space, we may schedule if CONFIG_PREEMPTION, otherwise * we jump back to intr_restore. */ LDREG PT_IASQ0(%r16), %r20 @@ -979,7 +979,7 @@ intr_do_resched: * and preempt_count is 0. otherwise, we continue on * our merry way back to the current running task. */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION .import preempt_schedule_irq,code intr_do_preempt: rsm PSW_SM_I, %r0 /* disable interrupts */ @@ -999,7 +999,7 @@ intr_do_preempt: nop b,n intr_restore /* ssm PSW_SM_I done by intr_restore */ -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ /* * External interrupts. diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 1ec34e16ed65..6987b0832e5f 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -106,7 +106,7 @@ config LOCKDEP_SUPPORT config GENERIC_LOCKBREAK bool default y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config GENERIC_HWEIGHT bool diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d60908ea37fb..e1a4c39b83b8 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -897,7 +897,7 @@ resume_kernel: bne- 0b 1: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* check current_thread_info->preempt_count */ lwz r0,TI_PREEMPT(r2) cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ @@ -921,7 +921,7 @@ resume_kernel: */ bl trace_hardirqs_on #endif -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ restore_kuap: kuap_restore r1, r2, r9, r10, r0 diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 3fd3ef352e3f..a9a1d3cdb523 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -846,7 +846,7 @@ resume_kernel: bne- 0b 1: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION /* Check if we need to preempt */ andi. 
r0,r4,_TIF_NEED_RESCHED beq+ restore @@ -877,7 +877,7 @@ resume_kernel: li r10,MSR_RI mtmsrd r10,1 /* Update machine state */ #endif /* CONFIG_PPC_BOOK3E */ -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ .globl fast_exc_return_irq fast_exc_return_irq: diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index a1349ca64669..dd61bb247ce7 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -155,7 +155,7 @@ _save_context: REG_L x2, PT_SP(sp) .endm -#if !IS_ENABLED(CONFIG_PREEMPT) +#if !IS_ENABLED(CONFIG_PREEMPTION) .set resume_kernel, restore_all #endif @@ -304,7 +304,7 @@ restore_all: sret #endif -#if IS_ENABLED(CONFIG_PREEMPT) +#if IS_ENABLED(CONFIG_PREEMPTION) resume_kernel: REG_L s0, TASK_TI_PREEMPT_COUNT(tp) bnez s0, restore_all diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index bc88841d335d..6d67e9cc8888 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -30,7 +30,7 @@ config GENERIC_BUG_RELATIVE_POINTERS def_bool y config GENERIC_LOCKBREAK - def_bool y if PREEMPT + def_bool y if PREEMPTION config PGSTE def_bool y if KVM diff --git a/arch/s390/include/asm/preempt.h b/arch/s390/include/asm/preempt.h index b5ea9e14c017..6ede29907fbf 100644 --- a/arch/s390/include/asm/preempt.h +++ b/arch/s390/include/asm/preempt.h @@ -130,11 +130,11 @@ static inline bool should_resched(int preempt_offset) #endif /* CONFIG_HAVE_MARCH_Z196_FEATURES */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION extern asmlinkage void preempt_schedule(void); #define __preempt_schedule() preempt_schedule() extern asmlinkage void preempt_schedule_notrace(void); #define __preempt_schedule_notrace() preempt_schedule_notrace() -#endif /* CONFIG_PREEMPT */ +#endif /* CONFIG_PREEMPTION */ #endif /* __ASM_PREEMPT_H */ diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c index d306fe04489a..2c122d8bab93 100644 --- a/arch/s390/kernel/dumpstack.c +++ b/arch/s390/kernel/dumpstack.c @@ -195,6 +195,8 @@ void die(struct pt_regs *regs, const char *str) regs->int_code >> 17, ++die_counter); #ifdef CONFIG_PREEMPT pr_cont("PREEMPT "); +#elif defined(CONFIG_PREEMPT_RT) + pr_cont("PREEMPT_RT "); #endif pr_cont("SMP "); if (debug_pagealloc_enabled()) diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index 270d1d145761..9205add8481d 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -790,7 +790,7 @@ ENTRY(io_int_handler) .Lio_work: tm __PT_PSW+1(%r11),0x01 # returning to user ?
jo .Lio_work_user # yes -> do resched & signal -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION # check for preemptive scheduling icm %r0,15,__LC_PREEMPT_COUNT jnz .Lio_restore # preemption is disabled diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index f356ee674d89..9ece111b0254 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -108,7 +108,7 @@ config GENERIC_CALIBRATE_DELAY config GENERIC_LOCKBREAK def_bool y - depends on SMP && PREEMPT + depends on SMP && PREEMPTION config ARCH_SUSPEND_POSSIBLE def_bool n diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S index de68ffdfffbf..81c8b64b977f 100644 --- a/arch/sh/kernel/cpu/sh5/entry.S +++ b/arch/sh/kernel/cpu/sh5/entry.S @@ -86,7 +86,7 @@ andi r6, ~0xf0, r6; \ putcon r6, SR; -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION # define preempt_stop() CLI() #else # define preempt_stop() @@ -884,7 +884,7 @@ ret_from_exception: /* Check softirqs */ -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION pta ret_from_syscall, tr0 blink tr0, ZERO diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index d31f66e82ce5..956a7a03b0c8 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -41,7 +41,7 @@ */ #include <asm/dwarf.h> -#if defined(CONFIG_PREEMPT) +#if defined(CONFIG_PREEMPTION) # define preempt_stop() cli ; TRACE_IRQS_OFF #else # define preempt_stop() @@ -84,7 +84,7 @@ ENTRY(ret_from_irq) get_current_thread_info r8, r0 bt resume_kernel ! Yes, it's from kernel, go back soon -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION bra resume_userspace nop ENTRY(resume_kernel) diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index eb24cb1afc11..e8c3ea01c12f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -277,7 +277,7 @@ config US3_MC config GENERIC_LOCKBREAK bool default y - depends on SPARC64 && SMP && PREEMPT + depends on SPARC64 && SMP && PREEMPTION config NUMA bool "NUMA support" diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 29aa34f11720..c5fd4b450d9b 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -310,7 +310,7 @@ kern_rtt_restore: retry to_kernel: -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION ldsw [%g6 + TI_PRE_COUNT], %l5 brnz %l5, kern_fpucheck ldx [%g6 + TI_FLAGS], %l5 diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index be897803834a..2c9e48566e48 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -520,7 +520,7 @@ common_exception_return: call4 schedule # void schedule (void) j 1b -#ifdef CONFIG_PREEMPT +#ifdef CONFIG_PREEMPTION 6: _bbci.l a4, TIF_NEED_RESCHED, 4f diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index 87bd68dd7687..0976e27b8d5d 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -519,12 +519,15 @@ DEFINE_SPINLOCK(die_lock); void die(const char * str, struct pt_regs * regs, long err) { static int die_counter; + const char *pr = ""; + + if (IS_ENABLED(CONFIG_PREEMPTION)) + pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT"; console_verbose(); spin_lock_irq(&die_lock); - pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, - IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT" : ""); + pr_info("%s: sig: %ld [#%d]%s\n", str, err, ++die_counter, pr); show_regs(regs); if (!user_mode(regs)) show_stack(NULL, (unsigned long*)regs->areg[1]); |
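The traps/dumpstack hunks (arm, arm64, s390, xtensa) only change reporting: the oops banner still prints " PREEMPT" for a CONFIG_PREEMPT kernel and now prints " PREEMPT_RT" when that model is selected. A minimal sketch of the pattern, following the IS_ENABLED() style of the xtensa hunk above; the helper name is made up for illustration and is not from this commit:

/*
 * Sketch of the banner-selection logic in the die()/show_stack() hunks
 * above; helper name is illustrative only.
 */
#include <linux/kconfig.h>	/* IS_ENABLED() */

static const char *preempt_model_str(void)
{
	if (IS_ENABLED(CONFIG_PREEMPT))
		return " PREEMPT";
	if (IS_ENABLED(CONFIG_PREEMPT_RT))
		return " PREEMPT_RT";
	return "";
}

Since PREEMPT and PREEMPT_RT are mutually exclusive preemption models, at most one of the two strings is chosen, and a non-preemptible kernel keeps the empty suffix.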