380 files changed, 10986 insertions, 2341 deletions
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt
index 3a3b30ac2a75..9e0745cafbd8 100644
--- a/Documentation/sysrq.txt
+++ b/Documentation/sysrq.txt
@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (or F13) - <command key>,
 On other - If you know of the key combos for other architectures, please
            let me know so I can add them to this section.
-On all - write a character to /proc/sysrq-trigger. e.g.:
-
+On all - write a character to /proc/sysrq-trigger, e.g.:
 		echo t > /proc/sysrq-trigger
+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
+	echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
+	Send an ICMP echo request with this pattern plus the particular
+	SysRq command key. Example:
+		# ping -c1 -s57 -p0102030468
+	will trigger the SysRq-H (help) command.
+
+
 * What are the 'command' keys?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 'b' - Will immediately reboot the system without syncing or unmounting
diff --git a/Documentation/trace/histograms.txt b/Documentation/trace/histograms.txt
new file mode 100644
index 000000000000..6f2aeabf7faa
--- /dev/null
+++ b/Documentation/trace/histograms.txt
@@ -0,0 +1,186 @@
+		Using the Linux Kernel Latency Histograms
+
+
+This document gives a short explanation of how to enable, configure and
+use latency histograms. Latency histograms are primarily relevant in the
+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
+and are used in the quality management of the Linux real-time
+capabilities.
+
+
+* Purpose of latency histograms
+
+A latency histogram continuously accumulates the frequencies of latency
+data. There are two types of histograms:
+- potential sources of latencies
+- effective latencies
+
+
+* Potential sources of latencies
+
+Potential sources of latencies are code segments where interrupts,
+preemption or both are disabled (aka critical sections). To create
+histograms of potential sources of latency, the kernel stores the time
+stamp at the start of a critical section, determines the time elapsed
+when the end of the section is reached, and increments the frequency
+counter of that latency value - irrespective of whether any concurrently
+running process is affected by latency or not.
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_INTERRUPT_OFF_LATENCY
+  CONFIG_PREEMPT_OFF_LATENCY
+
+
+* Effective latencies
+
+Effective latencies are those that actually occur during the wakeup of a
+process. To determine effective latencies, the kernel stores the time
+stamp when a process is scheduled to be woken up, and determines the
+duration of the wakeup time shortly before control is passed over to
+this process. Note that the apparent latency in user space may be
+somewhat longer, since the process may be interrupted after control is
+passed over to it but before the execution in user space takes place.
+Simply measuring the interval between enqueuing and wakeup may also not
+be appropriate in cases when a process is scheduled as a result of a
+timer expiration. The timer may have missed its deadline, e.g. due to
+disabled interrupts, but this latency would not be registered.
+Therefore, the offsets of missed timers are recorded in a separate
+histogram. If both wakeup latency and missed timer offsets are
+configured and enabled, a third histogram may be enabled that records
+the overall latency as a sum of the timer latency, if any, and the
+wakeup latency. This histogram is called "timerandwakeup".
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_WAKEUP_LATENCY
+  CONFIG_MISSED_TIMER_OFFSETS
+
+
+* Usage
+
+The interface to the administration of the latency histograms is located
+in the debugfs file system. To mount it, either enter
+
+mount -t sysfs nodev /sys
+mount -t debugfs nodev /sys/kernel/debug
+
+from the shell command line, or add
+
+nodev	/sys			sysfs	defaults	0 0
+nodev	/sys/kernel/debug	debugfs	defaults	0 0
+
+to the file /etc/fstab. All latency histogram related files are then
+available in the directory /sys/kernel/debug/tracing/latency_hist. A
+particular histogram type is enabled by writing non-zero to the related
+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
+Select "preemptirqsoff" for the histograms of potential sources of
+latencies, "wakeup" for histograms of effective latencies, etc. The
+histogram data - one per CPU - are available in the files
+
+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
+
+The histograms are reset by writing non-zero to the file "reset" in a
+particular latency directory. To reset all latency data, use
+
+#!/bin/sh
+
+TRACINGDIR=/sys/kernel/debug/tracing
+HISTDIR=$TRACINGDIR/latency_hist
+
+if test -d $HISTDIR
+then
+  cd $HISTDIR
+  for i in `find . | grep /reset$`
+  do
+    echo 1 >$i
+  done
+fi
+
+
+* Data format
+
+Latency data are stored with a resolution of one microsecond. The
+maximum latency is 10,240 microseconds. The data are only valid if the
+overflow register is empty. Every output line contains the latency in
+microseconds in the first column and the number of samples in the
+second column. To display only lines with a positive latency count,
+use, for example,
+
+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
+
+#Minimum latency: 0 microseconds.
+#Average latency: 0 microseconds.
+#Maximum latency: 25 microseconds.
+#Total samples: 3104770694
+#There are 0 samples greater or equal than 10240 microseconds
+#usecs	         samples
+    0	      2984486876
+    1	        49843506
+    2	        58219047
+    3	         5348126
+    4	         2187960
+    5	         3388262
+    6	          959289
+    7	          208294
+    8	           40420
+    9	            4485
+   10	           14918
+   11	           18340
+   12	           25052
+   13	           19455
+   14	            5602
+   15	             969
+   16	              47
+   17	              18
+   18	              14
+   19	               1
+   20	               3
+   21	               2
+   22	               5
+   23	               2
+   25	               1
+
+
+* Wakeup latency of a selected process
+
+To only collect wakeup latency data of a particular process, write the
+PID of the requested process to
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
+
+PIDs are not considered if this variable is set to 0.
+
+
+* Details of the process with the highest wakeup latency so far
+
+Selected data of the process that suffered from the highest wakeup
+latency that occurred in a particular CPU are available in the file
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
+
+In addition, other relevant system data at the time when the
+latency occurred are given.
+
+The format of the data is (all in one line):
+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
+<- <PID> <Priority> <Command> <Timestamp>
+
+The value of <Timeroffset> is only relevant in the combined timer
+and wakeup latency recording. In the wakeup recording, it is
+always 0; in the missed_timer_offsets recording, it is the same
+as <Latency>.
+
+When retrospectively searching for the origin of a latency and
+tracing was not enabled, it may be helpful to know the name and
+some basic data of the task that (finally) was switching to the
+late real-time task. In addition to the victim's data, the data
+of the possible culprit are therefore also displayed after the
+"<-" symbol.
+
+Finally, the timestamp of the time when the latency occurred
+in <seconds>.<microseconds> after the most recent system boot
+is provided.
+
+These data are also reset when the wakeup histogram is reset.
diff --git a/MAINTAINERS b/MAINTAINERS
index 63cefa62324c..be0ea1e5c4cc 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5196,6 +5196,23 @@ F:	fs/fuse/
 F:	include/uapi/linux/fuse.h
 F:	Documentation/filesystems/fuse.txt
 
+FUTEX SUBSYSTEM
+M:	Thomas Gleixner <tglx@linutronix.de>
+M:	Ingo Molnar <mingo@redhat.com>
+R:	Peter Zijlstra <peterz@infradead.org>
+R:	Darren Hart <dvhart@infradead.org>
+L:	linux-kernel@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
+S:	Maintained
+F:	kernel/futex.c
+F:	kernel/futex_compat.c
+F:	include/asm-generic/futex.h
+F:	include/linux/futex.h
+F:	include/uapi/linux/futex.h
+F:	tools/testing/selftests/futex/
+F:	tools/perf/bench/futex*
+F:	Documentation/*futex*
+
 FUTURE DOMAIN TMC-16x0 SCSI DRIVER (16-bit)
 M:	Rik Faith <faith@cs.unc.edu>
 L:	linux-scsi@vger.kernel.org
diff --git a/arch/Kconfig b/arch/Kconfig
index 659bdd079277..099fc0f5155e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -9,6 +9,7 @@ config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	depends on !PREEMPT_RT_FULL
 	select RING_BUFFER
 	select RING_BUFFER_ALLOW_SWAP
 	help
@@ -52,6 +53,7 @@ config KPROBES
 config JUMP_LABEL
 	bool "Optimize very unlikely/likely branches"
 	depends on HAVE_ARCH_JUMP_LABEL
+	depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST)
 	help
 	  This option enables a transparent branch optimization that makes certain
 	  almost-always-true or almost-always-false branch
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b5d529fdffab..5715844e83e3 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -36,7 +36,7 @@ config ARM
 	select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
 	select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
 	select HAVE_ARCH_HARDENED_USERCOPY
-	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU
+	select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && !PREEMPT_RT_BASE
 	select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU
 	select HAVE_ARCH_MMAP_RND_BITS if MMU
 	select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT)
@@ -75,6 +75,7 @@ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index e53638c8ed8a..6095a1649865 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -22,6 +22,8 @@
 #endif
 
 #ifndef __ASSEMBLY__
+#include <linux/cpumask.h>
+
 struct irqaction;
 struct pt_regs;
 extern void migrate_irqs(void);
diff --git a/arch/arm/include/asm/switch_to.h b/arch/arm/include/asm/switch_to.h
index 12ebfcc1d539..c962084605bc 100644
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h @@ -3,6 +3,13 @@ #include <linux/thread_info.h> +#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * For v7 SMP cores running a preemptible kernel we may be pre-empted * during a TLB maintenance operation, so execute an inner-shareable dsb @@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(struct task_struct *, struct thread_info #define switch_to(prev,next,last) \ do { \ __complete_pending_tlbi(); \ + switch_kmaps(prev, next); \ last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ } while (0) diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h index 776757d1604a..1f36a4eccc72 100644 --- a/arch/arm/include/asm/thread_info.h +++ b/arch/arm/include/asm/thread_info.h @@ -49,6 +49,7 @@ struct cpu_context_save { struct thread_info { unsigned long flags; /* low level flags */ int preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ __u32 cpu; /* cpu */ @@ -142,7 +143,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ -#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ +#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ +#define TIF_NEED_RESCHED_LAZY 7 #define TIF_NOHZ 12 /* in adaptive nohz mode */ #define TIF_USING_IWMMXT 17 @@ -152,6 +154,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) @@ -167,7 +170,8 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *, * Change these and you break ASM code in entry-common.S */ #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_UPROBE) + _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ + _TIF_NEED_RESCHED_LAZY) #endif /* __KERNEL__ */ #endif /* __ASM_ARM_THREAD_INFO_H */ diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 608008229c7d..3866da3f7bb7 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c @@ -65,6 +65,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 9f157e7c51e7..468e224d76aa 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -220,11 +220,18 @@ __irq_svc: #ifdef CONFIG_PREEMPT ldr r8, [tsk, #TI_PREEMPT] @ get preempt count - ldr r0, [tsk, #TI_FLAGS] @ get flags teq r8, #0 @ if 
preempt count != 0
+	bne	1f				@ return from exception
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set
+	blne	svc_preempt			@ preempt!
+
+	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r8, #0				@ if preempt lazy count != 0
 	movne	r0, #0				@ force flags to 0
-	tst	r0, #_TIF_NEED_RESCHED
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	blne	svc_preempt
+1:
 #endif
 
 	svc_exit r5, irq = 1			@ return from exception
@@ -239,8 +246,14 @@ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
+	bne	1b
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	reteq	r8				@ go again
-	b	1b
+	ldr	r0, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r0, #0				@ if preempt lazy count != 0
+	beq	1b
+	ret	r8				@ go again
+
 #endif
 
 __und_fault:
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 10c3283d6c19..8872937862cc 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -36,7 +36,9 @@ ret_fast_syscall:
 UNWIND(.cantunwind	)
 	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+	bne	fast_work_pending
+	tst	r1, #_TIF_SECCOMP
 	bne	fast_work_pending
 
 	/* perform architecture specific actions before user return */
@@ -62,8 +64,11 @@ ret_fast_syscall:
 	str	r0, [sp, #S_R0 + S_OFF]!	@ save returned r0
 	disable_irq_notrace			@ disable interrupts
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
-	tst	r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	tst	r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
+	bne	do_slower_path
+	tst	r1, #_TIF_SECCOMP
 	beq	no_work_pending
+do_slower_path:
 UNWIND(.fnend		)
 ENDPROC(ret_fast_syscall)
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 69bda1a5707e..1f665acaa6a9 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -15,7 +15,7 @@ struct patch {
 	unsigned int insn;
 };
 
-static DEFINE_SPINLOCK(patch_lock);
+static DEFINE_RAW_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 	__acquires(&patch_lock)
@@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 		return addr;
 
 	if (flags)
-		spin_lock_irqsave(&patch_lock, *flags);
+		raw_spin_lock_irqsave(&patch_lock, *flags);
 	else
 		__acquire(&patch_lock);
@@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
 	clear_fixmap(fixmap);
 
 	if (flags)
-		spin_unlock_irqrestore(&patch_lock, *flags);
+		raw_spin_unlock_irqrestore(&patch_lock, *flags);
 	else
 		__release(&patch_lock);
 }
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 91d2d5b01414..750550098b59 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 }
 
 #ifdef CONFIG_MMU
+/*
+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not
+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
+ * fail.
+ */ +static int __init vectors_user_mapping_init_page(void) +{ + struct page *page; + unsigned long addr = 0xffff0000; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; + + pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); + page = pmd_page(*(pmd)); + + pgtable_page_ctor(page); + + return 0; +} +late_initcall(vectors_user_mapping_init_page); + #ifdef CONFIG_KUSER_HELPERS /* * The vectors page is always readable from user space for the diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 7b8f2141427b..96541e00b74a 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c @@ -572,7 +572,8 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall) */ trace_hardirqs_off(); do { - if (likely(thread_flags & _TIF_NEED_RESCHED)) { + if (likely(thread_flags & (_TIF_NEED_RESCHED | + _TIF_NEED_RESCHED_LAZY))) { schedule(); } else { if (unlikely(!user_mode(regs))) diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7dd14e8395e6..4cd7e3d98035 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -234,8 +234,6 @@ int __cpu_disable(void) flush_cache_louis(); local_flush_tlb_all(); - clear_tasks_mm_cpumask(cpu); - return 0; } @@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu) pr_err("CPU%u: cpu didn't die\n", cpu); return; } + + clear_tasks_mm_cpumask(cpu); + pr_notice("CPU%u: shutdown\n", cpu); /* diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c index 0bee233fef9a..314cfb232a63 100644 --- a/arch/arm/kernel/unwind.c +++ b/arch/arm/kernel/unwind.c @@ -93,7 +93,7 @@ extern const struct unwind_idx __start_unwind_idx[]; static const struct unwind_idx *__origin_unwind_idx; extern const struct unwind_idx __stop_unwind_idx[]; -static DEFINE_SPINLOCK(unwind_lock); +static DEFINE_RAW_SPINLOCK(unwind_lock); static LIST_HEAD(unwind_tables); /* Convert a prel31 symbol to an absolute address */ @@ -201,7 +201,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) /* module unwind tables */ struct unwind_table *table; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_for_each_entry(table, &unwind_tables, list) { if (addr >= table->begin_addr && addr < table->end_addr) { @@ -213,7 +213,7 @@ static const struct unwind_idx *unwind_find_idx(unsigned long addr) break; } } - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); } pr_debug("%s: idx = %p\n", __func__, idx); @@ -529,9 +529,9 @@ struct unwind_table *unwind_table_add(unsigned long start, unsigned long size, tab->begin_addr = text_addr; tab->end_addr = text_addr + text_size; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_add_tail(&tab->list, &unwind_tables); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); return tab; } @@ -543,9 +543,9 @@ void unwind_table_del(struct unwind_table *tab) if (!tab) return; - spin_lock_irqsave(&unwind_lock, flags); + raw_spin_lock_irqsave(&unwind_lock, flags); list_del(&tab->list); - spin_unlock_irqrestore(&unwind_lock, flags); + raw_spin_unlock_irqrestore(&unwind_lock, flags); kfree(tab); } diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index 19b5f5c1c0ff..82aa639e6737 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) * involves poking the GIC, which must be done in a * non-preemptible context. 
*/ - preempt_disable(); + migrate_disable(); kvm_pmu_flush_hwstate(vcpu); kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); @@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_pmu_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); - preempt_enable(); + migrate_enable(); continue; } @@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_vgic_sync_hwstate(vcpu); - preempt_enable(); + migrate_enable(); ret = handle_exit(vcpu, run, ret); } diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c index 98ffe1e62ad5..df9769ddece5 100644 --- a/arch/arm/mach-exynos/platsmp.c +++ b/arch/arm/mach-exynos/platsmp.c @@ -229,7 +229,7 @@ static void __iomem *scu_base_addr(void) return (void __iomem *)(S5P_VA_SCU); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void exynos_secondary_init(unsigned int cpu) { @@ -242,8 +242,8 @@ static void exynos_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) @@ -307,7 +307,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -334,7 +334,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) if (timeout == 0) { printk(KERN_ERR "cpu1 power enable failed"); - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return -ETIMEDOUT; } } @@ -380,7 +380,7 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) * calibrations, then wait for it to finish */ fail: - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? ret : 0; } diff --git a/arch/arm/mach-hisi/platmcpm.c b/arch/arm/mach-hisi/platmcpm.c index 4b653a8cb75c..b03d5a922cb1 100644 --- a/arch/arm/mach-hisi/platmcpm.c +++ b/arch/arm/mach-hisi/platmcpm.c @@ -61,7 +61,7 @@ static void __iomem *sysctrl, *fabric; static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static u32 fabric_phys_addr; /* * [0]: bootwrapper physical address @@ -113,7 +113,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) return -EINVAL; - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); if (hip04_cpu_table[cluster][cpu]) goto out; @@ -147,7 +147,7 @@ static int hip04_boot_secondary(unsigned int l_cpu, struct task_struct *idle) out: hip04_cpu_table[cluster][cpu]++; - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 0; } @@ -162,11 +162,11 @@ static void hip04_cpu_die(unsigned int l_cpu) cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); hip04_cpu_table[cluster][cpu]--; if (hip04_cpu_table[cluster][cpu] == 1) { /* A power_up request went ahead of us. 
*/ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return; } else if (hip04_cpu_table[cluster][cpu] > 1) { pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); @@ -174,7 +174,7 @@ static void hip04_cpu_die(unsigned int l_cpu) } last_man = hip04_cluster_is_down(cluster); - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); if (last_man) { /* Since it's Cortex A15, disable L2 prefetching. */ asm volatile( @@ -203,7 +203,7 @@ static int hip04_cpu_kill(unsigned int l_cpu) cpu >= HIP04_MAX_CPUS_PER_CLUSTER); count = TIMEOUT_MSEC / POLL_MSEC; - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); for (tries = 0; tries < count; tries++) { if (hip04_cpu_table[cluster][cpu]) goto err; @@ -211,10 +211,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); if (data & CORE_WFI_STATUS(cpu)) break; - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); /* Wait for clean L2 when the whole cluster is down. */ msleep(POLL_MSEC); - spin_lock_irq(&boot_lock); + raw_spin_lock_irq(&boot_lock); } if (tries >= count) goto err; @@ -231,10 +231,10 @@ static int hip04_cpu_kill(unsigned int l_cpu) goto err; if (hip04_cluster_is_down(cluster)) hip04_set_snoop_filter(cluster, 0); - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 1; err: - spin_unlock_irq(&boot_lock); + raw_spin_unlock_irq(&boot_lock); return 0; } #endif diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c index b4de3da6dffa..b52893319d75 100644 --- a/arch/arm/mach-omap2/omap-smp.c +++ b/arch/arm/mach-omap2/omap-smp.c @@ -64,7 +64,7 @@ static const struct omap_smp_config omap5_cfg __initconst = { .startup_addr = omap5_secondary_startup, }; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void __iomem *omap4_get_scu_base(void) { @@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * Update the AuxCoreBoot0 with boot state for secondary core. @@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) * Now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return 0; } diff --git a/arch/arm/mach-prima2/platsmp.c b/arch/arm/mach-prima2/platsmp.c index 0875b99add18..18b6d98d2581 100644 --- a/arch/arm/mach-prima2/platsmp.c +++ b/arch/arm/mach-prima2/platsmp.c @@ -22,7 +22,7 @@ static void __iomem *clk_base; -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void sirfsoc_secondary_init(unsigned int cpu) { @@ -36,8 +36,8 @@ static void sirfsoc_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. 
*/ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static const struct of_device_id clk_ids[] = { @@ -75,7 +75,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) /* make sure write buffer is drained */ mb(); - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -107,7 +107,7 @@ static int sirfsoc_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mach-qcom/platsmp.c b/arch/arm/mach-qcom/platsmp.c index 5494c9e0c909..e8ce157d3548 100644 --- a/arch/arm/mach-qcom/platsmp.c +++ b/arch/arm/mach-qcom/platsmp.c @@ -46,7 +46,7 @@ extern void secondary_startup_arm(void); -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); #ifdef CONFIG_HOTPLUG_CPU static void qcom_cpu_die(unsigned int cpu) @@ -60,8 +60,8 @@ static void qcom_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int scss_release_secondary(unsigned int cpu) @@ -284,7 +284,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * Send the secondary CPU a soft interrupt, thereby causing @@ -297,7 +297,7 @@ static int qcom_boot_secondary(unsigned int cpu, int (*func)(unsigned int)) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return ret; } diff --git a/arch/arm/mach-spear/platsmp.c b/arch/arm/mach-spear/platsmp.c index 8d1e2d551786..7fa56cc78118 100644 --- a/arch/arm/mach-spear/platsmp.c +++ b/arch/arm/mach-spear/platsmp.c @@ -32,7 +32,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void __iomem *scu_base = IOMEM(VA_SCU_BASE); @@ -47,8 +47,8 @@ static void spear13xx_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -59,7 +59,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -84,7 +84,7 @@ static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm/mach-sti/platsmp.c b/arch/arm/mach-sti/platsmp.c index ea5a2277ee46..b988e081ac79 100644 --- a/arch/arm/mach-sti/platsmp.c +++ b/arch/arm/mach-sti/platsmp.c @@ -35,7 +35,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); static void sti_secondary_init(unsigned int cpu) { @@ -48,8 +48,8 @@ static void sti_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -60,7 +60,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * The secondary processor is waiting to be released from @@ -91,7 +91,7 @@ static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? -ENOSYS : 0; } diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f7861dc83182..ce47dfe25fb0 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -433,6 +433,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, if (addr < TASK_SIZE) return do_page_fault(addr, fsr, regs); + if (interrupts_enabled(regs)) + local_irq_enable(); + if (user_mode(regs)) goto bad_area; @@ -500,6 +503,9 @@ do_translation_fault(unsigned long addr, unsigned int fsr, static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + if (interrupts_enabled(regs)) + local_irq_enable(); + do_bad_area(addr, fsr, regs); return 0; } diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index d02f8187b1cc..542692dbd40a 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c @@ -34,6 +34,11 @@ static inline pte_t get_fixmap_pte(unsigned long vaddr) return *ptep; } +static unsigned int fixmap_idx(int type) +{ + return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); +} + void *kmap(struct page *page) { might_sleep(); @@ -54,12 +59,13 @@ EXPORT_SYMBOL(kunmap); void *kmap_atomic(struct page *page) { + pte_t pte = mk_pte(page, kmap_prot); unsigned int idx; unsigned long vaddr; void *kmap; int type; - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); @@ -79,7 +85,7 @@ void *kmap_atomic(struct page *page) type = kmap_atomic_idx_push(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM /* @@ -93,7 +99,10 @@ void *kmap_atomic(struct page *page) * in place, so the contained TLB flush ensures the TLB is updated * with the new mapping. 
*/ - set_fixmap_pte(idx, mk_pte(page, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_fixmap_pte(idx, pte); return (void *)vaddr; } @@ -106,44 +115,75 @@ void __kunmap_atomic(void *kvaddr) if (kvaddr >= (void *)FIXADDR_START) { type = kmap_atomic_idx(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); if (cache_is_vivt()) __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(vaddr != __fix_to_virt(idx)); - set_fixmap_pte(idx, __pte(0)); #else (void) idx; /* to kill a warning */ #endif + set_fixmap_pte(idx, __pte(0)); kmap_atomic_idx_pop(); } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { /* this address was obtained through kmap_high_get() */ kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); } pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } EXPORT_SYMBOL(__kunmap_atomic); void *kmap_atomic_pfn(unsigned long pfn) { + pte_t pte = pfn_pte(pfn, kmap_prot); unsigned long vaddr; int idx, type; struct page *page = pfn_to_page(pfn); - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) return page_address(page); type = kmap_atomic_idx_push(); - idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); + idx = fixmap_idx(type); vaddr = __fix_to_virt(idx); #ifdef CONFIG_DEBUG_HIGHMEM BUG_ON(!pte_none(get_fixmap_pte(vaddr))); #endif - set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_fixmap_pte(idx, pte); return (void *)vaddr; } +#if defined CONFIG_PREEMPT_RT_FULL +void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = fixmap_idx(i); + + set_fixmap_pte(idx, __pte(0)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = fixmap_idx(i); + + if (!pte_none(next_p->kmap_pte[i])) + set_fixmap_pte(idx, next_p->kmap_pte[i]); + } +} +#endif diff --git a/arch/arm/plat-versatile/platsmp.c b/arch/arm/plat-versatile/platsmp.c index c2366510187a..6b60f582b738 100644 --- a/arch/arm/plat-versatile/platsmp.c +++ b/arch/arm/plat-versatile/platsmp.c @@ -32,7 +32,7 @@ static void write_pen_release(int val) sync_cache_w(&pen_release); } -static DEFINE_SPINLOCK(boot_lock); +static DEFINE_RAW_SPINLOCK(boot_lock); void versatile_secondary_init(unsigned int cpu) { @@ -45,8 +45,8 @@ void versatile_secondary_init(unsigned int cpu) /* * Synchronise with the boot thread. */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); + raw_spin_lock(&boot_lock); + raw_spin_unlock(&boot_lock); } int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) @@ -57,7 +57,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * Set synchronisation state between this boot processor * and the secondary one */ - spin_lock(&boot_lock); + raw_spin_lock(&boot_lock); /* * This is really belt and braces; we hold unintended secondary @@ -87,7 +87,7 @@ int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) * now the secondary core is starting up let it run its * calibrations, then wait for it to finish */ - spin_unlock(&boot_lock); + raw_spin_unlock(&boot_lock); return pen_release != -1 ? 
-ENOSYS : 0; } diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index cf57a7799a0f..78d1b49fbed5 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -91,6 +91,7 @@ config ARM64 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_PREEMPT_LAZY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RCU_TABLE_FREE select HAVE_SYSCALL_TRACEPOINTS @@ -704,7 +705,7 @@ config XEN_DOM0 config XEN bool "Xen guest support on ARM64" - depends on ARM64 && OF + depends on ARM64 && OF && !PREEMPT_RT_FULL select SWIOTLB_XEN select PARAVIRT help diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e9ea5a6bd449..6c500ad63c6a 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -49,6 +49,7 @@ struct thread_info { mm_segment_t addr_limit; /* address limit */ struct task_struct *task; /* main task structure */ int preempt_count; /* 0 => preemptable, <0 => bug */ + int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ int cpu; /* cpu */ }; @@ -112,6 +113,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_NEED_RESCHED 1 #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ +#define TIF_NEED_RESCHED_LAZY 4 #define TIF_NOHZ 7 #define TIF_SYSCALL_TRACE 8 #define TIF_SYSCALL_AUDIT 9 @@ -127,6 +129,7 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_NOHZ (1 << TIF_NOHZ) #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) @@ -135,7 +138,9 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_32BIT (1 << TIF_32BIT) #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ - _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) + _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ + _TIF_NEED_RESCHED_LAZY) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c index c58ddf8c4062..a8f2f7c1fe12 100644 --- a/arch/arm64/kernel/asm-offsets.c +++ b/arch/arm64/kernel/asm-offsets.c @@ -38,6 +38,7 @@ int main(void) BLANK(); DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index b4c7db434654..433d846f4f51 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -430,11 +430,16 @@ el1_irq: #ifdef CONFIG_PREEMPT ldr w24, [tsk, #TI_PREEMPT] // get preempt count - cbnz w24, 1f // preempt count != 0 + cbnz w24, 2f // preempt count != 0 ldr x0, [tsk, #TI_FLAGS] // get flags - tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? - bl el1_preempt + tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? 
+ + ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count + cbnz w24, 2f // preempt lazy count != 0 + tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? 1: + bl el1_preempt +2: #endif #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_on @@ -448,6 +453,7 @@ el1_preempt: 1: bl preempt_schedule_irq // irq en/disable is done inside ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? + tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? ret x24 #endif diff --git a/arch/arm64/kernel/signal.c b/arch/arm64/kernel/signal.c index 404dd67080b9..639dc6d12e72 100644 --- a/arch/arm64/kernel/signal.c +++ b/arch/arm64/kernel/signal.c @@ -409,7 +409,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, */ trace_hardirqs_off(); do { - if (thread_flags & _TIF_NEED_RESCHED) { + if (thread_flags & _TIF_NEED_RESCHED_MASK) { schedule(); } else { local_irq_enable(); diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 5e844f68e847..dc613cc10f54 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE # config HIGHMEM bool "High Memory Support" - depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA + depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL config CPU_SUPPORTS_HIGHMEM bool diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 8f01f21e78f1..619485b777d2 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -52,10 +52,11 @@ config LOCKDEP_SUPPORT config RWSEM_GENERIC_SPINLOCK bool + default y if PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM bool - default y + default y if !PREEMPT_RT_FULL config GENERIC_LOCKBREAK bool @@ -134,6 +135,7 @@ config PPC select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select GENERIC_STRNCPY_FROM_USER select GENERIC_STRNLEN_USER + select HAVE_PREEMPT_LAZY select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select CLONE_BACKWARDS @@ -321,7 +323,7 @@ menu "Kernel options" config HIGHMEM bool "High memory support" - depends on PPC32 + depends on PPC32 && !PREEMPT_RT_FULL source kernel/Kconfig.hz source kernel/Kconfig.preempt diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 87e4b2d8dcd4..981e501a4359 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -43,6 +43,8 @@ struct thread_info { int cpu; /* cpu we're on */ int preempt_count; /* 0 => preemptable, <0 => BUG */ + int preempt_lazy_count; /* 0 => preemptable, + <0 => BUG */ unsigned long local_flags; /* private flags for thread */ #ifdef CONFIG_LIVEPATCH unsigned long *livepatch_sp; @@ -88,8 +90,7 @@ static inline struct thread_info *current_thread_info(void) #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ #define TIF_SIGPENDING 1 /* signal pending */ #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ -#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling - TIF_NEED_RESCHED */ +#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ #define TIF_32BIT 4 /* 32 bit binary */ #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ @@ -107,6 +108,8 @@ static inline struct thread_info *current_thread_info(void) #if defined(CONFIG_PPC64) #define TIF_ELF2ABI 18 /* function descriptors must die! 
*/ #endif +#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling + TIF_NEED_RESCHED */ /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) @@ -125,14 +128,16 @@ static inline struct thread_info *current_thread_info(void) #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE) #define _TIF_NOHZ (1<<TIF_NOHZ) +#define _TIF_NEED_RESCHED_LAZY (1<<TIF_NEED_RESCHED_LAZY) #define _TIF_SYSCALL_DOTRACE (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \ _TIF_NOHZ) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_RESTORE_TM) + _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY) #define _TIF_PERSYSCALL_MASK (_TIF_RESTOREALL|_TIF_NOERROR) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) /* Bits in local_flags */ /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */ diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index c833d88c423d..96e9fbc3f684 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c @@ -156,6 +156,7 @@ int main(void) DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); + DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); DEFINE(TI_TASK, offsetof(struct thread_info, task)); DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 3841d749a430..6dbaeff192b9 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S @@ -835,7 +835,14 @@ resume_kernel: cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ bne restore andi. r8,r8,_TIF_NEED_RESCHED + bne+ 1f + lwz r0,TI_PREEMPT_LAZY(r9) + cmpwi 0,r0,0 /* if non-zero, just restore regs and return */ + bne restore + lwz r0,TI_FLAGS(r9) + andi. r0,r0,_TIF_NEED_RESCHED_LAZY beq+ restore +1: lwz r3,_MSR(r1) andi. r0,r3,MSR_EE /* interrupts off? */ beq restore /* don't schedule if so */ @@ -846,11 +853,11 @@ resume_kernel: */ bl trace_hardirqs_off #endif -1: bl preempt_schedule_irq +2: bl preempt_schedule_irq CURRENT_THREAD_INFO(r9, r1) lwz r3,TI_FLAGS(r9) - andi. r0,r3,_TIF_NEED_RESCHED - bne- 1b + andi. r0,r3,_TIF_NEED_RESCHED_MASK + bne- 2b #ifdef CONFIG_TRACE_IRQFLAGS /* And now, to properly rebalance the above, we tell lockdep they * are being turned back on, which will happen when we return @@ -1171,7 +1178,7 @@ global_dbcr0: #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */ do_work: /* r10 contains MSR_KERNEL here */ - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,_TIF_NEED_RESCHED_MASK beq do_user_signal do_resched: /* r10 contains MSR_KERNEL here */ @@ -1192,7 +1199,7 @@ recheck: MTMSRD(r10) /* disable interrupts */ CURRENT_THREAD_INFO(r9, r1) lwz r9,TI_FLAGS(r9) - andi. r0,r9,_TIF_NEED_RESCHED + andi. r0,r9,_TIF_NEED_RESCHED_MASK bne- do_resched andi. r0,r9,_TIF_USER_WORK_MASK beq restore_user diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index caa659671599..891080c4a41e 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -656,7 +656,7 @@ _GLOBAL(ret_from_except_lite) bl restore_math b restore #endif -1: andi. r0,r4,_TIF_NEED_RESCHED +1: andi. 
r0,r4,_TIF_NEED_RESCHED_MASK beq 2f bl restore_interrupts SCHEDULE_USER @@ -718,10 +718,18 @@ resume_kernel: #ifdef CONFIG_PREEMPT /* Check if we need to preempt */ + lwz r8,TI_PREEMPT(r9) + cmpwi 0,r8,0 /* if non-zero, just restore regs and return */ + bne restore andi. r0,r4,_TIF_NEED_RESCHED + bne+ check_count + + andi. r0,r4,_TIF_NEED_RESCHED_LAZY beq+ restore + lwz r8,TI_PREEMPT_LAZY(r9) + /* Check that preempt_count() == 0 and interrupts are enabled */ - lwz r8,TI_PREEMPT(r9) +check_count: cmpwi cr1,r8,0 ld r0,SOFTE(r1) cmpdi r0,0 @@ -738,7 +746,7 @@ resume_kernel: /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) ld r4,TI_FLAGS(r9) - andi. r0,r4,_TIF_NEED_RESCHED + andi. r0,r4,_TIF_NEED_RESCHED_MASK bne 1b /* diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 028a22bfa90c..a75e2dd3e71f 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -651,6 +651,7 @@ void irq_ctx_init(void) } } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { struct thread_info *curtp, *irqtp; @@ -668,6 +669,7 @@ void do_softirq_own_stack(void) if (irqtp->flags) set_bits(irqtp->flags, &curtp->flags); } +#endif irq_hw_number_t virq_to_hw(unsigned int virq) { diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 030d72df5dd5..b471a709e100 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S @@ -41,6 +41,7 @@ * We store the saved ksp_limit in the unused part * of the STACK_FRAME_OVERHEAD */ +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 stw r0,4(r1) @@ -57,6 +58,7 @@ _GLOBAL(call_do_softirq) stw r10,THREAD+KSP_LIMIT(r2) mtlr r0 blr +#endif /* * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 4cefe6888b18..cb2ee4be999a 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -31,6 +31,7 @@ .text +#ifndef CONFIG_PREEMPT_RT_FULL _GLOBAL(call_do_softirq) mflr r0 std r0,16(r1) @@ -41,6 +42,7 @@ _GLOBAL(call_do_softirq) ld r0,16(r1) mtlr r0 blr +#endif _GLOBAL(call_do_irq) mflr r0 diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig index 029be26b5a17..9528089ea142 100644 --- a/arch/powerpc/kvm/Kconfig +++ b/arch/powerpc/kvm/Kconfig @@ -175,6 +175,7 @@ config KVM_E500MC config KVM_MPIC bool "KVM in-kernel MPIC emulation" depends on KVM && E500 + depends on !PREEMPT_RT_FULL select HAVE_KVM_IRQCHIP select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index e48462447ff0..2670cee66064 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c @@ -752,7 +752,7 @@ static int ps3_notification_read_write(struct ps3_notification_device *dev, } pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - res = wait_event_interruptible(dev->done.wait, + res = swait_event_interruptible(dev->done.wait, dev->done.done || kthread_should_stop()); if (kthread_should_stop()) res = -EINTR; diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 6c0378c0b8b5..abd58b4dff97 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu) hardirq_ctx[cpu] = NULL; } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { struct thread_info *curctx; @@ -174,6 +175,7 @@ void do_softirq_own_stack(void) "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" ); } +#endif 
#else static inline void handle_one_irq(unsigned int irq) { diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index b27e48e25841..2965bf76d070 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -194,12 +194,10 @@ config NR_CPUS source kernel/Kconfig.hz config RWSEM_GENERIC_SPINLOCK - bool - default y if SPARC32 + def_bool PREEMPT_RT_FULL config RWSEM_XCHGADD_ALGORITHM - bool - default y if SPARC64 + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_HWEIGHT bool diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 5cbf03c14981..6067d9379e5b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, struct pt_regs *regs) set_irq_regs(old_regs); } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { void *orig_sp, *sp = softirq_stack[smp_processor_id()]; @@ -868,6 +869,7 @@ void do_softirq_own_stack(void) __asm__ __volatile__("mov %0, %%sp" : : "r" (orig_sp)); } +#endif #ifdef CONFIG_HOTPLUG_CPU void fixup_irqs(void) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index bada636d1065..f8a995c90c01 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -17,6 +17,7 @@ config X86_64 ### Arch settings config X86 def_bool y + select HAVE_PREEMPT_LAZY select ACPI_LEGACY_TABLES_LOOKUP if ACPI select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI select ANON_INODES @@ -232,8 +233,11 @@ config ARCH_MAY_HAVE_PC_FDC def_bool y depends on ISA_DMA_API +config RWSEM_GENERIC_SPINLOCK + def_bool PREEMPT_RT_FULL + config RWSEM_XCHGADD_ALGORITHM - def_bool y + def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL config GENERIC_CALIBRATE_DELAY def_bool y @@ -897,7 +901,7 @@ config IOMMU_HELPER config MAXSMP bool "Enable Maximum number of SMP Processors and NUMA Nodes" depends on X86_64 && SMP && DEBUG_KERNEL - select CPUMASK_OFFSTACK + select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL ---help--- Enable maximum number of CPUS and NUMA Nodes for this architecture. If unsure, say N. 
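
The arm, arm64 and powerpc entry-path hunks above, and the x86 entry_32.S/entry_64.S hunks further below, all encode the same two-level preemption test. As a plain-C sketch of that decision logic (the helper name is illustrative and not part of this series; the fields are the thread_info members the patch adds):

/*
 * Sketch only: a C rendering of the irq-exit preemption test that the
 * assembly hunks in this series implement on each architecture.
 */
static bool irq_exit_should_preempt(struct thread_info *ti)
{
	if (ti->preempt_count)			/* preemption disabled */
		return false;
	if (ti->flags & _TIF_NEED_RESCHED)	/* hard request, always honored */
		return true;
	if (ti->preempt_lazy_count)		/* lazy preemption disabled */
		return false;
	return !!(ti->flags & _TIF_NEED_RESCHED_LAZY);
}

The split lets _TIF_NEED_RESCHED keep its usual semantics, while _TIF_NEED_RESCHED_LAZY requests can additionally be held off by the new, cheaper preempt_lazy_count.
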
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index aa8b0672f87a..2429414bfc71 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c @@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); + nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt(desc, &walk); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes)) { + kernel_fpu_begin(); aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } - kernel_fpu_end(); return err; } @@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_desc *desc, err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - kernel_fpu_begin(); while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { + kernel_fpu_begin(); aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, nbytes & AES_BLOCK_MASK, walk.iv); + kernel_fpu_end(); nbytes &= AES_BLOCK_SIZE - 1; err = blkcipher_walk_done(desc, &walk, nbytes); } if (walk.nbytes) { + kernel_fpu_begin(); ctr_crypt_final(ctx, &walk); + kernel_fpu_end(); err = blkcipher_walk_done(desc, &walk, 0); } - kernel_fpu_end(); return err; } diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c index 8648158f3916..d7699130ee36 100644 --- a/arch/x86/crypto/cast5_avx_glue.c +++ b/arch/x86/crypto/cast5_avx_glue.c @@ -59,7 +59,7 @@ static inline void cast5_fpu_end(bool fpu_enabled) static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, bool enc) { - bool fpu_enabled = false; + bool fpu_enabled; struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); const unsigned int bsize = CAST5_BLOCK_SIZE; unsigned int nbytes; @@ -75,7 +75,7 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, u8 *wsrc = walk->src.virt.addr; u8 *wdst = walk->dst.virt.addr; - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); /* Process multi-block batch */ if (nbytes >= bsize * 
CAST5_PARALLEL_BLOCKS) { @@ -103,10 +103,9 @@ static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, } while (nbytes >= bsize); done: + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -227,7 +226,7 @@ done: static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -236,12 +235,11 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; while ((nbytes = walk.nbytes)) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); nbytes = __cbc_decrypt(desc, &walk); + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - - cast5_fpu_end(fpu_enabled); return err; } @@ -311,7 +309,7 @@ done: static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { - fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); + fpu_enabled = cast5_fpu_begin(false, nbytes); nbytes = __ctr_crypt(desc, &walk); + cast5_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - cast5_fpu_end(fpu_enabled); - if (walk.nbytes) { ctr_crypt_final(desc, &walk); err = blkcipher_walk_done(desc, &walk, 0); diff --git a/arch/x86/crypto/glue_helper.c b/arch/x86/crypto/glue_helper.c index 6a85598931b5..3a506ce7ed93 100644 --- a/arch/x86/crypto/glue_helper.c +++ b/arch/x86/crypto/glue_helper.c @@ -39,7 +39,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, void *ctx = crypto_blkcipher_ctx(desc->tfm); const unsigned int bsize = 128 / 8; unsigned int nbytes, i, func_bytes; - bool fpu_enabled = false; + bool fpu_enabled; int err; err = blkcipher_walk_virt(desc, walk); @@ -49,7 +49,7 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, u8 *wdst = walk->dst.virt.addr; fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); for (i = 0; i < gctx->num_funcs; i++) { func_bytes = bsize * gctx->funcs[i].num_blocks; @@ -71,10 +71,10 @@ static int __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, } done: + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, walk, nbytes); } - glue_fpu_end(fpu_enabled); return err; } @@ -194,7 +194,7 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct scatterlist *src, unsigned int nbytes) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -203,12 +203,12 @@ int glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, while ((nbytes = walk.nbytes)) { fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - glue_fpu_end(fpu_enabled); return err; } EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); @@ -277,7 +277,7 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct 
scatterlist *src, unsigned int nbytes) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -286,13 +286,12 @@ int glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, while ((nbytes = walk.nbytes) >= bsize) { fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, nbytes); + desc, false, nbytes); nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); } - glue_fpu_end(fpu_enabled); - if (walk.nbytes) { glue_ctr_crypt_final_128bit( gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); @@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, void *tweak_ctx, void *crypt_ctx) { const unsigned int bsize = 128 / 8; - bool fpu_enabled = false; + bool fpu_enabled; struct blkcipher_walk walk; int err; @@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, /* set minimum length to bsize, for tweak_fn */ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, - desc, fpu_enabled, + desc, false, nbytes < bsize ? bsize : nbytes); - /* calculate first value of T */ tweak_fn(tweak_ctx, walk.iv, walk.iv); + glue_fpu_end(fpu_enabled); while (nbytes) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, + desc, false, nbytes); nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); + glue_fpu_end(fpu_enabled); err = blkcipher_walk_done(desc, &walk, nbytes); nbytes = walk.nbytes; } - - glue_fpu_end(fpu_enabled); - return err; } EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index bdd9cc59d20f..56d01a339ba4 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -129,7 +129,7 @@ static long syscall_trace_enter(struct pt_regs *regs) #define EXIT_TO_USERMODE_LOOP_FLAGS \ (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ - _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) + _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY) static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) { @@ -145,9 +145,16 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) /* We have work to do. */ local_irq_enable(); - if (cached_flags & _TIF_NEED_RESCHED) + if (cached_flags & _TIF_NEED_RESCHED_MASK) schedule(); +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (unlikely(current->forced_info.si_signo)) { + struct task_struct *t = current; + force_sig_info(t->forced_info.si_signo, &t->forced_info, t); + t->forced_info.si_signo = 0; + } +#endif if (cached_flags & _TIF_UPROBE) uprobe_notify_resume(regs); diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S index edba8606b99a..4a3389535fc6 100644 --- a/arch/x86/entry/entry_32.S +++ b/arch/x86/entry/entry_32.S @@ -308,8 +308,25 @@ END(ret_from_exception) ENTRY(resume_kernel) DISABLE_INTERRUPTS(CLBR_ANY) need_resched: + # preempt count == 0 + NEED_RESCHED set? cmpl $0, PER_CPU_VAR(__preempt_count) +#ifndef CONFIG_PREEMPT_LAZY jnz restore_all +#else + jz test_int_off + + # at least preempt count == 0 ? + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) + jne restore_all + + movl PER_CPU_VAR(current_task), %ebp + cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? + jnz restore_all + + testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp) + jz restore_all +test_int_off: +#endif testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
jz restore_all call preempt_schedule_irq diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index e7b0e7ff4c58..65916d49dbc9 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -546,7 +546,23 @@ retint_kernel: bt $9, EFLAGS(%rsp) /* were interrupts off? */ jnc 1f 0: cmpl $0, PER_CPU_VAR(__preempt_count) +#ifndef CONFIG_PREEMPT_LAZY jnz 1f +#else + jz do_preempt_schedule_irq + + # at least preempt count == 0 ? + cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) + jnz 1f + + movq PER_CPU_VAR(current_task), %rcx + cmpl $0, TASK_TI_preempt_lazy_count(%rcx) + jnz 1f + + bt $TIF_NEED_RESCHED_LAZY,TASK_TI_flags(%rcx) + jnc 1f +do_preempt_schedule_irq: +#endif call preempt_schedule_irq jmp 0b 1: @@ -894,6 +910,7 @@ bad_gs: jmp 2b .previous +#ifndef CONFIG_PREEMPT_RT_FULL /* Call softirq on interrupt stack. Interrupts are off. */ ENTRY(do_softirq_own_stack) pushq %rbp @@ -906,6 +923,7 @@ ENTRY(do_softirq_own_stack) decl PER_CPU_VAR(irq_count) ret END(do_softirq_own_stack) +#endif #ifdef CONFIG_XEN idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h index 17f218645701..11bd1b7ee6eb 100644 --- a/arch/x86/include/asm/preempt.h +++ b/arch/x86/include/asm/preempt.h @@ -79,17 +79,46 @@ static __always_inline void __preempt_count_sub(int val) * a decrement which hits zero means we have no preempt_count and should * reschedule. */ -static __always_inline bool __preempt_count_dec_and_test(void) +static __always_inline bool ____preempt_count_dec_and_test(void) { GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), e); } +static __always_inline bool __preempt_count_dec_and_test(void) +{ + if (____preempt_count_dec_and_test()) + return true; +#ifdef CONFIG_PREEMPT_LAZY + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else + return false; +#endif +} + /* * Returns true when we need to resched and can (barring IRQ state). */ static __always_inline bool should_resched(int preempt_offset) { +#ifdef CONFIG_PREEMPT_LAZY + u32 tmp; + + tmp = raw_cpu_read_4(__preempt_count); + if (tmp == preempt_offset) + return true; + + /* preempt count == 0 ? */ + tmp &= ~PREEMPT_NEED_RESCHED; + if (tmp) + return false; + if (current_thread_info()->preempt_lazy_count) + return false; + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +#else return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); +#endif } #ifdef CONFIG_PREEMPT diff --git a/arch/x86/include/asm/signal.h b/arch/x86/include/asm/signal.h index 8af22be0fe61..d1328789b759 100644 --- a/arch/x86/include/asm/signal.h +++ b/arch/x86/include/asm/signal.h @@ -27,6 +27,19 @@ typedef struct { #define SA_IA32_ABI 0x02000000u #define SA_X32_ABI 0x01000000u +/* + * Because some traps use the IST stack, we must keep preemption + * disabled while calling do_trap(), but do_trap() may call + * force_sig_info() which will grab the signal spin_locks for the + * task, which in PREEMPT_RT_FULL are mutexes. By defining + * ARCH_RT_DELAYS_SIGNAL_SEND, force_sig_info() will set + * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the + * trap.
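The preempt.h hunk above encodes the lazy-preemption policy in two places (the fast-path decrement and should_resched()). Reduced to a plain C sketch -- illustrative only, not part of the patch -- the decision order is: a task may be preempted immediately on TIF_NEED_RESCHED, while TIF_NEED_RESCHED_LAZY (set for fair-class wakeups) is honored only once both the hard preempt count and the per-task preempt_lazy_count are zero:

static bool should_preempt_now(void)
{
        if (preempt_count() != 0)
                return false;           /* hard preemption disabled */
        if (test_thread_flag(TIF_NEED_RESCHED))
                return true;            /* non-lazy request: always honored */
        if (current_thread_info()->preempt_lazy_count != 0)
                return false;           /* lazy preemption disabled */
        return test_thread_flag(TIF_NEED_RESCHED_LAZY);
}

This is what the entry_32.S/entry_64.S assembly above implements instruction by instruction.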
+ */ +#if defined(CONFIG_PREEMPT_RT_FULL) +#define ARCH_RT_DELAYS_SIGNAL_SEND +#endif + #ifndef CONFIG_COMPAT typedef sigset_t compat_sigset_t; #endif diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h index 58505f01962f..02fa39652cd6 100644 --- a/arch/x86/include/asm/stackprotector.h +++ b/arch/x86/include/asm/stackprotector.h @@ -59,7 +59,7 @@ */ static __always_inline void boot_init_stack_canary(void) { - u64 canary; + u64 uninitialized_var(canary); u64 tsc; #ifdef CONFIG_X86_64 @@ -70,8 +70,15 @@ static __always_inline void boot_init_stack_canary(void) * of randomness. The TSC only matters for very early init, * there it already has some randomness on most systems. Later * on during the bootup the random pool has true entropy too. + * + * For preempt-rt we need to weaken the randomness a bit, as + * we can't call into the random generator from atomic context + * due to locking constraints. We just leave canary + * uninitialized and use the TSC based randomness on top of it. */ +#ifndef CONFIG_PREEMPT_RT_FULL get_random_bytes(&canary, sizeof(canary)); +#endif tsc = rdtsc(); canary += tsc + (tsc << 32UL); diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h index ad6f5eb07a95..5ceb3a1c2b1a 100644 --- a/arch/x86/include/asm/thread_info.h +++ b/arch/x86/include/asm/thread_info.h @@ -54,11 +54,14 @@ struct task_struct; struct thread_info { unsigned long flags; /* low level flags */ + int preempt_lazy_count; /* 0 => lazy preemptable + <0 => BUG */ }; #define INIT_THREAD_INFO(tsk) \ { \ .flags = 0, \ + .preempt_lazy_count = 0, \ } #define init_stack (init_thread_union.stack) @@ -67,6 +70,10 @@ struct thread_info { #include <asm/asm-offsets.h> +#define GET_THREAD_INFO(reg) \ + _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \ + _ASM_SUB $(THREAD_SIZE),reg ; + #endif /* @@ -85,6 +92,7 @@ struct thread_info { #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ #define TIF_SECCOMP 8 /* secure computing */ +#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ #define TIF_UPROBE 12 /* breakpointed or singlestepping */ #define TIF_NOTSC 16 /* TSC is not accessible in userland */ @@ -108,6 +116,7 @@ struct thread_info { #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) #define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) #define _TIF_UPROBE (1 << TIF_UPROBE) #define _TIF_NOTSC (1 << TIF_NOTSC) @@ -143,6 +152,8 @@ struct thread_info { #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) +#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) + #define STACK_WARN (THREAD_SIZE/8) /* diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index 57ab86d94d64..35d25e27180f 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h @@ -624,9 +624,9 @@ struct bau_control { cycles_t send_message; cycles_t period_end; cycles_t period_time; - spinlock_t uvhub_lock; - spinlock_t queue_lock; - spinlock_t disable_lock; + raw_spinlock_t uvhub_lock; + raw_spinlock_t queue_lock; + raw_spinlock_t disable_lock; /* tunables */ int max_concurr; int max_concurr_const; @@ -815,15 +815,15 @@ static inline int 
atom_asr(short i, struct atomic_short *v) * to be lowered below the current 'v'. atomic_add_unless can only stop * on equal. */ -static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) +static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) { - spin_lock(lock); + raw_spin_lock(lock); if (atomic_read(v) >= u) { - spin_unlock(lock); + raw_spin_unlock(lock); return 0; } atomic_inc(v); - spin_unlock(lock); + raw_spin_unlock(lock); return 1; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b89bef95f63b..c3c1ad2fce5c 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; * ->ioapic_mutex * ->ioapic_lock */ +#ifdef CONFIG_X86_IO_APIC static DEFINE_MUTEX(acpi_ioapic_lock); +#endif /* -------------------------------------------------------------------------- Boot-time Configuration diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index cf89928dbd46..18b5ec2a71df 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(struct mp_chip_data *data) static inline bool ioapic_irqd_mask(struct irq_data *data) { /* If we are moving the irq we need to mask it */ - if (unlikely(irqd_is_setaffinity_pending(data))) { + if (unlikely(irqd_is_setaffinity_pending(data) && + !irqd_irq_inprogress(data))) { mask_ioapic_irq(data); return true; } diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c index c62e015b126c..0cc71257fca6 100644 --- a/arch/x86/kernel/asm-offsets.c +++ b/arch/x86/kernel/asm-offsets.c @@ -36,6 +36,7 @@ void common(void) { BLANK(); OFFSET(TASK_TI_flags, task_struct, thread_info.flags); + OFFSET(TASK_TI_preempt_lazy_count, task_struct, thread_info.preempt_lazy_count); OFFSET(TASK_addr_limit, task_struct, thread.addr_limit); BLANK(); @@ -91,4 +92,5 @@ void common(void) { BLANK(); DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); + DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); } diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 8ca5f8ad008e..edcbd18b3189 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c @@ -41,6 +41,8 @@ #include <linux/debugfs.h> #include <linux/irq_work.h> #include <linux/export.h> +#include <linux/jiffies.h> +#include <linux/swork.h> #include <linux/jump_label.h> #include <asm/processor.h> @@ -1306,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 status) static unsigned long check_interval = INITIAL_CHECK_INTERVAL; static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ -static DEFINE_PER_CPU(struct timer_list, mce_timer); +static DEFINE_PER_CPU(struct hrtimer, mce_timer); static unsigned long mce_adjust_timer_default(unsigned long interval) { @@ -1315,32 +1317,18 @@ static unsigned long mce_adjust_timer_default(unsigned long interval) static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; -static void __restart_timer(struct timer_list *t, unsigned long interval) +static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval) { - unsigned long when = jiffies + interval; - unsigned long flags; - - local_irq_save(flags); - - if (timer_pending(t)) { - if (time_before(when, t->expires)) - mod_timer(t, when); - } else { - t->expires = round_jiffies(when); - add_timer_on(t, smp_processor_id()); - } - - local_irq_restore(flags); + if 
(!interval) + return HRTIMER_NORESTART; + hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval))); + return HRTIMER_RESTART; } -static void mce_timer_fn(unsigned long data) +static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) { - struct timer_list *t = this_cpu_ptr(&mce_timer); - int cpu = smp_processor_id(); unsigned long iv; - WARN_ON(cpu != data); - iv = __this_cpu_read(mce_next_interval); if (mce_available(this_cpu_ptr(&cpu_info))) { @@ -1363,7 +1351,7 @@ static void mce_timer_fn(unsigned long data) done: __this_cpu_write(mce_next_interval, iv); - __restart_timer(t, iv); + return __restart_timer(timer, iv); } /* @@ -1371,7 +1359,7 @@ done: */ void mce_timer_kick(unsigned long interval) { - struct timer_list *t = this_cpu_ptr(&mce_timer); + struct hrtimer *t = this_cpu_ptr(&mce_timer); unsigned long iv = __this_cpu_read(mce_next_interval); __restart_timer(t, interval); @@ -1386,7 +1374,7 @@ static void mce_timer_delete_all(void) int cpu; for_each_online_cpu(cpu) - del_timer_sync(&per_cpu(mce_timer, cpu)); + hrtimer_cancel(&per_cpu(mce_timer, cpu)); } static void mce_do_trigger(struct work_struct *work) @@ -1396,6 +1384,56 @@ static void mce_do_trigger(struct work_struct *work) static DECLARE_WORK(mce_trigger_work, mce_do_trigger); +static void __mce_notify_work(struct swork_event *event) +{ + /* Not more than two messages every minute */ + static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); + + /* wake processes polling /dev/mcelog */ + wake_up_interruptible(&mce_chrdev_wait); + + /* + * There is no risk of missing notifications because + * work_pending is always cleared before the function is + * executed. + */ + if (mce_helper[0] && !work_pending(&mce_trigger_work)) + schedule_work(&mce_trigger_work); + + if (__ratelimit(&ratelimit)) + pr_info(HW_ERR "Machine check events logged\n"); +} + +#ifdef CONFIG_PREEMPT_RT_FULL +static bool notify_work_ready __read_mostly; +static struct swork_event notify_work; + +static int mce_notify_work_init(void) +{ + int err; + + err = swork_get(); + if (err) + return err; + + INIT_SWORK(¬ify_work, __mce_notify_work); + notify_work_ready = true; + return 0; +} + +static void mce_notify_work(void) +{ + if (notify_work_ready) + swork_queue(¬ify_work); +} +#else +static void mce_notify_work(void) +{ + __mce_notify_work(NULL); +} +static inline int mce_notify_work_init(void) { return 0; } +#endif + /* * Notify the user(s) about new machine check events. 
* Can be called from interrupt context, but not from machine check/NMI @@ -1403,19 +1441,8 @@ static DECLARE_WORK(mce_trigger_work, mce_do_trigger); */ int mce_notify_irq(void) { - /* Not more than two messages every minute */ - static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); - if (test_and_clear_bit(0, &mce_need_notify)) { - /* wake processes polling /dev/mcelog */ - wake_up_interruptible(&mce_chrdev_wait); - - if (mce_helper[0]) - schedule_work(&mce_trigger_work); - - if (__ratelimit(&ratelimit)) - pr_info(HW_ERR "Machine check events logged\n"); - + mce_notify_work(); return 1; } return 0; @@ -1721,7 +1748,7 @@ static void __mcheck_cpu_clear_vendor(struct cpuinfo_x86 *c) } } -static void mce_start_timer(unsigned int cpu, struct timer_list *t) +static void mce_start_timer(unsigned int cpu, struct hrtimer *t) { unsigned long iv = check_interval * HZ; @@ -1730,16 +1757,17 @@ static void mce_start_timer(unsigned int cpu, struct timer_list *t) per_cpu(mce_next_interval, cpu) = iv; - t->expires = round_jiffies(jiffies + iv); - add_timer_on(t, cpu); + hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), + 0, HRTIMER_MODE_REL_PINNED); } static void __mcheck_cpu_init_timer(void) { - struct timer_list *t = this_cpu_ptr(&mce_timer); + struct hrtimer *t = this_cpu_ptr(&mce_timer); unsigned int cpu = smp_processor_id(); - setup_pinned_timer(t, mce_timer_fn, cpu); + hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + t->function = mce_timer_fn; mce_start_timer(cpu, t); } @@ -2464,6 +2492,8 @@ static void mce_disable_cpu(void *h) if (!mce_available(raw_cpu_ptr(&cpu_info))) return; + hrtimer_cancel(this_cpu_ptr(&mce_timer)); + if (!(action & CPU_TASKS_FROZEN)) cmci_clear(); @@ -2486,6 +2516,7 @@ static void mce_reenable_cpu(void *h) if (b->init) wrmsrl(msr_ops.ctl(i), b->ctl); } + __mcheck_cpu_init_timer(); } /* Get notified when a cpu comes on/off. Be hotplug friendly. 
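The mce_notify_work()/swork_queue() split above is the generic RT pattern for pushing wake-ups and other lock-taking work out of atomic context into the schedulable swork daemon. A minimal sketch of the same usage -- handler and symbol names are invented for illustration:

#include <linux/swork.h>

static struct swork_event sample_event;
static bool sample_event_ready;

/* Runs in schedulable context; may take sleeping locks. */
static void sample_event_fn(struct swork_event *ev)
{
        pr_info("deferred notification ran\n");
}

static int __init sample_swork_init(void)
{
        int err = swork_get();  /* bring up the swork daemon */

        if (err)
                return err;
        INIT_SWORK(&sample_event, sample_event_fn);
        sample_event_ready = true;
        return 0;
}

/* Safe from IRQ/atomic context: only queues, never sleeps. */
static void sample_poke(void)
{
        if (sample_event_ready)
                swork_queue(&sample_event);
}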
*/ @@ -2493,7 +2524,6 @@ static int mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; - struct timer_list *t = &per_cpu(mce_timer, cpu); switch (action & ~CPU_TASKS_FROZEN) { case CPU_ONLINE: @@ -2513,11 +2543,9 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_DOWN_PREPARE: smp_call_function_single(cpu, mce_disable_cpu, &action, 1); - del_timer_sync(t); break; case CPU_DOWN_FAILED: smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); - mce_start_timer(cpu, t); break; } @@ -2556,6 +2584,10 @@ static __init int mcheck_init_device(void) goto err_out; } + err = mce_notify_work_init(); + if (err) + goto err_out; + if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { err = -ENOMEM; goto err_out; diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 1f38d9a4d9de..053bf3b2ef39 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c @@ -127,6 +127,7 @@ void irq_ctx_init(int cpu) cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); } +#ifndef CONFIG_PREEMPT_RT_FULL void do_softirq_own_stack(void) { struct irq_stack *irqstk; @@ -143,6 +144,7 @@ void do_softirq_own_stack(void) call_on_stack(__do_softirq, isp); } +#endif bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) { diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c index bd7be8efdc4c..b3b0a7f7b1ca 100644 --- a/arch/x86/kernel/process_32.c +++ b/arch/x86/kernel/process_32.c @@ -35,6 +35,7 @@ #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kdebug.h> +#include <linux/highmem.h> #include <asm/pgtable.h> #include <asm/ldt.h> @@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) } EXPORT_SYMBOL_GPL(start_thread); +#ifdef CONFIG_PREEMPT_RT_FULL +static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) +{ + int i; + + /* + * Clear @prev's kmap_atomic mappings + */ + for (i = 0; i < prev_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + pte_t *ptep = kmap_pte - idx; + + kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); + } + /* + * Restore @next_p's kmap_atomic mappings + */ + for (i = 0; i < next_p->kmap_idx; i++) { + int idx = i + KM_TYPE_NR * smp_processor_id(); + + if (!pte_none(next_p->kmap_pte[i])) + set_pte(kmap_pte - idx, next_p->kmap_pte[i]); + } +} +#else +static inline void +switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } +#endif + /* * switch_to(x,y) should switch tasks from x to y. @@ -271,6 +301,8 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p) task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); + switch_kmaps(prev_p, next_p); + /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 3f05c044720b..fe68afd37162 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vcpu) hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); apic->lapic_timer.timer.function = apic_timer_fn; + apic->lapic_timer.timer.irqsafe = 1; /* * APIC is created enabled. 
This will prevent kvm_lapic_set_base from diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 595f8149c0d9..31b15149f412 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5961,6 +5961,13 @@ int kvm_arch_init(void *opaque) goto out; } +#ifdef CONFIG_PREEMPT_RT_FULL + if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { + printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); + return -EOPNOTSUPP; + } +#endif + r = kvm_mmu_module_init(); if (r) goto out_free_percpu; diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index 6d18b70ed5a9..f752724c22e8 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c @@ -32,10 +32,11 @@ EXPORT_SYMBOL(kunmap); */ void *kmap_atomic_prot(struct page *page, pgprot_t prot) { + pte_t pte = mk_pte(page, prot); unsigned long vaddr; int idx, type; - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); if (!PageHighMem(page)) @@ -45,7 +46,10 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) idx = type + KM_TYPE_NR*smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); BUG_ON(!pte_none(*(kmap_pte-idx))); - set_pte(kmap_pte-idx, mk_pte(page, prot)); +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte-idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -88,6 +92,9 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); arch_flush_lazy_mmu_mode(); @@ -100,7 +107,7 @@ void __kunmap_atomic(void *kvaddr) #endif pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } EXPORT_SYMBOL(__kunmap_atomic); diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c index ada98b39b8ad..585f6829653b 100644 --- a/arch/x86/mm/iomap_32.c +++ b/arch/x86/mm/iomap_32.c @@ -56,6 +56,7 @@ EXPORT_SYMBOL_GPL(iomap_free); void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) { + pte_t pte = pfn_pte(pfn, prot); unsigned long vaddr; int idx, type; @@ -65,7 +66,12 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) type = kmap_atomic_idx_push(); idx = type + KM_TYPE_NR * smp_processor_id(); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); + WARN_ON(!pte_none(*(kmap_pte - idx))); + +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = pte; +#endif + set_pte(kmap_pte - idx, pte); arch_flush_lazy_mmu_mode(); return (void *)vaddr; @@ -113,6 +119,9 @@ iounmap_atomic(void __iomem *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ +#ifdef CONFIG_PREEMPT_RT_FULL + current->kmap_pte[type] = __pte(0); +#endif kpte_clear_flush(kmap_pte-idx, vaddr); kmap_atomic_idx_pop(); } diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index e3353c97d086..01664968555c 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c @@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned long *start, int numpages, int cache, int in_flags, struct page **pages) { unsigned int i, level; +#ifdef CONFIG_PREEMPT + /* + * Avoid wbinvd() because it causes latencies on all CPUs, + * regardless of any CPU isolation that may be in effect. 
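The preempt_disable_nort()/preempt_enable_nort() pair used in the kmap_atomic() hunks above follows a simple convention: "nort" means the call vanishes on RT kernels, so the mapping section stays preemptible there. Roughly (a simplified sketch, not the literal RT header definitions):

#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_nort()        barrier()
# define preempt_enable_nort()         barrier()
#else
# define preempt_disable_nort()        preempt_disable()
# define preempt_enable_nort()         preempt_enable()
#endif

Because an RT task can now be switched out while holding an atomic kmap, the patch also shadows the PTEs in current->kmap_pte so switch_kmaps() can tear them down and re-install them across a context switch, as seen in process_32.c above.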
+ */ + unsigned long do_wbinvd = 0; +#else unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */ +#endif BUG_ON(irqs_disabled()); diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 9e42842e924a..5398f97172f9 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -748,9 +748,9 @@ static void destination_plugged(struct bau_desc *bau_desc, quiesce_local_uvhub(hmaster); - spin_lock(&hmaster->queue_lock); + raw_spin_lock(&hmaster->queue_lock); reset_with_ipi(&bau_desc->distribution, bcp); - spin_unlock(&hmaster->queue_lock); + raw_spin_unlock(&hmaster->queue_lock); end_uvhub_quiesce(hmaster); @@ -770,9 +770,9 @@ static void destination_timeout(struct bau_desc *bau_desc, quiesce_local_uvhub(hmaster); - spin_lock(&hmaster->queue_lock); + raw_spin_lock(&hmaster->queue_lock); reset_with_ipi(&bau_desc->distribution, bcp); - spin_unlock(&hmaster->queue_lock); + raw_spin_unlock(&hmaster->queue_lock); end_uvhub_quiesce(hmaster); @@ -793,7 +793,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) cycles_t tm1; hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); + raw_spin_lock(&hmaster->disable_lock); if (!bcp->baudisabled) { stat->s_bau_disabled++; tm1 = get_cycles(); @@ -806,7 +806,7 @@ static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) } } } - spin_unlock(&hmaster->disable_lock); + raw_spin_unlock(&hmaster->disable_lock); } static void count_max_concurr(int stat, struct bau_control *bcp, @@ -869,7 +869,7 @@ static void record_send_stats(cycles_t time1, cycles_t time2, */ static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) { - spinlock_t *lock = &hmaster->uvhub_lock; + raw_spinlock_t *lock = &hmaster->uvhub_lock; atomic_t *v; v = &hmaster->active_descriptor_count; @@ -1002,7 +1002,7 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) struct bau_control *hmaster; hmaster = bcp->uvhub_master; - spin_lock(&hmaster->disable_lock); + raw_spin_lock(&hmaster->disable_lock); if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { stat->s_bau_reenabled++; for_each_present_cpu(tcpu) { @@ -1014,10 +1014,10 @@ static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) tbcp->period_giveups = 0; } } - spin_unlock(&hmaster->disable_lock); + raw_spin_unlock(&hmaster->disable_lock); return 0; } - spin_unlock(&hmaster->disable_lock); + raw_spin_unlock(&hmaster->disable_lock); return -1; } @@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables(void) bcp->cong_reps = congested_reps; bcp->disabled_period = sec_2_cycles(disabled_period); bcp->giveup_limit = giveup_limit; - spin_lock_init(&bcp->queue_lock); - spin_lock_init(&bcp->uvhub_lock); - spin_lock_init(&bcp->disable_lock); + raw_spin_lock_init(&bcp->queue_lock); + raw_spin_lock_init(&bcp->uvhub_lock); + raw_spin_lock_init(&bcp->disable_lock); } } diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c index b333fc45f9ec..8b85916e6986 100644 --- a/arch/x86/platform/uv/uv_time.c +++ b/arch/x86/platform/uv/uv_time.c @@ -57,7 +57,7 @@ static DEFINE_PER_CPU(struct clock_event_device, cpu_ced); /* There is one of these allocated per node */ struct uv_rtc_timer_head { - spinlock_t lock; + raw_spinlock_t lock; /* next cpu waiting for timer, local node relative: */ int next_cpu; /* number of cpus on this node: */ @@ -177,7 +177,7 @@ static __init int uv_rtc_allocate_timers(void) uv_rtc_deallocate_timers(); return -ENOMEM; } - 
spin_lock_init(&head->lock); + raw_spin_lock_init(&head->lock); head->ncpus = uv_blade_nr_possible_cpus(bid); head->next_cpu = -1; blade_info[bid] = head; @@ -231,7 +231,7 @@ static int uv_rtc_set_timer(int cpu, u64 expires) unsigned long flags; int next_cpu; - spin_lock_irqsave(&head->lock, flags); + raw_spin_lock_irqsave(&head->lock, flags); next_cpu = head->next_cpu; *t = expires; @@ -243,12 +243,12 @@ static int uv_rtc_set_timer(int cpu, u64 expires) if (uv_setup_intr(cpu, expires)) { *t = ULLONG_MAX; uv_rtc_find_next_timer(head, pnode); - spin_unlock_irqrestore(&head->lock, flags); + raw_spin_unlock_irqrestore(&head->lock, flags); return -ETIME; } } - spin_unlock_irqrestore(&head->lock, flags); + raw_spin_unlock_irqrestore(&head->lock, flags); return 0; } @@ -267,7 +267,7 @@ static int uv_rtc_unset_timer(int cpu, int force) unsigned long flags; int rc = 0; - spin_lock_irqsave(&head->lock, flags); + raw_spin_lock_irqsave(&head->lock, flags); if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) rc = 1; @@ -279,7 +279,7 @@ static int uv_rtc_unset_timer(int cpu, int force) uv_rtc_find_next_timer(head, pnode); } - spin_unlock_irqrestore(&head->lock, flags); + raw_spin_unlock_irqrestore(&head->lock, flags); return rc; } @@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, int force) static cycle_t uv_read_rtc(struct clocksource *cs) { unsigned long offset; + cycle_t cycles; + preempt_disable(); if (uv_get_min_hub_revision_id() == 1) offset = 0; else offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; - return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); + cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); + preempt_enable(); + + return cycles; } /* diff --git a/block/blk-core.c b/block/blk-core.c index 95379fc83805..e531da0c9232 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q, struct request *rq) INIT_LIST_HEAD(&rq->queuelist); INIT_LIST_HEAD(&rq->timeout_list); +#ifdef CONFIG_PREEMPT_RT_FULL + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); +#endif rq->cpu = -1; rq->q = q; rq->__sector = (sector_t) -1; @@ -233,7 +236,7 @@ EXPORT_SYMBOL(blk_start_queue_async); **/ void blk_start_queue(struct request_queue *q) { - WARN_ON(!in_interrupt() && !irqs_disabled()); + WARN_ON_NONRT(!in_interrupt() && !irqs_disabled()); queue_flag_clear(QUEUE_FLAG_STOPPED, q); __blk_run_queue(q); @@ -659,7 +662,7 @@ int blk_queue_enter(struct request_queue *q, bool nowait) if (nowait) return -EBUSY; - ret = wait_event_interruptible(q->mq_freeze_wq, + ret = swait_event_interruptible(q->mq_freeze_wq, !atomic_read(&q->mq_freeze_depth) || blk_queue_dying(q)); if (blk_queue_dying(q)) @@ -679,7 +682,7 @@ static void blk_queue_usage_counter_release(struct percpu_ref *ref) struct request_queue *q = container_of(ref, struct request_queue, q_usage_counter); - wake_up_all(&q->mq_freeze_wq); + swake_up_all(&q->mq_freeze_wq); } static void blk_rq_timed_out_timer(unsigned long data) @@ -748,7 +751,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) q->bypass_depth = 1; __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); - init_waitqueue_head(&q->mq_freeze_wq); + init_swait_queue_head(&q->mq_freeze_wq); /* * Init percpu_ref in atomic mode so that it's faster to shutdown. 
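The mq_freeze_wq conversion above moves from wait_queue_head_t to the simple-wait (swait) flavor: swait queues use a raw lock and do plain wake-ups with no custom callbacks, so waking them is safe from the non-sleeping contexts RT cares about. A minimal waiter/waker sketch with invented names:

#include <linux/atomic.h>
#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(freeze_wq);
static atomic_t freeze_depth = ATOMIC_INIT(1);

static void wait_until_unfrozen(void)
{
        swait_event(freeze_wq, atomic_read(&freeze_depth) == 0);
}

static void unfreeze(void)
{
        if (atomic_dec_and_test(&freeze_depth))
                swake_up_all(&freeze_wq);
}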
@@ -3200,7 +3203,7 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth, blk_run_queue_async(q); else __blk_run_queue(q); - spin_unlock(q->queue_lock); + spin_unlock_irq(q->queue_lock); } static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) @@ -3248,7 +3251,6 @@ EXPORT_SYMBOL(blk_check_plugged); void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) { struct request_queue *q; - unsigned long flags; struct request *rq; LIST_HEAD(list); unsigned int depth; @@ -3268,11 +3270,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) q = NULL; depth = 0; - /* - * Save and disable interrupts here, to avoid doing it for every - * queue lock we have to take. - */ - local_irq_save(flags); while (!list_empty(&list)) { rq = list_entry_rq(list.next); list_del_init(&rq->queuelist); @@ -3285,7 +3282,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) queue_unplugged(q, depth, from_schedule); q = rq->q; depth = 0; - spin_lock(q->queue_lock); + spin_lock_irq(q->queue_lock); } /* @@ -3312,8 +3309,6 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) */ if (q) queue_unplugged(q, depth, from_schedule); - - local_irq_restore(flags); } void blk_finish_plug(struct blk_plug *plug) diff --git a/block/blk-ioc.c b/block/blk-ioc.c index 381cb50a673c..dc8785233d94 100644 --- a/block/blk-ioc.c +++ b/block/blk-ioc.c @@ -7,6 +7,7 @@ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/slab.h> +#include <linux/delay.h> #include "blk.h" @@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_struct *work) spin_unlock(q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); spin_lock_irqsave_nested(&ioc->lock, flags, 1); } } @@ -187,7 +188,7 @@ retry: spin_unlock(icq->q->queue_lock); } else { spin_unlock_irqrestore(&ioc->lock, flags); - cpu_relax(); + cpu_chill(); goto retry; } } diff --git a/block/blk-mq.c b/block/blk-mq.c index 7b597ec4e9c5..48c9652a701c 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); static void blk_mq_freeze_queue_wait(struct request_queue *q) { - wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); + swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); } /* @@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct request_queue *q) WARN_ON_ONCE(freeze_depth < 0); if (!freeze_depth) { percpu_ref_reinit(&q->q_usage_counter); - wake_up_all(&q->mq_freeze_wq); + swake_up_all(&q->mq_freeze_wq); } } EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); @@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_queue *q) * dying, we need to ensure that processes currently waiting on * the queue are notified as well. 
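The blk-ioc hunks above swap cpu_relax() for cpu_chill() in trylock-retry loops. On RT, spinning on a lock held by a preempted task can livelock; cpu_chill() instead sleeps briefly so the holder can run (on non-RT builds it falls back to cpu_relax()). The idiom, with invented lock names:

#include <linux/delay.h>        /* cpu_chill() on RT kernels */
#include <linux/spinlock.h>

static void grab_both(spinlock_t *outer, spinlock_t *inner)
{
        unsigned long flags;

retry:
        spin_lock_irqsave(outer, flags);
        if (!spin_trylock(inner)) {
                spin_unlock_irqrestore(outer, flags);
                cpu_chill();    /* sleep a tick instead of spinning */
                goto retry;
        }
        /* ... work with both locks held ... */
        spin_unlock(inner);
        spin_unlock_irqrestore(outer, flags);
}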
*/ - wake_up_all(&q->mq_freeze_wq); + swake_up_all(&q->mq_freeze_wq); } bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) @@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, rq->resid_len = 0; rq->sense = NULL; +#ifdef CONFIG_PREEMPT_RT_FULL + INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); +#endif INIT_LIST_HEAD(&rq->timeout_list); rq->timeout = 0; @@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *rq, int error) } EXPORT_SYMBOL(blk_mq_end_request); +#ifdef CONFIG_PREEMPT_RT_FULL + +void __blk_mq_complete_request_remote_work(struct work_struct *work) +{ + struct request *rq = container_of(work, struct request, work); + + rq->q->softirq_done_fn(rq); +} + +#else + static void __blk_mq_complete_request_remote(void *data) { struct request *rq = data; @@ -352,6 +366,8 @@ static void __blk_mq_complete_request_remote(void *data) rq->q->softirq_done_fn(rq); } +#endif + static void blk_mq_ipi_complete_request(struct request *rq) { struct blk_mq_ctx *ctx = rq->mq_ctx; @@ -363,19 +379,23 @@ static void blk_mq_ipi_complete_request(struct request *rq) return; } - cpu = get_cpu(); + cpu = get_cpu_light(); if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) shared = cpus_share_cache(cpu, ctx->cpu); if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { +#ifdef CONFIG_PREEMPT_RT_FULL + schedule_work_on(ctx->cpu, &rq->work); +#else rq->csd.func = __blk_mq_complete_request_remote; rq->csd.info = rq; rq->csd.flags = 0; smp_call_function_single_async(ctx->cpu, &rq->csd); +#endif } else { rq->q->softirq_done_fn(rq); } - put_cpu(); + put_cpu_light(); } static void __blk_mq_complete_request(struct request *rq) @@ -906,14 +926,14 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async) return; if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) { - int cpu = get_cpu(); + int cpu = get_cpu_light(); if (cpumask_test_cpu(cpu, hctx->cpumask)) { __blk_mq_run_hw_queue(hctx); - put_cpu(); + put_cpu_light(); return; } - put_cpu(); + put_cpu_light(); } kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work); diff --git a/block/blk-mq.h b/block/blk-mq.h index e5d25249028c..1e846b842eab 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, */ static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) { - return __blk_mq_get_ctx(q, get_cpu()); + return __blk_mq_get_ctx(q, get_cpu_light()); } static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) { - put_cpu(); + put_cpu_light(); } struct blk_mq_alloc_data { diff --git a/block/blk-softirq.c b/block/blk-softirq.c index 06cf9807f49a..c40342643ca0 100644 --- a/block/blk-softirq.c +++ b/block/blk-softirq.c @@ -51,6 +51,7 @@ static void trigger_softirq(void *data) raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } /* @@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned int cpu) this_cpu_ptr(&blk_cpu_done)); raise_softirq_irqoff(BLOCK_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); return 0; } @@ -141,6 +143,7 @@ do_local: goto do_local; local_irq_restore(flags); + preempt_check_resched_rt(); } /** diff --git a/block/bounce.c b/block/bounce.c index 1cb5dd3a5da1..2f1ec8a67cbe 100644 --- a/block/bounce.c +++ b/block/bounce.c @@ -55,11 +55,11 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom) unsigned long flags; unsigned char *vto; - local_irq_save(flags); + local_irq_save_nort(flags); vto = 
kmap_atomic(to->bv_page); memcpy(vto + to->bv_offset, vfrom, to->bv_len); kunmap_atomic(vto); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #else /* CONFIG_HIGHMEM */ diff --git a/crypto/algapi.c b/crypto/algapi.c index 1fad2a6b3bbb..ecb7315426a9 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -719,13 +719,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2); int crypto_register_notifier(struct notifier_block *nb) { - return blocking_notifier_chain_register(&crypto_chain, nb); + return srcu_notifier_chain_register(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_register_notifier); int crypto_unregister_notifier(struct notifier_block *nb) { - return blocking_notifier_chain_unregister(&crypto_chain, nb); + return srcu_notifier_chain_unregister(&crypto_chain, nb); } EXPORT_SYMBOL_GPL(crypto_unregister_notifier); diff --git a/crypto/api.c b/crypto/api.c index bbc147cb5dec..bc1a848f02ec 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -31,7 +31,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list); DECLARE_RWSEM(crypto_alg_sem); EXPORT_SYMBOL_GPL(crypto_alg_sem); -BLOCKING_NOTIFIER_HEAD(crypto_chain); +SRCU_NOTIFIER_HEAD(crypto_chain); EXPORT_SYMBOL_GPL(crypto_chain); static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); @@ -236,10 +236,10 @@ int crypto_probing_notify(unsigned long val, void *v) { int ok; - ok = blocking_notifier_call_chain(&crypto_chain, val, v); + ok = srcu_notifier_call_chain(&crypto_chain, val, v); if (ok == NOTIFY_DONE) { request_module("cryptomgr"); - ok = blocking_notifier_call_chain(&crypto_chain, val, v); + ok = srcu_notifier_call_chain(&crypto_chain, val, v); } return ok; diff --git a/crypto/internal.h b/crypto/internal.h index 7eefcdb00227..0ecc7f5a2f40 100644 --- a/crypto/internal.h +++ b/crypto/internal.h @@ -47,7 +47,7 @@ struct crypto_larval { extern struct list_head crypto_alg_list; extern struct rw_semaphore crypto_alg_sem; -extern struct blocking_notifier_head crypto_chain; +extern struct srcu_notifier_head crypto_chain; #ifdef CONFIG_PROC_FS void __init crypto_init_proc(void); @@ -146,7 +146,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg) static inline void crypto_notify(unsigned long val, void *v) { - blocking_notifier_call_chain(&crypto_chain, val, v); + srcu_notifier_call_chain(&crypto_chain, val, v); } #endif /* _CRYPTO_INTERNAL_H */ diff --git a/drivers/acpi/acpica/acglobal.h b/drivers/acpi/acpica/acglobal.h index 750fa824d42c..441edf51484a 100644 --- a/drivers/acpi/acpica/acglobal.h +++ b/drivers/acpi/acpica/acglobal.h @@ -116,7 +116,7 @@ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pending); * interrupt level */ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ -ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ +ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); /* Mutex for _OSI support */ diff --git a/drivers/acpi/acpica/hwregs.c b/drivers/acpi/acpica/hwregs.c index 3b7fb99362b6..696bf8e62afb 100644 --- a/drivers/acpi/acpica/hwregs.c +++ b/drivers/acpi/acpica/hwregs.c @@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(void) ACPI_BITMASK_ALL_FIXED_STATUS, ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* Clear the fixed events in PM1 A/B */ status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, 
ACPI_BITMASK_ALL_FIXED_STATUS); - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); if (ACPI_FAILURE(status)) { goto exit; diff --git a/drivers/acpi/acpica/hwxface.c b/drivers/acpi/acpica/hwxface.c index 98c26ff39409..6e236f2ea791 100644 --- a/drivers/acpi/acpica/hwxface.c +++ b/drivers/acpi/acpica/hwxface.c @@ -373,7 +373,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) return_ACPI_STATUS(AE_BAD_PARAMETER); } - lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); + raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); /* * At this point, we know that the parent register is one of the @@ -434,7 +434,7 @@ acpi_status acpi_write_bit_register(u32 register_id, u32 value) unlock_and_exit: - acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); + raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); return_ACPI_STATUS(status); } diff --git a/drivers/acpi/acpica/utmutex.c b/drivers/acpi/acpica/utmutex.c index 15073375bd00..357e7ca5a587 100644 --- a/drivers/acpi/acpica/utmutex.c +++ b/drivers/acpi/acpica/utmutex.c @@ -88,7 +88,7 @@ acpi_status acpi_ut_mutex_initialize(void) return_ACPI_STATUS (status); } - status = acpi_os_create_lock (&acpi_gbl_hardware_lock); + status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); if (ACPI_FAILURE (status)) { return_ACPI_STATUS (status); } @@ -145,7 +145,7 @@ void acpi_ut_mutex_terminate(void) /* Delete the spinlocks */ acpi_os_delete_lock(acpi_gbl_gpe_lock); - acpi_os_delete_lock(acpi_gbl_hardware_lock); + acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); acpi_os_delete_lock(acpi_gbl_reference_count_lock); /* Delete the reader/writer lock */ diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index 051b6158d1b7..7ad293bef6ed 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, unsigned long flags; unsigned int consumed; - local_irq_save(flags); + local_irq_save_nort(flags); consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); - local_irq_restore(flags); + local_irq_restore_nort(flags); return consumed; } @@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) unsigned long flags; /* FIXME: use a bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_queued_cmd *qc) do_write); kunmap_atomic(buf); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, @@ -864,7 +864,7 @@ next_sg: unsigned long flags; /* FIXME: use bounce buffer */ - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page); /* do the actual data transfer */ @@ -872,7 +872,7 @@ next_sg: count, rw); kunmap_atomic(buf); - local_irq_restore(flags); + local_irq_restore_nort(flags); } else { buf = page_address(page); consumed = ap->ops->sff_data_xfer(dev, buf + offset, diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 4b5cd3a7b2b6..fa8329ad79fd 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -118,12 +118,19 @@ ssize_t zcomp_available_show(const char *comp, char *buf) struct zcomp_strm *zcomp_stream_get(struct zcomp *comp) { - return *get_cpu_ptr(comp->stream); + struct zcomp_strm *zstrm; + + zstrm = *this_cpu_ptr(comp->stream); + 
spin_lock(&zstrm->zcomp_lock); + return zstrm; } void zcomp_stream_put(struct zcomp *comp) { - put_cpu_ptr(comp->stream); + struct zcomp_strm *zstrm; + + zstrm = *this_cpu_ptr(comp->stream); + spin_unlock(&zstrm->zcomp_lock); } int zcomp_compress(struct zcomp_strm *zstrm, @@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct zcomp *comp, pr_err("Can't allocate a compression stream\n"); return NOTIFY_BAD; } + spin_lock_init(&zstrm->zcomp_lock); *per_cpu_ptr(comp->stream, cpu) = zstrm; break; case CPU_DEAD: diff --git a/drivers/block/zram/zcomp.h b/drivers/block/zram/zcomp.h index 478cac2ed465..f7a6efdc3285 100644 --- a/drivers/block/zram/zcomp.h +++ b/drivers/block/zram/zcomp.h @@ -14,6 +14,7 @@ struct zcomp_strm { /* compression/decompression buffer */ void *buffer; struct crypto_comp *tfm; + spinlock_t zcomp_lock; }; /* dynamic per-device compression frontend */ diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index c9914d653968..2038d138f286 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize) goto out_error; } + zram_meta_init_table_locks(meta, disksize); + return meta; out_error: @@ -575,28 +577,28 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index) struct zram_meta *meta = zram->meta; unsigned long handle; unsigned int size; + struct zcomp_strm *zstrm; - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); handle = meta->table[index].handle; size = zram_get_obj_size(meta, index); if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); memset(mem, 0, PAGE_SIZE); return 0; } + zstrm = zcomp_stream_get(zram->comp); cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO); if (size == PAGE_SIZE) { memcpy(mem, cmem, PAGE_SIZE); } else { - struct zcomp_strm *zstrm = zcomp_stream_get(zram->comp); - ret = zcomp_decompress(zstrm, cmem, size, mem); - zcomp_stream_put(zram->comp); } zs_unmap_object(meta->mem_pool, handle); - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zcomp_stream_put(zram->comp); + zram_unlock_table(&meta->table[index]); /* Should NEVER happen. Return bio error if it does. */ if (unlikely(ret)) { @@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec, struct zram_meta *meta = zram->meta; page = bvec->bv_page; - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); if (unlikely(!meta->table[index].handle) || zram_test_flag(meta, index, ZRAM_ZERO)) { - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); handle_zero_page(bvec); return 0; } - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); if (is_partial_io(bvec)) /* Use a temporary buffer to decompress the page */ @@ -700,10 +702,10 @@ compress_again: if (user_mem) kunmap_atomic(user_mem); /* Free memory associated with this sector now. 
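With the zcomp rework earlier in this section, stream serialization moves from get_cpu_ptr() (a preempt-disable held across the whole compress operation) to a per-stream spinlock, which PREEMPT_RT can turn into a sleeping lock. A caller now looks roughly like this -- a sketch, with buffer handling elided:

static int sample_compress(struct zcomp *comp, const void *src,
                           unsigned int *dst_len)
{
        struct zcomp_strm *zstrm;
        int ret;

        zstrm = zcomp_stream_get(comp); /* takes zstrm->zcomp_lock */
        ret = zcomp_compress(zstrm, src, dst_len);
        /* compressed data is now in zstrm->buffer */
        zcomp_stream_put(comp);         /* drops the lock */
        return ret;
}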
*/ - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); zram_free_page(zram, index); zram_set_flag(meta, index, ZRAM_ZERO); - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); atomic64_inc(&zram->stats.zero_pages); ret = 0; @@ -794,12 +796,12 @@ compress_again: * Free memory associated with this sector * before overwriting unused sectors. */ - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); zram_free_page(zram, index); meta->table[index].handle = handle; zram_set_obj_size(meta, index, clen); - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); /* Update stats */ atomic64_add(clen, &zram->stats.compr_data_size); @@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram *zram, u32 index, } while (n >= PAGE_SIZE) { - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); zram_free_page(zram, index); - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); atomic64_inc(&zram->stats.notify_free); index++; n -= PAGE_SIZE; @@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct block_device *bdev, zram = bdev->bd_disk->private_data; meta = zram->meta; - bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); + zram_lock_table(&meta->table[index]); zram_free_page(zram, index); - bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); + zram_unlock_table(&meta->table[index]); atomic64_inc(&zram->stats.notify_free); } diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h index 74fcf10da374..fd4020c99b9e 100644 --- a/drivers/block/zram/zram_drv.h +++ b/drivers/block/zram/zram_drv.h @@ -73,6 +73,9 @@ enum zram_pageflags { struct zram_table_entry { unsigned long handle; unsigned long value; +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t lock; +#endif }; struct zram_stats { @@ -120,4 +123,42 @@ struct zram { */ bool claim; /* Protected by bdev->bd_mutex */ }; + +#ifndef CONFIG_PREEMPT_RT_BASE +static inline void zram_lock_table(struct zram_table_entry *table) +{ + bit_spin_lock(ZRAM_ACCESS, &table->value); +} + +static inline void zram_unlock_table(struct zram_table_entry *table) +{ + bit_spin_unlock(ZRAM_ACCESS, &table->value); +} + +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { } +#else /* CONFIG_PREEMPT_RT_BASE */ +static inline void zram_lock_table(struct zram_table_entry *table) +{ + spin_lock(&table->lock); + __set_bit(ZRAM_ACCESS, &table->value); +} + +static inline void zram_unlock_table(struct zram_table_entry *table) +{ + __clear_bit(ZRAM_ACCESS, &table->value); + spin_unlock(&table->lock); +} + +static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) +{ + size_t num_pages = disksize >> PAGE_SHIFT; + size_t index; + + for (index = 0; index < num_pages; index++) { + spinlock_t *lock = &meta->table[index].lock; + spin_lock_init(lock); + } +} +#endif /* CONFIG_PREEMPT_RT_BASE */ + #endif diff --git a/drivers/char/random.c b/drivers/char/random.c index 08d1dd58c0d2..25ee319dc8e3 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -262,6 +262,7 @@ #include <linux/syscalls.h> #include <linux/completion.h> #include <linux/uuid.h> +#include <linux/locallock.h> #include <crypto/chacha20.h> #include <asm/processor.h> @@ -1028,8 +1029,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) } sample; long 
delta, delta2, delta3; - preempt_disable(); - sample.jiffies = jiffies; sample.cycles = random_get_entropy(); sample.num = num; @@ -1070,7 +1069,6 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) */ credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); } - preempt_enable(); } void add_input_randomness(unsigned int type, unsigned int code, @@ -1123,28 +1121,27 @@ static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) return *(ptr + f->reg_idx++); } -void add_interrupt_randomness(int irq, int irq_flags) +void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) { struct entropy_store *r; struct fast_pool *fast_pool = this_cpu_ptr(&irq_randomness); - struct pt_regs *regs = get_irq_regs(); unsigned long now = jiffies; cycles_t cycles = random_get_entropy(); __u32 c_high, j_high; - __u64 ip; unsigned long seed; int credit = 0; if (cycles == 0) - cycles = get_reg(fast_pool, regs); + cycles = get_reg(fast_pool, NULL); c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; j_high = (sizeof(now) > 4) ? now >> 32 : 0; fast_pool->pool[0] ^= cycles ^ j_high ^ irq; fast_pool->pool[1] ^= now ^ c_high; - ip = regs ? instruction_pointer(regs) : _RET_IP_; + if (!ip) + ip = _RET_IP_; fast_pool->pool[2] ^= ip; fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : - get_reg(fast_pool, regs); + get_reg(fast_pool, NULL); fast_mix(fast_pool); add_interrupt_bench(cycles); @@ -2056,6 +2053,7 @@ struct batched_entropy { * goal of being quite fast and not depleting entropy. */ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long); +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock); unsigned long get_random_long(void) { unsigned long ret; @@ -2064,13 +2062,13 @@ unsigned long get_random_long(void) if (arch_get_random_long(&ret)) return ret; - batch = &get_cpu_var(batched_entropy_long); + batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long); if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) { extract_crng((u8 *)batch->entropy_long); batch->position = 0; } ret = batch->entropy_long[batch->position++]; - put_cpu_var(batched_entropy_long); + put_locked_var(batched_entropy_long_lock, batched_entropy_long); return ret; } EXPORT_SYMBOL(get_random_long); @@ -2082,6 +2080,8 @@ unsigned int get_random_int(void) } #else static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int); +static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock); + unsigned int get_random_int(void) { unsigned int ret; @@ -2090,13 +2090,13 @@ unsigned int get_random_int(void) if (arch_get_random_int(&ret)) return ret; - batch = &get_cpu_var(batched_entropy_int); + batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int); if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) { extract_crng((u8 *)batch->entropy_int); batch->position = 0; } ret = batch->entropy_int[batch->position++]; - put_cpu_var(batched_entropy_int); + put_locked_var(batched_entropy_int_lock, batched_entropy_int); return ret; } #endif diff --git a/drivers/clocksource/tcb_clksrc.c b/drivers/clocksource/tcb_clksrc.c index 4da2af9694a2..5b6f57f500b8 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -23,8 +23,7 @@ * this 32 bit free-running counter. the second channel is not used. * * - The third channel may be used to provide a 16-bit clockevent - * source, used in either periodic or oneshot mode. This runs - * at 32 KiHZ, and can handle delays of up to two seconds. + * source, used in either periodic or oneshot mode. 
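The batched-entropy hunks just above show the locallock idiom in full: per-CPU data guarded by a per-CPU lock, so access is still serialized per CPU but the section is preemptible on RT (on non-RT, get_locked_var() degrades to the usual preempt/irq disabling). A self-contained sketch with invented names:

#include <linux/locallock.h>
#include <linux/percpu.h>

struct sample_batch {
        unsigned long pool[4];
        unsigned int position;
};
static DEFINE_PER_CPU(struct sample_batch, sample_batch);
static DEFINE_LOCAL_IRQ_LOCK(sample_batch_lock);

static unsigned long sample_next(void)
{
        struct sample_batch *batch;
        unsigned long ret;

        batch = &get_locked_var(sample_batch_lock, sample_batch);
        ret = batch->pool[batch->position++ % ARRAY_SIZE(batch->pool)];
        put_locked_var(sample_batch_lock, sample_batch);
        return ret;
}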
* * A boot clocksource and clockevent source are also currently needed, * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so @@ -74,6 +73,8 @@ static struct clocksource clksrc = { struct tc_clkevt_device { struct clock_event_device clkevt; struct clk *clk; + bool clk_enabled; + u32 freq; void __iomem *regs; }; @@ -82,15 +83,26 @@ static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt) return container_of(clkevt, struct tc_clkevt_device, clkevt); } -/* For now, we always use the 32K clock ... this optimizes for NO_HZ, - * because using one of the divided clocks would usually mean the - * tick rate can never be less than several dozen Hz (vs 0.5 Hz). - * - * A divided clock could be good for high resolution timers, since - * 30.5 usec resolution can seem "low". - */ static u32 timer_clock; +static void tc_clk_disable(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + + clk_disable(tcd->clk); + tcd->clk_enabled = false; +} + +static void tc_clk_enable(struct clock_event_device *d) +{ + struct tc_clkevt_device *tcd = to_tc_clkevt(d); + + if (tcd->clk_enabled) + return; + clk_enable(tcd->clk); + tcd->clk_enabled = true; +} + static int tc_shutdown(struct clock_event_device *d) { struct tc_clkevt_device *tcd = to_tc_clkevt(d); @@ -98,8 +110,14 @@ static int tc_shutdown(struct clock_event_device *d) __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); + return 0; +} + +static int tc_shutdown_clk_off(struct clock_event_device *d) +{ + tc_shutdown(d); if (!clockevent_state_detached(d)) - clk_disable(tcd->clk); + tc_clk_disable(d); return 0; } @@ -112,9 +130,9 @@ static int tc_set_oneshot(struct clock_event_device *d) if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) tc_shutdown(d); - clk_enable(tcd->clk); + tc_clk_enable(d); - /* slow clock, count up to RC, then irq and stop */ + /* count up to RC, then irq and stop */ __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -134,12 +152,12 @@ static int tc_set_periodic(struct clock_event_device *d) /* By not making the gentime core emulate periodic mode on top * of oneshot, we get lower overhead and improved accuracy. 
*/ - clk_enable(tcd->clk); + tc_clk_enable(d); - /* slow clock, count up to RC, then irq and restart */ + /* count up to RC, then irq and restart */ __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); /* Enable clock and interrupts on RC compare */ __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); @@ -166,9 +184,13 @@ static struct tc_clkevt_device clkevt = { .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, /* Should be lower than at91rm9200's system timer */ +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK .rating = 125, +#else + .rating = 200, +#endif .set_next_event = tc_next_event, - .set_state_shutdown = tc_shutdown, + .set_state_shutdown = tc_shutdown_clk_off, .set_state_periodic = tc_set_periodic, .set_state_oneshot = tc_set_oneshot, }, @@ -188,8 +210,9 @@ static irqreturn_t ch2_irq(int irq, void *handle) return IRQ_NONE; } -static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) +static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) { + unsigned divisor = atmel_tc_divisors[divisor_idx]; int ret; struct clk *t2_clk = tc->clk[2]; int irq = tc->irq[2]; @@ -210,7 +233,11 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) clkevt.regs = tc->regs; clkevt.clk = t2_clk; - timer_clock = clk32k_divisor_idx; + timer_clock = divisor_idx; + if (!divisor) + clkevt.freq = 32768; + else + clkevt.freq = clk_get_rate(t2_clk) / divisor; clkevt.clkevt.cpumask = cpumask_of(0); @@ -221,7 +248,7 @@ static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) return ret; } - clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); + clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); return ret; } @@ -358,7 +385,11 @@ static int __init tcb_clksrc_init(void) goto err_disable_t1; /* channel 2: periodic and oneshot timer support */ +#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK ret = setup_clkevents(tc, clk32k_divisor_idx); +#else + ret = setup_clkevents(tc, best_divisor_idx); +#endif if (ret) goto err_unregister_clksrc; diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index 6555821bbdae..93288849b2bd 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c @@ -46,6 +46,7 @@ struct pit_data { u32 cycle; u32 cnt; unsigned int irq; + bool irq_requested; struct clk *mck; }; @@ -96,15 +97,29 @@ static int pit_clkevt_shutdown(struct clock_event_device *dev) /* disable irq, leaving the clocksource active */ pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); + if (data->irq_requested) { + free_irq(data->irq, data); + data->irq_requested = false; + } return 0; } +static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id); /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ static int pit_clkevt_set_periodic(struct clock_event_device *dev) { struct pit_data *data = clkevt_to_pit_data(dev); + int ret; + + ret = request_irq(data->irq, at91sam926x_pit_interrupt, + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, + "at91_tick", data); + if (ret) + panic(pr_fmt("Unable to setup IRQ\n")); + + data->irq_requested = true; /* update clocksource counter */ data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); @@ -230,15 +245,6 @@ static int __init 
at91sam926x_pit_dt_init(struct device_node *node) return ret; } - /* Set up irq handler */ - ret = request_irq(data->irq, at91sam926x_pit_interrupt, - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, - "at91_tick", data); - if (ret) { - pr_err("Unable to setup IRQ\n"); - return ret; - } - /* Set up and register clockevents */ data->clkevt.name = "pit"; data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c index e90ab5b63a90..9e124087c55f 100644 --- a/drivers/clocksource/timer-atmel-st.c +++ b/drivers/clocksource/timer-atmel-st.c @@ -115,18 +115,29 @@ static void clkdev32k_disable_and_flush_irq(void) last_crtr = read_CRTR(); } +static int atmel_st_irq; + static int clkevt32k_shutdown(struct clock_event_device *evt) { clkdev32k_disable_and_flush_irq(); irqmask = 0; regmap_write(regmap_st, AT91_ST_IER, irqmask); + free_irq(atmel_st_irq, regmap_st); return 0; } static int clkevt32k_set_oneshot(struct clock_event_device *dev) { + int ret; + clkdev32k_disable_and_flush_irq(); + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, + "at91_tick", regmap_st); + if (ret) + panic(pr_fmt("Unable to setup IRQ\n")); + /* * ALM for oneshot irqs, set by next_event() * before 32 seconds have passed. @@ -139,8 +150,16 @@ static int clkevt32k_set_oneshot(struct clock_event_device *dev) static int clkevt32k_set_periodic(struct clock_event_device *dev) { + int ret; + clkdev32k_disable_and_flush_irq(); + ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, + IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, + "at91_tick", regmap_st); + if (ret) + panic(pr_fmt("Unable to setup IRQ\n")); + /* PIT for periodic irqs; fixed rate of 1/HZ */ irqmask = AT91_ST_PITS; regmap_write(regmap_st, AT91_ST_PIMR, timer_latch); @@ -198,7 +217,7 @@ static int __init atmel_st_timer_init(struct device_node *node) { struct clk *sclk; unsigned int sclk_rate, val; - int irq, ret; + int ret; regmap_st = syscon_node_to_regmap(node); if (IS_ERR(regmap_st)) { @@ -212,21 +231,12 @@ static int __init atmel_st_timer_init(struct device_node *node) regmap_read(regmap_st, AT91_ST_SR, &val); /* Get the interrupts property */ - irq = irq_of_parse_and_map(node, 0); - if (!irq) { + atmel_st_irq = irq_of_parse_and_map(node, 0); + if (!atmel_st_irq) { pr_err("Unable to get IRQ from DT\n"); return -EINVAL; } - /* Make IRQs happen for the system timer */ - ret = request_irq(irq, at91rm9200_timer_interrupt, - IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, - "at91_tick", regmap_st); - if (ret) { - pr_err("Unable to setup IRQ\n"); - return ret; - } - sclk = of_clk_get(node, 0); if (IS_ERR(sclk)) { pr_err("Unable to get slow clock\n"); diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c index a782ce87715c..19d265948526 100644 --- a/drivers/connector/cn_proc.c +++ b/drivers/connector/cn_proc.c @@ -32,6 +32,7 @@ #include <linux/pid_namespace.h> #include <linux/cn_proc.h> +#include <linux/locallock.h> /* * Size of a cn_msg followed by a proc_event structure. 
Since the @@ -54,10 +55,11 @@ static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; /* proc_event_counts is used as the sequence number of the netlink message */ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; +static DEFINE_LOCAL_IRQ_LOCK(send_msg_lock); static inline void send_msg(struct cn_msg *msg) { - preempt_disable(); + local_lock(send_msg_lock); msg->seq = __this_cpu_inc_return(proc_event_counts) - 1; ((struct proc_event *)msg->data)->cpu = smp_processor_id(); @@ -70,7 +72,7 @@ static inline void send_msg(struct cn_msg *msg) */ cn_netlink_send(msg, 0, CN_IDX_PROC, GFP_NOWAIT); - preempt_enable(); + local_unlock(send_msg_lock); } void proc_fork_connector(struct task_struct *task) diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86 index adbd1de1cea5..1fac5074f2cf 100644 --- a/drivers/cpufreq/Kconfig.x86 +++ b/drivers/cpufreq/Kconfig.x86 @@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI config X86_POWERNOW_K8 tristate "AMD Opteron/Athlon64 PowerNow!" - depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ + depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE help This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. Support for K10 and newer processors is now in acpi-cpufreq. diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 2117f172d7a2..96c15501b0c8 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_params *params, if (ret) return ret; +#ifndef CONFIG_PREEMPT_RT_BASE trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); +#endif i915_gem_execbuffer_move_to_active(vmas, params->request); diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c index 755d78832a66..97fb03dc4971 100644 --- a/drivers/gpu/drm/i915/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c @@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) +#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 02908e37c228..05c0480576e1 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_disable_rt(); /* Get optional system timestamp before query. */ if (stime) @@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ + preempt_enable_rt(); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index f8efd20e4a90..5abe6e027888 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -12134,7 +12134,7 @@ void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe) struct intel_crtc *intel_crtc = to_intel_crtc(crtc); struct intel_flip_work *work; - WARN_ON(!in_interrupt()); + WARN_ON_NONRT(!in_interrupt()); if (crtc == NULL) return; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 64f4e2e18594..aebf1e9eabcb 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -35,6 +35,7 @@ #include <drm/drm_rect.h> #include <drm/drm_atomic.h> #include <drm/drm_plane_helper.h> +#include <linux/locallock.h> #include "intel_drv.h" #include "intel_frontbuffer.h" #include <drm/i915_drm.h> @@ -65,6 +66,8 @@ int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 1000 * adjusted_mode->crtc_htotal); } +static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); + /** * intel_pipe_update_start() - start update of a set of display registers * @crtc: the crtc of which the registers are going to be updated @@ -98,7 +101,7 @@ void intel_pipe_update_start(struct intel_crtc *crtc) min = vblank_start - intel_usecs_to_scanlines(adjusted_mode, 100); max = vblank_start - 1; - local_irq_disable(); + local_lock_irq(pipe_update_lock); if (min <= 0 || max <= 0) return; @@ -128,11 +131,11 @@ void intel_pipe_update_start(struct intel_crtc *crtc) break; } - local_irq_enable(); + local_unlock_irq(pipe_update_lock); timeout = schedule_timeout(timeout); - local_irq_disable(); + local_lock_irq(pipe_update_lock); } finish_wait(wq, &wait); @@ -202,7 +205,7 @@ void intel_pipe_update_end(struct intel_crtc *crtc, struct intel_flip_work *work crtc->base.state->event = NULL; } - local_irq_enable(); + local_unlock_irq(pipe_update_lock); if (crtc->debug.start_vbl_count && crtc->debug.start_vbl_count != end_vbl_count) { diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 192b2d3a79cb..d5372a207326 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index cdb8cb568c15..b6d7fd964cbc 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1845,6 +1845,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, struct radeon_device *rdev = dev->dev_private; /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_disable_rt(); /* Get optional system timestamp before query. */ if (stime) @@ -1937,6 +1938,7 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, *etime = ktime_get(); /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ + preempt_enable_rt(); /* Decode into vertical and horizontal scanout position. 
*/ *vpos = position & 0x1fff; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 0276d2ef06ee..8868045eabde 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -761,6 +761,8 @@ static void vmbus_isr(void) void *page_addr; struct hv_message *msg; union hv_synic_event_flags *event; + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? instruction_pointer(regs) : 0; bool handled = false; page_addr = hv_context.synic_event_page[cpu]; @@ -808,7 +810,7 @@ static void vmbus_isr(void) tasklet_schedule(hv_context.msg_dpc[cpu]); } - add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0); + add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0, ip); } diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c index 36f76e28a0bf..394f142f90c7 100644 --- a/drivers/ide/alim15x3.c +++ b/drivers/ide/alim15x3.c @@ -234,7 +234,7 @@ static int init_chipset_ali15x3(struct pci_dev *dev) isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - local_irq_save(flags); + local_irq_save_nort(flags); if (m5229_revision < 0xC2) { /* @@ -325,7 +325,7 @@ out: } pci_dev_put(north); pci_dev_put(isa_dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); return 0; } diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c index 0ceae5cbd89a..c212e85d7f3e 100644 --- a/drivers/ide/hpt366.c +++ b/drivers/ide/hpt366.c @@ -1236,7 +1236,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, dma_old = inb(base + 2); - local_irq_save(flags); + local_irq_save_nort(flags); dma_new = dma_old; pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); @@ -1247,7 +1247,7 @@ static int init_dma_hpt366(ide_hwif_t *hwif, if (dma_new != dma_old) outb(dma_new, base + 2); - local_irq_restore(flags); + local_irq_restore_nort(flags); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); diff --git a/drivers/ide/ide-io-std.c b/drivers/ide/ide-io-std.c index 19763977568c..4169433faab5 100644 --- a/drivers/ide/ide-io-std.c +++ b/drivers/ide/ide-io-std.c @@ -175,7 +175,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -186,7 +186,7 @@ void ide_input_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, insl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; @@ -219,7 +219,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, unsigned long uninitialized_var(flags); if ((io_32bit & 2) && !mmio) { - local_irq_save(flags); + local_irq_save_nort(flags); ata_vlb_sync(io_ports->nsect_addr); } @@ -230,7 +230,7 @@ void ide_output_data(ide_drive_t *drive, struct ide_cmd *cmd, void *buf, outsl(data_addr, buf, words); if ((io_32bit & 2) && !mmio) - local_irq_restore(flags); + local_irq_restore_nort(flags); if (((len + 1) & 3) < 2) return; diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 669ea1e45795..e12e43e62245 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long data) /* disable_irq_nosync ?? 
*/ disable_irq(hwif->irq); /* local CPU only, as if we were handling an interrupt */ - local_irq_disable(); + local_irq_disable_nort(); if (hwif->polling) { startstop = handler(drive); } else if (drive_is_ready(drive)) { diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c index 376f2dc410c5..f014dd1b73dc 100644 --- a/drivers/ide/ide-iops.c +++ b/drivers/ide/ide-iops.c @@ -129,12 +129,12 @@ int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, if ((stat & ATA_BUSY) == 0) break; - local_irq_restore(flags); + local_irq_restore_nort(flags); *rstat = stat; return -EBUSY; } } - local_irq_restore(flags); + local_irq_restore_nort(flags); } /* * Allow status to settle, then read it again. diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 0b63facd1d87..4ceba37afc0c 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -196,10 +196,10 @@ static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id) int bswap = 1; /* local CPU only; some systems need this */ - local_irq_save(flags); + local_irq_save_nort(flags); /* read 512 bytes of id info */ hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); - local_irq_restore(flags); + local_irq_restore_nort(flags); drive->dev_flags |= IDE_DFLAG_ID_READ; #ifdef DEBUG diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index a716693417a3..be0568c722d6 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c @@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, page_is_high = PageHighMem(page); if (page_is_high) - local_irq_save(flags); + local_irq_save_nort(flags); buf = kmap_atomic(page) + offset; @@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd, kunmap_atomic(buf); if (page_is_high) - local_irq_restore(flags); + local_irq_restore_nort(flags); len -= nr_bytes; } @@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, } if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) - local_irq_disable(); + local_irq_disable_nort(); ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index fddff403d5d2..cca1bb4fbfe3 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -902,7 +902,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) ipoib_dbg_mcast(priv, "restarting multicast task\n"); - local_irq_save(flags); + local_irq_save_nort(flags); netif_addr_lock(dev); spin_lock(&priv->lock); @@ -984,7 +984,7 @@ void ipoib_mcast_restart_task(struct work_struct *work) spin_unlock(&priv->lock); netif_addr_unlock(dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); /* * make sure the in-flight joins have finished before we attempt diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 4a2a9e370be7..e970d9afd179 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c @@ -91,13 +91,13 @@ static int gameport_measure_speed(struct gameport *gameport) tx = ~0; for (i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); t1 = ktime_get_ns(); for (t = 0; t < 50; t++) gameport_read(gameport); t2 = ktime_get_ns(); t3 = ktime_get_ns(); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); t = (t2 - t1) - (t3 - t2); if (t < tx) @@ -124,12 +124,12 @@ static int old_gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 
50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); GET_TIME(t1); for (t = 0; t < 50; t++) gameport_read(gameport); GET_TIME(t2); GET_TIME(t3); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; } @@ -148,11 +148,11 @@ static int old_gameport_measure_speed(struct gameport *gameport) tx = 1 << 30; for(i = 0; i < 50; i++) { - local_irq_save(flags); + local_irq_save_nort(flags); t1 = rdtsc(); for (t = 0; t < 50; t++) gameport_read(gameport); t2 = rdtsc(); - local_irq_restore(flags); + local_irq_restore_nort(flags); udelay(i * 10); if (t2 - t1 < tx) tx = t2 - t1; } diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index c380b7e8f1c6..8604651dfb59 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_dev_data *dev_data, int ret; /* - * Must be called with IRQs disabled. Warn here to detect early - * when its not. + * Must be called with IRQs disabled on a non RT kernel. Warn here to + * detect early when its not. */ - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); /* lock domain */ spin_lock(&domain->lock); @@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu_dev_data *dev_data) struct protection_domain *domain; /* - * Must be called with IRQs disabled. Warn here to detect early - * when its not. + * Must be called with IRQs disabled on a non RT kernel. Warn here to + * detect early when its not. */ - WARN_ON(!irqs_disabled()); + WARN_ON_NONRT(!irqs_disabled()); if (WARN_ON(!dev_data->domain)) return; diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 002f8a421efa..980f41f1a194 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -479,7 +479,7 @@ struct deferred_flush_data { struct deferred_flush_table *tables; }; -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; @@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, struct intel_iommu *iommu; struct deferred_flush_entry *entry; struct deferred_flush_data *flush_data; - unsigned int cpuid; - cpuid = get_cpu(); - flush_data = per_cpu_ptr(&deferred_flush, cpuid); + flush_data = raw_cpu_ptr(&deferred_flush); /* Flush all CPUs' entries to avoid deferring too much. If * this becomes a bottleneck, can just flush us, and rely on @@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, } flush_data->size++; spin_unlock_irqrestore(&flush_data->lock, flags); - - put_cpu(); } static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index e23001bfcfee..359d5d169ec0 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/smp.h> #include <linux/bitops.h> +#include <linux/cpu.h> static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, @@ -420,10 +421,8 @@ retry: /* Try replenishing IOVAs by flushing rcache. 
*/ flushed_rcache = true; - preempt_disable(); for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); - preempt_enable(); goto retry; } @@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, bool can_insert = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_full(cpu_rcache->loaded)) { @@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, iova_magazine_push(cpu_rcache->loaded, iova_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); if (mag_to_free) { iova_magazine_free_pfns(mag_to_free, iovad); @@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, bool has_pfn = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_empty(cpu_rcache->loaded)) { @@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); return iova_pfn; } diff --git a/drivers/leds/trigger/Kconfig b/drivers/leds/trigger/Kconfig index 3f9ddb9fafa7..09da5b6b44a1 100644 --- a/drivers/leds/trigger/Kconfig +++ b/drivers/leds/trigger/Kconfig @@ -69,7 +69,7 @@ config LEDS_TRIGGER_BACKLIGHT config LEDS_TRIGGER_CPU bool "LED CPU Trigger" - depends on LEDS_TRIGGERS + depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE help This allows LEDs to be controlled by active CPUs. This shows the active CPUs across an array of LEDs so you can see which diff --git a/drivers/md/bcache/Kconfig b/drivers/md/bcache/Kconfig index 4d200883c505..98b64ed5cb81 100644 --- a/drivers/md/bcache/Kconfig +++ b/drivers/md/bcache/Kconfig @@ -1,6 +1,7 @@ config BCACHE tristate "Block device as cache" + depends on !PREEMPT_RT_FULL ---help--- Allows a block device to be used as cache for other devices; uses a btree for indexing and the layout is optimized for SSDs. 
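The iova.c hunks above can replace get_cpu_ptr() with raw_cpu_ptr() because each per-CPU magazine cache carries its own spinlock: a task migrated between fetching the pointer and taking the lock simply operates on another CPU's equally valid cache. A hedged sketch of that shape (demo names, not the iova API; the lock is assumed to be spin_lock_init()ed during setup):

struct demo_rcache {
	spinlock_t lock;
	unsigned long pfn;
};
static DEFINE_PER_CPU(struct demo_rcache, demo_rcaches);

static unsigned long demo_get(void)
{
	/* no get_cpu_ptr(): the spinlock, not disabled preemption, excludes racers */
	struct demo_rcache *rc = raw_cpu_ptr(&demo_rcaches);
	unsigned long flags, ret;

	spin_lock_irqsave(&rc->lock, flags);
	ret = rc->pfn;
	spin_unlock_irqrestore(&rc->lock, flags);
	return ret;
}

This matters on RT, where spinlocks sleep and therefore must not be acquired inside the preempt-disabled region that get_cpu_ptr()/put_cpu_ptr() would create.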
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index ba7c4c685db3..834ec328f217 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -842,7 +842,7 @@ static void dm_old_request_fn(struct request_queue *q) /* Establish tio->ti before queuing work (map_tio_request) */ tio->ti = ti; kthread_queue_work(&md->kworker, &tio->work); - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); } } diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 7aea0221530c..4d8cfce1de86 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -1934,8 +1934,9 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) struct raid5_percpu *percpu; unsigned long cpu; - cpu = get_cpu(); + cpu = get_cpu_light(); percpu = per_cpu_ptr(conf->percpu, cpu); + spin_lock(&percpu->lock); if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { ops_run_biofill(sh); overlap_clear++; @@ -1991,7 +1992,8 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) if (test_and_clear_bit(R5_Overlap, &dev->flags)) wake_up(&sh->raid_conf->wait_for_overlap); } - put_cpu(); + spin_unlock(&percpu->lock); + put_cpu_light(); } static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp, @@ -6407,6 +6409,7 @@ static int raid456_cpu_up_prepare(unsigned int cpu, struct hlist_node *node) __func__, cpu); return -ENOMEM; } + spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); return 0; } @@ -6417,7 +6420,6 @@ static int raid5_alloc_percpu(struct r5conf *conf) conf->percpu = alloc_percpu(struct raid5_percpu); if (!conf->percpu) return -ENOMEM; - err = cpuhp_state_add_instance(CPUHP_MD_RAID5_PREPARE, &conf->node); if (!err) { conf->scribble_disks = max(conf->raid_disks, diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 57ec49f0839e..0739604990b7 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -504,6 +504,7 @@ struct r5conf { int recovery_disabled; /* per cpu variables */ struct raid5_percpu { + spinlock_t lock; /* Protection for -RT */ struct page *spare_page; /* Used when checking P/Q in raid6 */ struct flex_array *scribble; /* space for constructing buffer * lists and performing address diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 64971baf11fa..215e91e36198 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -54,6 +54,7 @@ config AD525X_DPOT_SPI config ATMEL_TCLIB bool "Atmel AT32/AT91 Timer/Counter Library" depends on (AVR32 || ARCH_AT91) + default y if PREEMPT_RT_FULL help Select this if you want a library to allocate the Timer/Counter blocks found on many Atmel processors. This facilitates using @@ -69,8 +70,7 @@ config ATMEL_TCB_CLKSRC are combined to make a single 32-bit timer. When GENERIC_CLOCKEVENTS is defined, the third timer channel - may be used as a clock event device supporting oneshot mode - (delays of up to two seconds) based on the 32 KiHz clock. + may be used as a clock event device supporting oneshot mode. config ATMEL_TCB_CLKSRC_BLOCK int @@ -84,6 +84,15 @@ config ATMEL_TCB_CLKSRC_BLOCK TC can be used for other purposes, such as PWM generation and interval timing. +config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + bool "TC Block use 32 KiHz clock" + depends on ATMEL_TCB_CLKSRC + default y if !PREEMPT_RT_FULL + help + Select this to use 32 KiHz base clock rate as TC block clock + source for clock events. 
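The raid5 hunks above show the companion pattern: get_cpu_light() only disables migration on RT, so a per-CPU spinlock is added to stop two tasks scheduled on the same CPU from clobbering the shared scratch buffers. Roughly, with made-up names (get_cpu_light()/put_cpu_light() are RT-patch primitives that fall back to get_cpu()/put_cpu() on !RT):

struct demo_percpu {
	spinlock_t lock;	/* "protection for -RT", as raid5.h above puts it */
	void *scribble;
};
static DEFINE_PER_CPU(struct demo_percpu, demo_pcpu);	/* lock init'ed at CPU-hotplug prepare time */

static void demo_run_ops(void)
{
	int cpu = get_cpu_light();	/* RT: migrate_disable() only */
	struct demo_percpu *p = per_cpu_ptr(&demo_pcpu, cpu);

	spin_lock(&p->lock);	/* may sleep on RT, which is legal here */
	/* ... use p->scribble ... */
	spin_unlock(&p->lock);
	put_cpu_light();
}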
+ + config DUMMY_IRQ tristate "Dummy IRQ handler" default n diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index df990bb8c873..1a162709a85e 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) struct sg_mapping_iter *sg_miter = &host->sg_miter; struct variant_data *variant = host->variant; void __iomem *base = host->base; - unsigned long flags; u32 status; status = readl(base + MMCISTATUS); dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); - local_irq_save(flags); - do { unsigned int remain, len; char *buffer; @@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id) sg_miter_stop(sg_miter); - local_irq_restore(flags); - /* * If we have less than the fifo 'half-full' threshold to transfer, * trigger a PIO interrupt as soon as any data is available. diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 9133e7926da5..63afb921ed40 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -842,9 +842,9 @@ static void poll_vortex(struct net_device *dev) { struct vortex_private *vp = netdev_priv(dev); unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); (vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } #endif @@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net_device *dev) * Block interrupts because vortex_interrupt does a bare spin_lock() */ unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); if (vp->full_bus_master_tx) boomerang_interrupt(dev->irq, dev); else vortex_interrupt(dev->irq, dev); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index da4c2d8a4173..1420dfb56bac 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(struct net_device *dev) struct rtl8139_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; - disable_irq(irq); + disable_irq_nosync(irq); rtl8139_interrupt(irq, dev); enable_irq(irq); } diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index bca6935a94db..d7a35ee34d03 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ezusb_priv *upriv, while (!ctx->done.done && msecs--) udelay(1000); } else { - wait_event_interruptible(ctx->done.wait, + swait_event_interruptible(ctx->done.wait, ctx->done.done); } break; diff --git a/drivers/pci/access.c b/drivers/pci/access.c index d11cdbb8fba3..223bbb9acb03 100644 --- a/drivers/pci/access.c +++ b/drivers/pci/access.c @@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev) WARN_ON(!dev->block_cfg_access); dev->block_cfg_access = 0; - wake_up_all(&pci_cfg_wait); + wake_up_all_locked(&pci_cfg_wait); raw_spin_unlock_irqrestore(&pci_lock, flags); } EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index bedce3453dd3..faf038978650 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -61,7 +61,7 @@ struct msm_pinctrl { struct notifier_block restart_nb; int 
irq; - spinlock_t lock; + raw_spinlock_t lock; DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); @@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, if (WARN_ON(i == g->nfuncs)) return -EINVAL; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~mask; val |= i << g->mux_bit; writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -323,14 +323,14 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_OUTPUT: /* set output value */ - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (arg) val |= BIT(g->out_bit); else val &= ~BIT(g->out_bit); writel(val, pctrl->regs + g->io_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); /* enable output */ arg = 1; @@ -351,12 +351,12 @@ static int msm_config_group_set(struct pinctrl_dev *pctldev, return -EINVAL; } - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~(mask << bit); val |= arg << bit; writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } return 0; @@ -384,13 +384,13 @@ static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->ctl_reg); val &= ~BIT(g->oe_bit); writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -404,7 +404,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (value) @@ -417,7 +417,7 @@ static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, in val |= BIT(g->oe_bit); writel(val, pctrl->regs + g->ctl_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) g = &pctrl->soc->groups[offset]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->io_reg); if (value) @@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int value) val &= ~BIT(g->out_bit); writel(val, pctrl->regs + g->io_reg); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } #ifdef CONFIG_DEBUG_FS @@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_cfg_reg); val &= ~BIT(g->intr_enable_bit); @@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) clear_bit(d->hwirq, pctrl->enabled_irqs); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void msm_gpio_irq_unmask(struct irq_data *d) @@ -592,7 
+592,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_cfg_reg); val |= BIT(g->intr_enable_bit); @@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) set_bit(d->hwirq, pctrl->enabled_irqs); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static void msm_gpio_irq_ack(struct irq_data *d) @@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_data *d) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = readl(pctrl->regs + g->intr_status_reg); if (g->intr_ack_high) @@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_data *d) if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); } static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) @@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) g = &pctrl->soc->groups[d->hwirq]; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); /* * For hw without possibility of detecting both edges @@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) irq_set_handler_locked(d, handle_level_irq); @@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) struct msm_pinctrl *pctrl = gpiochip_get_data(gc); unsigned long flags; - spin_lock_irqsave(&pctrl->lock, flags); + raw_spin_lock_irqsave(&pctrl->lock, flags); irq_set_irq_wake(pctrl->irq, on); - spin_unlock_irqrestore(&pctrl->lock, flags); + raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; } @@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_device *pdev, pctrl->soc = soc_data; pctrl->chip = msm_gpio_template; - spin_lock_init(&pctrl->lock); + raw_spin_lock_init(&pctrl->lock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); pctrl->regs = devm_ioremap_resource(&pdev->dev, res); diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 9bd41a35a78a..8e2d436c2e3f 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -1455,11 +1455,11 @@ err2: static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) { struct fcoe_percpu_s *fps; - int rc; + int rc, cpu = get_cpu_light(); - fps = &get_cpu_var(fcoe_percpu); + fps = &per_cpu(fcoe_percpu, cpu); rc = fcoe_get_paged_crc_eof(skb, tlen, fps); - put_cpu_var(fcoe_percpu); + put_cpu_light(); return rc; } @@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(struct fc_lport *lport, return 0; } - stats = per_cpu_ptr(lport->stats, get_cpu()); + stats = per_cpu_ptr(lport->stats, get_cpu_light()); stats->InvalidCRCCount++; if (stats->InvalidCRCCount < 5) printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); - put_cpu(); + put_cpu_light(); return -EINVAL; } @@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_buff *skb) */ hp = (struct fcoe_hdr *) skb_network_header(skb); - stats = per_cpu_ptr(lport->stats, get_cpu()); + stats = 
per_cpu_ptr(lport->stats, get_cpu_light()); if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) { if (stats->ErrorFrames < 5) printk(KERN_WARNING "fcoe: FCoE version " @@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_buff *skb) goto drop; if (!fcoe_filter_frames(lport, fp)) { - put_cpu(); + put_cpu_light(); fc_exch_recv(lport, fp); return; } drop: stats->ErrorFrames++; - put_cpu(); + put_cpu_light(); kfree_skb(skb); } diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c index dcf36537a767..1a1f2e46452c 100644 --- a/drivers/scsi/fcoe/fcoe_ctlr.c +++ b/drivers/scsi/fcoe/fcoe_ctlr.c @@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) INIT_LIST_HEAD(&del_list); - stats = per_cpu_ptr(fip->lp->stats, get_cpu()); + stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; @@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip) sel_time = fcf->time; } } - put_cpu(); + put_cpu_light(); list_for_each_entry_safe(fcf, next, &del_list, list) { /* Removes fcf from current list */ diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index 16ca31ad5ec0..c3987347e762 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport, } memset(ep, 0, sizeof(*ep)); - cpu = get_cpu(); + cpu = get_cpu_light(); pool = per_cpu_ptr(mp->pool, cpu); spin_lock_bh(&pool->lock); - put_cpu(); + put_cpu_light(); /* peek cache of free slot */ if (pool->left != FC_XID_UNKNOWN) { diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c index 87f5e694dbed..23c0a50fb6aa 100644 --- a/drivers/scsi/libsas/sas_ata.c +++ b/drivers/scsi/libsas/sas_ata.c @@ -190,7 +190,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) /* TODO: audit callers to ensure they are ready for qc_issue to * unconditionally re-enable interrupts */ - local_irq_save(flags); + local_irq_save_nort(flags); spin_unlock(ap->lock); /* If the device fell off, no sense in issuing commands */ @@ -252,7 +252,7 @@ static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc) out: spin_lock(ap->lock); - local_irq_restore(flags); + local_irq_restore_nort(flags); return ret; } diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index edc48f3b8230..ee5c6f9dfb6f 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -59,12 +59,12 @@ qla2x00_poll(struct rsp_que *rsp) { unsigned long flags; struct qla_hw_data *ha = rsp->hw; - local_irq_save(flags); + local_irq_save_nort(flags); if (IS_P3P_TYPE(ha)) qla82xx_poll(0, rsp); else ha->isp_ops->intr_handler(0, rsp); - local_irq_restore(flags); + local_irq_restore_nort(flags); } static inline uint8_t * diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index bddaabb288d4..8de0ec4222fe 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3129,7 +3129,11 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp) * kref_put(). 
*/ kref_get(&qentry->irq_notify.kref); +#ifdef CONFIG_PREEMPT_RT_BASE + swork_queue(&qentry->irq_notify.swork); +#else schedule_work(&qentry->irq_notify.work); +#endif } /* diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 95f4c1bcdb4c..0be934799bff 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -29,6 +29,7 @@ #include <linux/pm.h> #include <linux/thermal.h> #include <linux/debugfs.h> +#include <linux/swork.h> #include <asm/cpu_device_id.h> #include <asm/mce.h> @@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_work_fn(struct work_struct *work) } } -static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +static void platform_thermal_notify_work(struct swork_event *event) { unsigned long flags; int cpu = smp_processor_id(); @@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) pkg_work_scheduled[phy_id]) { disable_pkg_thres_interrupt(); spin_unlock_irqrestore(&pkg_work_lock, flags); - return -EINVAL; + return; } pkg_work_scheduled[phy_id] = 1; spin_unlock_irqrestore(&pkg_work_lock, flags); @@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) schedule_delayed_work_on(cpu, &per_cpu(pkg_temp_thermal_threshold_work, cpu), msecs_to_jiffies(notify_delay_ms)); +} + +#ifdef CONFIG_PREEMPT_RT_FULL +static struct swork_event notify_work; + +static int thermal_notify_work_init(void) +{ + int err; + + err = swork_get(); + if (err) + return err; + + INIT_SWORK(¬ify_work, platform_thermal_notify_work); return 0; } +static void thermal_notify_work_cleanup(void) +{ + swork_put(); +} + +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +{ + swork_queue(¬ify_work); + return 0; +} + +#else /* !CONFIG_PREEMPT_RT_FULL */ + +static int thermal_notify_work_init(void) { return 0; } + +static void thermal_notify_work_cleanup(void) { } + +static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) +{ + platform_thermal_notify_work(NULL); + + return 0; +} +#endif /* CONFIG_PREEMPT_RT_FULL */ + static int find_siblings_cpu(int cpu) { int i; @@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(void) if (!x86_match_cpu(pkg_temp_thermal_ids)) return -ENODEV; + if (!thermal_notify_work_init()) + return -ENODEV; + spin_lock_init(&pkg_work_lock); platform_thermal_package_notify = pkg_temp_thermal_platform_thermal_notify; @@ -609,7 +652,7 @@ err_ret: kfree(pkg_work_scheduled); platform_thermal_package_notify = NULL; platform_thermal_package_rate_control = NULL; - + thermal_notify_work_cleanup(); return -ENODEV; } @@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit(void) mutex_unlock(&phy_dev_list_mutex); platform_thermal_package_notify = NULL; platform_thermal_package_rate_control = NULL; + thermal_notify_work_cleanup(); for_each_online_cpu(i) cancel_delayed_work_sync( &per_cpu(pkg_temp_thermal_threshold_work, i)); diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c index e8819aa20415..dd7f9bf45d6c 100644 --- a/drivers/tty/serial/8250/8250_core.c +++ b/drivers/tty/serial/8250/8250_core.c @@ -58,7 +58,16 @@ static struct uart_driver serial8250_reg; static unsigned int skip_txen_test; /* force skip of txen test at init time */ -#define PASS_LIMIT 512 +/* + * On -rt we can have a more delays, and legitimately + * so - so don't drop work spuriously and spam the + * syslog: + */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define PASS_LIMIT 1000000 +#else +# define PASS_LIMIT 512 
+#endif #include <asm/serial.h> /* diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 1ef31e3ee4a1..ff9f4e50563c 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -35,6 +35,7 @@ #include <linux/nmi.h> #include <linux/mutex.h> #include <linux/slab.h> +#include <linux/kdb.h> #include <linux/uaccess.h> #include <linux/pm_runtime.h> #include <linux/timer.h> @@ -3140,9 +3141,9 @@ void serial8250_console_write(struct uart_8250_port *up, const char *s, serial8250_rpm_get(up); - if (port->sysrq) + if (port->sysrq || oops_in_progress) locked = 0; - else if (oops_in_progress) + else if (in_kdb_printk()) locked = spin_trylock_irqsave(&port->lock, flags); else spin_lock_irqsave(&port->lock, flags); diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c index e2c33b9528d8..53af53c43e8c 100644 --- a/drivers/tty/serial/amba-pl011.c +++ b/drivers/tty/serial/amba-pl011.c @@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) clk_enable(uap->clk); - local_irq_save(flags); + /* + * local_irq_save(flags); + * + * This local_irq_save() is nonsense. If we come in via sysrq + * handling then interrupts are already disabled. Aside of + * that the port.sysrq check is racy on SMP regardless. + */ if (uap->port.sysrq) locked = 0; else if (oops_in_progress) - locked = spin_trylock(&uap->port.lock); + locked = spin_trylock_irqsave(&uap->port.lock, flags); else - spin_lock(&uap->port.lock); + spin_lock_irqsave(&uap->port.lock, flags); /* * First save the CR then disable the interrupts @@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co, const char *s, unsigned int count) pl011_write(old_cr, uap, REG_CR); if (locked) - spin_unlock(&uap->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&uap->port.lock, flags); clk_disable(uap->clk); } diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c index 44e5b5bf713b..400140d1dfff 100644 --- a/drivers/tty/serial/omap-serial.c +++ b/drivers/tty/serial/omap-serial.c @@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_get_sync(up->dev); - local_irq_save(flags); - if (up->port.sysrq) - locked = 0; - else if (oops_in_progress) - locked = spin_trylock(&up->port.lock); + if (up->port.sysrq || oops_in_progress) + locked = spin_trylock_irqsave(&up->port.lock, flags); else - spin_lock(&up->port.lock); + spin_lock_irqsave(&up->port.lock, flags); /* * First save the IER then disable the interrupts @@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console *co, const char *s, pm_runtime_mark_last_busy(up->dev); pm_runtime_put_autosuspend(up->dev); if (locked) - spin_unlock(&up->port.lock); - local_irq_restore(flags); + spin_unlock_irqrestore(&up->port.lock, flags); } static int __init diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index 882fc4e08284..14ab6046e440 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1764,9 +1764,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb) * and no one may trigger the above deadlock situation when * running complete() in tasklet. 
*/ - local_irq_save(flags); + local_irq_save_nort(flags); urb->complete(urb); - local_irq_restore(flags); + local_irq_restore_nort(flags); usb_anchor_resume_wakeups(anchor); atomic_dec(&urb->use_count); diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index f9c99803a43d..e7adb023b921 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data *ffs) pr_info("%s(): freeing\n", __func__); ffs_data_clear(ffs); BUG_ON(waitqueue_active(&ffs->ev.waitq) || - waitqueue_active(&ffs->ep0req_completion.wait)); + swait_active(&ffs->ep0req_completion.wait)); kfree(ffs->dev_name); kfree(ffs); } diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index f69dbd4bcd18..3b7638322f9f 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -347,7 +347,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) spin_unlock_irq (&epdata->dev->lock); if (likely (value == 0)) { - value = wait_event_interruptible (done.wait, done.done); + value = swait_event_interruptible (done.wait, done.done); if (value != 0) { spin_lock_irq (&epdata->dev->lock); if (likely (epdata->ep != NULL)) { @@ -356,7 +356,7 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) usb_ep_dequeue (epdata->ep, epdata->req); spin_unlock_irq (&epdata->dev->lock); - wait_event (done.wait, done.done); + swait_event (done.wait, done.done); if (epdata->status == -ECONNRESET) epdata->status = -EINTR; } else { diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c @@ -40,6 +40,7 @@ #include <linux/ramfs.h> #include <linux/percpu-refcount.h> #include <linux/mount.h> +#include <linux/swork.h> #include <asm/kmap_types.h> #include <asm/uaccess.h> @@ -115,7 +116,7 @@ struct kioctx { struct page **ring_pages; long nr_pages; - struct work_struct free_work; + struct swork_event free_work; /* * signals when all in-flight requests are done @@ -258,6 +259,7 @@ static int __init aio_setup(void) .mount = aio_mount, .kill_sb = kill_anon_super, }; + BUG_ON(swork_get()); aio_mnt = kern_mount(&aio_fs); if (IS_ERR(aio_mnt)) panic("Failed to create aio fs mount."); @@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb *kiocb) return cancel(&kiocb->common); } -static void free_ioctx(struct work_struct *work) +static void free_ioctx(struct swork_event *sev) { - struct kioctx *ctx = container_of(work, struct kioctx, free_work); + struct kioctx *ctx = container_of(sev, struct kioctx, free_work); pr_debug("freeing %p\n", ctx); @@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percpu_ref *ref) if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) complete(&ctx->rq_wait->comp); - INIT_WORK(&ctx->free_work, free_ioctx); - schedule_work(&ctx->free_work); + INIT_SWORK(&ctx->free_work, free_ioctx); + swork_queue(&ctx->free_work); } /* @@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percpu_ref *ref) * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - * now it's safe to cancel any that need to be.
*/ -static void free_ioctx_users(struct percpu_ref *ref) +static void free_ioctx_users_work(struct swork_event *sev) { - struct kioctx *ctx = container_of(ref, struct kioctx, users); + struct kioctx *ctx = container_of(sev, struct kioctx, free_work); struct aio_kiocb *req; spin_lock_irq(&ctx->ctx_lock); @@ -632,6 +634,14 @@ static void free_ioctx_users(struct percpu_ref *ref) percpu_ref_put(&ctx->reqs); } +static void free_ioctx_users(struct percpu_ref *ref) +{ + struct kioctx *ctx = container_of(ref, struct kioctx, users); + + INIT_SWORK(&ctx->free_work, free_ioctx_users_work); + swork_queue(&ctx->free_work); +} + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) { unsigned i, new_nr; diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index a1fba4285277..3796769b4cd1 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h @@ -31,6 +31,7 @@ #include <linux/sched.h> #include <linux/mount.h> #include <linux/namei.h> +#include <linux/delay.h> #include <asm/current.h> #include <linux/uaccess.h> diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index d8e6d421c27f..2e689ab1306b 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -148,7 +148,7 @@ again: parent = p->d_parent; if (!spin_trylock(&parent->d_lock)) { spin_unlock(&p->d_lock); - cpu_relax(); + cpu_chill(); goto relock; } spin_unlock(&p->d_lock); diff --git a/fs/buffer.c b/fs/buffer.c index 5d8f496d624e..48074bd91ea3 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -301,8 +301,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) * decide that the page is now completely done. */ first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -315,8 +314,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors and they are all @@ -328,9 +326,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate) return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /* @@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_write(bh); unlock_buffer(bh); @@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); end_page_writeback(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } EXPORT_SYMBOL(end_buffer_async_write); @@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gfp_t gfp_flags) struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); if (ret) { INIT_LIST_HEAD(&ret->b_assoc_buffers); + buffer_head_init_locks(ret); preempt_disable(); __this_cpu_inc(bh_accounting.nr); recalc_bh_state(); diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index a27fc8791551..791aecb7c1ac 
100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c @@ -80,7 +80,7 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name, struct inode *inode; struct super_block *sb = parent->d_sb; struct cifs_sb_info *cifs_sb = CIFS_SB(sb); - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); cifs_dbg(FYI, "%s: for %s\n", __func__, name->name); diff --git a/fs/dcache.c b/fs/dcache.c index 67957f5b325c..37948da28742 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -19,6 +19,7 @@ #include <linux/mm.h> #include <linux/fs.h> #include <linux/fsnotify.h> +#include <linux/delay.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/hash.h> @@ -777,6 +778,8 @@ static inline bool fast_dput(struct dentry *dentry) */ void dput(struct dentry *dentry) { + struct dentry *parent; + if (unlikely(!dentry)) return; @@ -815,9 +818,18 @@ repeat: return; kill_it: - dentry = dentry_kill(dentry); - if (dentry) { - cond_resched(); + parent = dentry_kill(dentry); + if (parent) { + int r; + + if (parent == dentry) { + /* the task with the highest priority won't schedule */ + r = cond_resched(); + if (!r) + cpu_chill(); + } else { + dentry = parent; + } goto repeat; } } @@ -2352,7 +2364,7 @@ again: if (dentry->d_lockref.count == 1) { if (!spin_trylock(&inode->i_lock)) { spin_unlock(&dentry->d_lock); - cpu_relax(); + cpu_chill(); goto again; } dentry->d_flags &= ~DCACHE_CANT_MOUNT; @@ -2412,21 +2424,24 @@ static inline void end_dir_add(struct inode *dir, unsigned n) static void d_wait_lookup(struct dentry *dentry) { - if (d_in_lookup(dentry)) { - DECLARE_WAITQUEUE(wait, current); - add_wait_queue(dentry->d_wait, &wait); - do { - set_current_state(TASK_UNINTERRUPTIBLE); - spin_unlock(&dentry->d_lock); - schedule(); - spin_lock(&dentry->d_lock); - } while (d_in_lookup(dentry)); - } + struct swait_queue __wait; + + if (!d_in_lookup(dentry)) + return; + + INIT_LIST_HEAD(&__wait.task_list); + do { + prepare_to_swait(dentry->d_wait, &__wait, TASK_UNINTERRUPTIBLE); + spin_unlock(&dentry->d_lock); + schedule(); + spin_lock(&dentry->d_lock); + } while (d_in_lookup(dentry)); + finish_swait(dentry->d_wait, &__wait); } struct dentry *d_alloc_parallel(struct dentry *parent, const struct qstr *name, - wait_queue_head_t *wq) + struct swait_queue_head *wq) { unsigned int hash = name->hash; struct hlist_bl_head *b = in_lookup_hash(parent, hash); @@ -2535,7 +2550,7 @@ void __d_lookup_done(struct dentry *dentry) hlist_bl_lock(b); dentry->d_flags &= ~DCACHE_PAR_LOOKUP; __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); - wake_up_all(dentry->d_wait); + swake_up_all(dentry->d_wait); dentry->d_wait = NULL; hlist_bl_unlock(b); INIT_HLIST_NODE(&dentry->d_u.d_alias); @@ -3632,6 +3647,11 @@ EXPORT_SYMBOL(d_genocide); void __init vfs_caches_init_early(void) { + int i; + + for (i = 0; i < ARRAY_SIZE(in_lookup_hashtable); i++) + INIT_HLIST_BL_HEAD(&in_lookup_hashtable[i]); + dcache_init_early(); inode_init_early(); } diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 3cbc30413add..41a94f552aab 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -510,12 +510,12 @@ static int ep_poll_wakeup_proc(void *priv, void *cookie, int call_nests) */ static void ep_poll_safewake(wait_queue_head_t *wq) { - int this_cpu = get_cpu(); + int this_cpu = get_cpu_light(); ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - put_cpu(); + put_cpu_light(); } static void ep_remove_wait_queue(struct eppoll_entry *pwq) diff --git a/fs/exec.c b/fs/exec.c index b8c43be24751..71f4c6ec2bb8 
100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -1038,12 +1038,14 @@ static int exec_mmap(struct mm_struct *mm) } } task_lock(tsk); + preempt_disable_rt(); active_mm = tsk->active_mm; tsk->mm = mm; tsk->active_mm = mm; activate_mm(active_mm, mm); tsk->mm->vmacache_seqnum = 0; vmacache_flush(tsk); + preempt_enable_rt(); task_unlock(tsk); if (old_mm) { up_read(&old_mm->mmap_sem); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 642c57b8de7b..8494b9308333 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct file *file, struct inode *dir = d_inode(parent); struct fuse_conn *fc; struct inode *inode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); if (!o->nodeid) { /* diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index 684996c8a3a4..6e18a06aaabe 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -116,6 +116,8 @@ void __jbd2_log_wait_for_space(journal_t *journal) nblocks = jbd2_space_needed(journal); while (jbd2_log_space_left(journal) < nblocks) { write_unlock(&journal->j_state_lock); + if (current->plug) + io_schedule(); mutex_lock(&journal->j_checkpoint_mutex); /* diff --git a/fs/locks.c b/fs/locks.c index 22c5b4aa4961..269c6a44449a 100644 --- a/fs/locks.c +++ b/fs/locks.c @@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request) return -ENOMEM; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); if (request->fl_flags & FL_ACCESS) goto find_conflict; @@ -976,7 +976,7 @@ find_conflict: out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); if (new_fl) locks_free_lock(new_fl); locks_dispose_list(&dispose); @@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, new_fl2 = locks_alloc_lock(); } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); /* * New lock request. Walk all POSIX locks and look for conflicts. If @@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request, } out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); /* * Free any unused locks. 
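/*
 * Annotation: the fs/locks.c conversion running through here swaps every
 * percpu_down_read_preempt_disable()/percpu_up_read_preempt_enable()
 * pair for plain percpu_down_read()/percpu_up_read(). The _preempt_
 * variants keep preemption off across the read side; on RT the
 * ctx->flc_lock taken inside that section is a sleeping spinlock, which
 * must not be acquired with preemption disabled. A sketch mirroring the
 * resulting, preemptible pattern:
 */
static void lease_section(struct file_lock_context *ctx)
{
	percpu_down_read(&file_rwsem);	/* read side stays preemptible */
	spin_lock(&ctx->flc_lock);	/* sleeping lock on RT: now legal */
	/* ... inspect or modify the lease lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
}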
*/ @@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type) return error; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); @@ -1512,13 +1512,13 @@ restart: locks_insert_block(fl, new_fl); trace_break_lease_block(inode, new_fl); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); error = wait_event_interruptible_timeout(new_fl->fl_wait, !new_fl->fl_next, break_time); - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); trace_break_lease_unblock(inode, new_fl); locks_delete_block(new_fl); @@ -1535,7 +1535,7 @@ restart: } out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); locks_free_lock(new_fl); return error; @@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp) ctx = smp_load_acquire(&inode->i_flctx); if (ctx && !list_empty_careful(&ctx->flc_lease)) { - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { @@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp) break; } spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); } @@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **pr return -EINVAL; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); time_out_leases(inode, &dispose); error = check_conflicting_open(dentry, arg, lease->fl_flags); @@ -1765,7 +1765,7 @@ out_setup: lease->fl_lmops->lm_setup(lease, priv); out: spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); if (is_deleg) inode_unlock(inode); @@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct file *filp, void *owner) return error; } - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); list_for_each_entry(fl, &ctx->flc_lease, fl_list) { if (fl->fl_file == filp && @@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct file *filp, void *owner) if (victim) error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); return error; } @@ -2532,13 +2532,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx) if (list_empty(&ctx->flc_lease)) return; - percpu_down_read_preempt_disable(&file_rwsem); + percpu_down_read(&file_rwsem); spin_lock(&ctx->flc_lock); list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list) if (filp == fl->fl_file) lease_modify(fl, F_UNLCK, &dispose); spin_unlock(&ctx->flc_lock); - percpu_up_read_preempt_enable(&file_rwsem); + percpu_up_read(&file_rwsem); locks_dispose_list(&dispose); } diff --git a/fs/namei.c b/fs/namei.c index 66209f720146..cc5320e29fc1 100644 --- a/fs/namei.c +++ b/fs/namei.c @@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const struct qstr *name, { struct dentry *dentry = ERR_PTR(-ENOENT), *old; struct inode *inode = dir->d_inode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + 
DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); inode_lock_shared(inode); /* Don't go there if it's already dead */ @@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata *nd, struct path *path, struct dentry *dentry; int error, create_error = 0; umode_t mode = op->mode; - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); if (unlikely(IS_DEADDIR(dir_inode))) return -ENOENT; diff --git a/fs/namespace.c b/fs/namespace.c index d7360f9897b4..da188c6966a3 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -14,6 +14,7 @@ #include <linux/mnt_namespace.h> #include <linux/user_namespace.h> #include <linux/namei.h> +#include <linux/delay.h> #include <linux/security.h> #include <linux/idr.h> #include <linux/init.h> /* init_rootfs */ @@ -357,8 +358,11 @@ int __mnt_want_write(struct vfsmount *m) * incremented count after it has set MNT_WRITE_HOLD. */ smp_mb(); - while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) - cpu_relax(); + while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { + preempt_enable(); + cpu_chill(); + preempt_disable(); + } /* * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will * be set to match its requirements. So we must not load that until diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index dff600ae0d74..d726d2e09353 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -150,11 +150,11 @@ again: sp = state->owner; /* Block nfs4_proc_unlck */ mutex_lock(&sp->so_delegreturn_mutex); - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); + seq = read_seqbegin(&sp->so_reclaim_seqlock); err = nfs4_open_delegation_recall(ctx, state, stateid, type); if (!err) err = nfs_delegation_claim_locks(ctx, state, stateid); - if (!err && read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) + if (!err && read_seqretry(&sp->so_reclaim_seqlock, seq)) err = -EAGAIN; mutex_unlock(&sp->so_delegreturn_mutex); put_nfs_open_context(ctx); diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index d04ec3814779..ba90d41d3c34 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -485,7 +485,7 @@ static void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) { struct qstr filename = QSTR_INIT(entry->name, entry->len); - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); struct dentry *dentry; struct dentry *alias; struct inode *dir = d_inode(parent); @@ -1491,7 +1491,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, struct file *file, unsigned open_flags, umode_t mode, int *opened) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); struct nfs_open_context *ctx; struct dentry *res; struct iattr attr = { .ia_valid = ATTR_OPEN }; @@ -1806,7 +1806,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) trace_nfs_rmdir_enter(dir, dentry); if (d_really_is_positive(dentry)) { +#ifdef CONFIG_PREEMPT_RT_BASE + down(&NFS_I(d_inode(dentry))->rmdir_sem); +#else down_write(&NFS_I(d_inode(dentry))->rmdir_sem); +#endif error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); /* Ensure the VFS deletes this inode */ switch (error) { @@ -1816,7 +1820,11 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) case -ENOENT: nfs_dentry_handle_enoent(dentry); } +#ifdef CONFIG_PREEMPT_RT_BASE + up(&NFS_I(d_inode(dentry))->rmdir_sem); +#else up_write(&NFS_I(d_inode(dentry))->rmdir_sem); +#endif } else error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name); trace_nfs_rmdir_exit(dir, dentry, error); diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 76ae25661d3f..89159d298278 100644 --- a/fs/nfs/inode.c +++ 
b/fs/nfs/inode.c @@ -1957,7 +1957,11 @@ static void init_once(void *foo) nfsi->nrequests = 0; nfsi->commit_info.ncommit = 0; atomic_set(&nfsi->commit_info.rpcs_out, 0); +#ifdef CONFIG_PREEMPT_RT_BASE + sema_init(&nfsi->rmdir_sem, 1); +#else init_rwsem(&nfsi->rmdir_sem); +#endif nfs4_init_once(nfsi); } diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 1452177c822d..f43b01d54c59 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h @@ -111,7 +111,7 @@ struct nfs4_state_owner { unsigned long so_flags; struct list_head so_states; struct nfs_seqid_counter so_seqid; - seqcount_t so_reclaim_seqcount; + seqlock_t so_reclaim_seqlock; struct mutex so_delegreturn_mutex; }; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index a53b8e0c896a..da0a483c5442 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2695,7 +2695,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, unsigned int seq; int ret; - seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); + seq = raw_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); ret = _nfs4_proc_open(opendata); if (ret != 0) @@ -2733,7 +2733,7 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (d_inode(dentry) == state->inode) { nfs_inode_attach_open_context(ctx); - if (read_seqcount_retry(&sp->so_reclaim_seqcount, seq)) + if (read_seqretry(&sp->so_reclaim_seqlock, seq)) nfs4_schedule_stateid_recovery(server, state); } out: diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index 92671914067f..44038480c88c 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -488,7 +488,7 @@ nfs4_alloc_state_owner(struct nfs_server *server, nfs4_init_seqid_counter(&sp->so_seqid); atomic_set(&sp->so_count, 1); INIT_LIST_HEAD(&sp->so_lru); - seqcount_init(&sp->so_reclaim_seqcount); + seqlock_init(&sp->so_reclaim_seqlock); mutex_init(&sp->so_delegreturn_mutex); return sp; } @@ -1498,8 +1498,12 @@ static int nfs4_reclaim_open_state(struct nfs4_state_owner *sp, const struct nfs * recovering after a network partition or a reboot from a * server that doesn't support a grace period. 
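/*
 * Annotation: the so_reclaim_seqcount -> so_reclaim_seqlock change in
 * the NFSv4 hunks above closes an RT live-lock. With a bare seqcount_t
 * the state-reclaim writer can be preempted while the sequence is odd,
 * leaving readers such as nfs4_open_delegation_recall() retrying
 * indefinitely. A full seqlock_t lets RT serialize writers through the
 * embedded spinlock (with priority inheritance), while !RT keeps using
 * only the embedded seqcount, exactly as the hunk below does. Sketch:
 */
static void reclaim_writer(seqlock_t *sl)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	write_seqlock(sl);		/* spinlock plus sequence bump */
#else
	write_seqcount_begin(&sl->seqcount);
#endif
	/* ... walk and recover open state ... */
#ifdef CONFIG_PREEMPT_RT_FULL
	write_sequnlock(sl);
#else
	write_seqcount_end(&sl->seqcount);
#endif
}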
*/ +#ifdef CONFIG_PREEMPT_RT_FULL + write_seqlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_begin(&sp->so_reclaim_seqlock.seqcount); +#endif spin_lock(&sp->so_lock); - raw_write_seqcount_begin(&sp->so_reclaim_seqcount); restart: list_for_each_entry(state, &sp->so_states, open_states) { if (!test_and_clear_bit(ops->state_flag_bit, &state->flags)) @@ -1568,14 +1572,20 @@ restart: spin_lock(&sp->so_lock); goto restart; } - raw_write_seqcount_end(&sp->so_reclaim_seqcount); spin_unlock(&sp->so_lock); +#ifdef CONFIG_PREEMPT_RT_FULL + write_sequnlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); +#endif return 0; out_err: nfs4_put_open_state(state); - spin_lock(&sp->so_lock); - raw_write_seqcount_end(&sp->so_reclaim_seqcount); - spin_unlock(&sp->so_lock); +#ifdef CONFIG_PREEMPT_RT_FULL + write_sequnlock(&sp->so_reclaim_seqlock); +#else + write_seqcount_end(&sp->so_reclaim_seqlock.seqcount); +#endif return status; } diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index 191aa577dd1f..58990c8f52e0 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -12,7 +12,7 @@ #include <linux/sunrpc/clnt.h> #include <linux/nfs_fs.h> #include <linux/sched.h> -#include <linux/wait.h> +#include <linux/swait.h> #include <linux/namei.h> #include <linux/fsnotify.h> @@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct rpc_task *task, void *calldata) rpc_restart_call_prepare(task); } +#ifdef CONFIG_PREEMPT_RT_BASE +static void nfs_down_anon(struct semaphore *sema) +{ + down(sema); +} + +static void nfs_up_anon(struct semaphore *sema) +{ + up(sema); +} + +#else +static void nfs_down_anon(struct rw_semaphore *rwsem) +{ + down_read_non_owner(rwsem); +} + +static void nfs_up_anon(struct rw_semaphore *rwsem) +{ + up_read_non_owner(rwsem); +} +#endif + /** * nfs_async_unlink_release - Release the sillydelete data. * @task: rpc_task of the sillydelete @@ -64,7 +87,7 @@ static void nfs_async_unlink_release(void *calldata) struct dentry *dentry = data->dentry; struct super_block *sb = dentry->d_sb; - up_read_non_owner(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); + nfs_up_anon(&NFS_I(d_inode(dentry->d_parent))->rmdir_sem); d_lookup_done(dentry); nfs_free_unlinkdata(data); dput(dentry); @@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data) struct inode *dir = d_inode(dentry->d_parent); struct dentry *alias; - down_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_down_anon(&NFS_I(dir)->rmdir_sem); alias = d_alloc_parallel(dentry->d_parent, &data->args.name, &data->wq); if (IS_ERR(alias)) { - up_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_up_anon(&NFS_I(dir)->rmdir_sem); return 0; } if (!d_in_lookup(alias)) { @@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry *dentry, struct nfs_unlinkdata *data) ret = 0; spin_unlock(&alias->d_lock); dput(alias); - up_read_non_owner(&NFS_I(dir)->rmdir_sem); + nfs_up_anon(&NFS_I(dir)->rmdir_sem); /* * If we'd displaced old cached devname, free it. 
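/*
 * Annotation: rmdir_sem is acquired in nfs_call_unlink() but released
 * from nfs_async_unlink_release(), i.e. by whichever task runs the
 * rpc_task callback. RT rwsems track a single owner for priority
 * inheritance, so the down_read_non_owner()/up_read_non_owner() pattern
 * cannot work there; on PREEMPT_RT_BASE the series substitutes a
 * counting semaphore behind the nfs_down_anon()/nfs_up_anon() wrappers
 * above. The cross-task hand-off they must permit, sketched with
 * hypothetical helpers:
 */
static void unlink_submit_side(struct nfs_inode *nfsi)
{
	nfs_down_anon(&nfsi->rmdir_sem);	/* acquired here ... */
	/* queue the async RPC; deliberately no release */
}

static void unlink_release_side(struct nfs_inode *nfsi)
{
	nfs_up_anon(&nfsi->rmdir_sem);		/* ... dropped by rpciod */
}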
At that * point dentry is definitely not a root, so we won't need @@ -182,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry, const struct qstr *name) goto out_free_name; } data->res.dir_attr = &data->dir_attr; - init_waitqueue_head(&data->wq); + init_swait_queue_head(&data->wq); status = -EBUSY; spin_lock(&dentry->d_lock); diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c index fe251f187ff8..e89da4fb14c2 100644 --- a/fs/ntfs/aops.c +++ b/fs/ntfs/aops.c @@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) ofs = 0; if (file_ofs < init_size) ofs = init_size - file_ofs; - local_irq_save(flags); + local_irq_save_nort(flags); kaddr = kmap_atomic(page); memset(kaddr + bh_offset(bh) + ofs, 0, bh->b_size - ofs); flush_dcache_page(page); kunmap_atomic(kaddr); - local_irq_restore(flags); + local_irq_restore_nort(flags); } } else { clear_buffer_uptodate(bh); @@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) "0x%llx.", (unsigned long long)bh->b_blocknr); } first = page_buffers(page); - local_irq_save(flags); - bit_spin_lock(BH_Uptodate_Lock, &first->b_state); + flags = bh_uptodate_lock_irqsave(first); clear_buffer_async_read(bh); unlock_buffer(bh); tmp = bh; @@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) } tmp = tmp->b_this_page; } while (tmp != bh); - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); + bh_uptodate_unlock_irqrestore(first, flags); /* * If none of the buffers had errors then we can set the page uptodate, * but we first have to perform the post read mst fixups, if the @@ -145,13 +143,13 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) recs = PAGE_SIZE / rec_size; /* Should have been verified before we got here... 
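/*
 * Annotation: local_irq_save_nort()/local_irq_restore_nort(), used in
 * the fs/ntfs/aops.c hunks around this point, expand to the real
 * local_irq_save()/local_irq_restore() on !RT and to a bare
 * local_save_flags()/no-op on PREEMPT_RT_FULL (see the irqflags.h hunk
 * later in this series): the kmap_atomic() section no longer needs hard
 * interrupt disabling on RT, where it is preemptible by design. Minimal
 * sketch with a hypothetical page-zeroing helper:
 */
static void zero_page_tail(struct page *page, size_t ofs)
{
	unsigned long flags;
	char *kaddr;

	local_irq_save_nort(flags);	/* irqs really off only on !RT */
	kaddr = kmap_atomic(page);
	memset(kaddr + ofs, 0, PAGE_SIZE - ofs);
	kunmap_atomic(kaddr);
	local_irq_restore_nort(flags);
}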
*/ BUG_ON(!recs); - local_irq_save(flags); + local_irq_save_nort(flags); kaddr = kmap_atomic(page); for (i = 0; i < recs; i++) post_read_mst_fixup((NTFS_RECORD*)(kaddr + i * rec_size), rec_size); kunmap_atomic(kaddr); - local_irq_restore(flags); + local_irq_restore_nort(flags); flush_dcache_page(page); if (likely(page_uptodate && !PageError(page))) SetPageUptodate(page); @@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate) unlock_page(page); return; still_busy: - bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); - local_irq_restore(flags); - return; + bh_uptodate_unlock_irqrestore(first, flags); } /** diff --git a/fs/proc/base.c b/fs/proc/base.c index e67fec3c9856..0edc16f95596 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file, struct dir_context *ctx, child = d_hash_and_lookup(dir, &qname); if (!child) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) goto end_instantiate; diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index d4e37acd4821..000cea46434a 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c @@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct file *file, child = d_lookup(dir, &qname); if (!child) { - DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); + DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(wq); child = d_alloc_parallel(dir, &qname, &wq); if (IS_ERR(child)) return false; diff --git a/fs/timerfd.c b/fs/timerfd.c index ab8dd1538381..5580853f57dd 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c @@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, int flags, break; } spin_unlock_irq(&ctx->wqh.lock); - cpu_relax(); + if (isalarm(ctx)) + hrtimer_wait_for_timer(&ctx->t.alarm.timer); + else + hrtimer_wait_for_timer(&ctx->t.tmr); } /* diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h index e861a24f06f2..b5c97d3059c7 100644 --- a/include/acpi/platform/aclinux.h +++ b/include/acpi/platform/aclinux.h @@ -133,6 +133,7 @@ #define acpi_cache_t struct kmem_cache #define acpi_spinlock spinlock_t * +#define acpi_raw_spinlock raw_spinlock_t * #define acpi_cpu_flags unsigned long /* Use native linux version of acpi_os_allocate_zeroed */ @@ -151,6 +152,20 @@ #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock +#define acpi_os_create_raw_lock(__handle) \ +({ \ + raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ + \ + if (lock) { \ + *(__handle) = lock; \ + raw_spin_lock_init(*(__handle)); \ + } \ + lock ? 
AE_OK : AE_NO_MEMORY; \ + }) + +#define acpi_os_delete_raw_lock(__handle) kfree(__handle) + + /* * OSL interfaces used by debugger/disassembler */ diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 6f96247226a4..fa53a21263c2 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -215,6 +215,20 @@ void __warn(const char *file, int line, void *caller, unsigned taint, # define WARN_ON_SMP(x) ({0;}) #endif +#ifdef CONFIG_PREEMPT_RT_BASE +# define BUG_ON_RT(c) BUG_ON(c) +# define BUG_ON_NONRT(c) do { } while (0) +# define WARN_ON_RT(condition) WARN_ON(condition) +# define WARN_ON_NONRT(condition) do { } while (0) +# define WARN_ON_ONCE_NONRT(condition) do { } while (0) +#else +# define BUG_ON_RT(c) do { } while (0) +# define BUG_ON_NONRT(c) BUG_ON(c) +# define WARN_ON_RT(condition) do { } while (0) +# define WARN_ON_NONRT(condition) WARN_ON(condition) +# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) +#endif + #endif /* __ASSEMBLY__ */ #endif diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 535ab2e13d2e..cfc246899473 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) return unique_tag & BLK_MQ_UNIQUE_TAG_MASK; } - +void __blk_mq_complete_request_remote_work(struct work_struct *work); int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index f6a816129856..ec7a4676f8a8 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -89,6 +89,7 @@ struct request { struct list_head queuelist; union { struct call_single_data csd; + struct work_struct work; u64 fifo_time; }; @@ -467,7 +468,7 @@ struct request_queue { struct throtl_data *td; #endif struct rcu_head rcu_head; - wait_queue_head_t mq_freeze_wq; + struct swait_queue_head mq_freeze_wq; struct percpu_ref q_usage_counter; struct list_head all_q_node; diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index 8fdcb783197d..d07dbeec7bc1 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h @@ -3,6 +3,39 @@ #include <linux/preempt.h> +#ifdef CONFIG_PREEMPT_RT_FULL + +extern void __local_bh_disable(void); +extern void _local_bh_enable(void); +extern void __local_bh_enable(void); + +static inline void local_bh_disable(void) +{ + __local_bh_disable(); +} + +static inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) +{ + __local_bh_disable(); +} + +static inline void local_bh_enable(void) +{ + __local_bh_enable(); +} + +static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) +{ + __local_bh_enable(); +} + +static inline void local_bh_enable_ip(unsigned long ip) +{ + __local_bh_enable(); +} + +#else + #ifdef CONFIG_TRACE_IRQFLAGS extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); #else @@ -30,5 +63,6 @@ static inline void local_bh_enable(void) { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); } +#endif #endif /* _LINUX_BH_H */ diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index ebbacd14d450..be5e87f6360a 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -75,8 +75,50 @@ struct buffer_head { struct address_space *b_assoc_map; /* mapping this buffer is associated with */ atomic_t b_count; /* users using this buffer_head */ +#ifdef CONFIG_PREEMPT_RT_BASE + spinlock_t 
b_uptodate_lock; +#if IS_ENABLED(CONFIG_JBD2) + spinlock_t b_state_lock; + spinlock_t b_journal_head_lock; +#endif +#endif }; +static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) +{ + unsigned long flags; + +#ifndef CONFIG_PREEMPT_RT_BASE + local_irq_save(flags); + bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); +#else + spin_lock_irqsave(&bh->b_uptodate_lock, flags); +#endif + return flags; +} + +static inline void +bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) +{ +#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); + local_irq_restore(flags); +#else + spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); +#endif +} + +static inline void buffer_head_init_locks(struct buffer_head *bh) +{ +#ifdef CONFIG_PREEMPT_RT_BASE + spin_lock_init(&bh->b_uptodate_lock); +#if IS_ENABLED(CONFIG_JBD2) + spin_lock_init(&bh->b_state_lock); + spin_lock_init(&bh->b_journal_head_lock); +#endif +#endif +} + /* * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() * and buffer_foo() functions. diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 6fb1c34cf805..ccd2a5addb56 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -16,6 +16,7 @@ #include <linux/percpu-refcount.h> #include <linux/percpu-rwsem.h> #include <linux/workqueue.h> +#include <linux/swork.h> #ifdef CONFIG_CGROUPS @@ -138,6 +139,7 @@ struct cgroup_subsys_state { /* percpu_ref killing and RCU release */ struct rcu_head rcu_head; struct work_struct destroy_work; + struct swork_event destroy_swork; }; /* diff --git a/include/linux/completion.h b/include/linux/completion.h index 5d5aaae3af43..3bca1590e29f 100644 --- a/include/linux/completion.h +++ b/include/linux/completion.h @@ -7,8 +7,7 @@ * Atomic wait-for-completion handler data structures. * See kernel/sched/completion.c for details. 
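/*
 * Annotation: the completion rework that follows moves struct completion
 * from wait_queue_head_t to a simple wait queue (swait). complete() is
 * routinely called from hard interrupt context; on RT an ordinary
 * waitqueue lock is a sleeping spinlock and its wakeup path may run
 * arbitrary wake functions, neither of which is acceptable there. swait
 * queues use a raw spinlock and plain task wakeups. Completion users
 * need no change; a sketch of the unchanged API:
 */
static int wait_side(struct completion *done)
{
	/* init_completion(done) was performed by the owner */
	wait_for_completion(done);	/* now blocks on an swait queue */
	return 0;
}
/* signal side, possibly in hard irq context: complete(done) */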
*/ - -#include <linux/wait.h> +#include <linux/swait.h> /* * struct completion - structure used to maintain state for a "completion" @@ -24,11 +23,11 @@ */ struct completion { unsigned int done; - wait_queue_head_t wait; + struct swait_queue_head wait; }; #define COMPLETION_INITIALIZER(work) \ - { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } #define COMPLETION_INITIALIZER_ONSTACK(work) \ ({ init_completion(&work); work; }) @@ -73,7 +72,7 @@ struct completion { static inline void init_completion(struct completion *x) { x->done = 0; - init_waitqueue_head(&x->wait); + init_swait_queue_head(&x->wait); } /** diff --git a/include/linux/cpu.h b/include/linux/cpu.h index e571128ad99a..5e52d28c20c1 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -182,6 +182,8 @@ extern void get_online_cpus(void); extern void put_online_cpus(void); extern void cpu_hotplug_disable(void); extern void cpu_hotplug_enable(void); +extern void pin_current_cpu(void); +extern void unpin_current_cpu(void); #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) @@ -199,6 +201,8 @@ static inline void cpu_hotplug_done(void) {} #define put_online_cpus() do { } while (0) #define cpu_hotplug_disable() do { } while (0) #define cpu_hotplug_enable() do { } while (0) +static inline void pin_current_cpu(void) { } +static inline void unpin_current_cpu(void) { } #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) /* These aren't inline functions due to a GCC bug. */ diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ff295e166b2c..d532c60f3fb5 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h @@ -11,6 +11,7 @@ #include <linux/rcupdate.h> #include <linux/lockref.h> #include <linux/stringhash.h> +#include <linux/wait.h> struct path; struct vfsmount; @@ -100,7 +101,7 @@ struct dentry { union { struct list_head d_lru; /* LRU list */ - wait_queue_head_t *d_wait; /* in-lookup ones only */ + struct swait_queue_head *d_wait; /* in-lookup ones only */ }; struct list_head d_child; /* child of parent list */ struct list_head d_subdirs; /* our children */ @@ -230,7 +231,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op extern struct dentry * d_alloc(struct dentry *, const struct qstr *); extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, - wait_queue_head_t *); + struct swait_queue_head *); extern struct dentry * d_splice_alias(struct inode *, struct dentry *); extern struct dentry * d_add_ci(struct dentry *, struct inode *, struct qstr *); extern struct dentry * d_exact_alias(struct dentry *, struct inode *); diff --git a/include/linux/delay.h b/include/linux/delay.h index a6ecb34cf547..37caab306336 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h @@ -52,4 +52,10 @@ static inline void ssleep(unsigned int seconds) msleep(seconds * 1000); } +#ifdef CONFIG_PREEMPT_RT_FULL +extern void cpu_chill(void); +#else +# define cpu_chill() cpu_relax() +#endif + #endif /* defined(_LINUX_DELAY_H) */ diff --git a/include/linux/highmem.h b/include/linux/highmem.h index bb3f3297062a..a117a33ef72c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -7,6 +7,7 @@ #include <linux/mm.h> #include 
<linux/uaccess.h> #include <linux/hardirq.h> +#include <linux/sched.h> #include <asm/cacheflush.h> @@ -65,7 +66,7 @@ static inline void kunmap(struct page *page) static inline void *kmap_atomic(struct page *page) { - preempt_disable(); + preempt_disable_nort(); pagefault_disable(); return page_address(page); } @@ -74,7 +75,7 @@ static inline void *kmap_atomic(struct page *page) static inline void __kunmap_atomic(void *addr) { pagefault_enable(); - preempt_enable(); + preempt_enable_nort(); } #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) @@ -86,32 +87,51 @@ static inline void __kunmap_atomic(void *addr) #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) +#ifndef CONFIG_PREEMPT_RT_FULL DECLARE_PER_CPU(int, __kmap_atomic_idx); +#endif static inline int kmap_atomic_idx_push(void) { +#ifndef CONFIG_PREEMPT_RT_FULL int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; -#ifdef CONFIG_DEBUG_HIGHMEM +# ifdef CONFIG_DEBUG_HIGHMEM WARN_ON_ONCE(in_irq() && !irqs_disabled()); BUG_ON(idx >= KM_TYPE_NR); -#endif +# endif return idx; +#else + current->kmap_idx++; + BUG_ON(current->kmap_idx > KM_TYPE_NR); + return current->kmap_idx - 1; +#endif } static inline int kmap_atomic_idx(void) { +#ifndef CONFIG_PREEMPT_RT_FULL return __this_cpu_read(__kmap_atomic_idx) - 1; +#else + return current->kmap_idx - 1; +#endif } static inline void kmap_atomic_idx_pop(void) { -#ifdef CONFIG_DEBUG_HIGHMEM +#ifndef CONFIG_PREEMPT_RT_FULL +# ifdef CONFIG_DEBUG_HIGHMEM int idx = __this_cpu_dec_return(__kmap_atomic_idx); BUG_ON(idx < 0); -#else +# else __this_cpu_dec(__kmap_atomic_idx); +# endif +#else + current->kmap_idx--; +# ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(current->kmap_idx < 0); +# endif #endif } diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 5e00f80b1535..a34e10b55cde 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -87,6 +87,9 @@ enum hrtimer_restart { * @function: timer expiry callback function * @base: pointer to the timer base (per cpu and per clock) * @state: state information (See bit values above) + * @cb_entry: list entry to defer timers from hardirq context + * @irqsafe: timer can run in hardirq context + * @praecox: timer expiry time if expired at the time of programming * @is_rel: Set if the timer was armed relative * @start_pid: timer statistics field to store the pid of the task which * started the timer @@ -103,6 +106,11 @@ struct hrtimer { enum hrtimer_restart (*function)(struct hrtimer *); struct hrtimer_clock_base *base; u8 state; + struct list_head cb_entry; + int irqsafe; +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + ktime_t praecox; +#endif u8 is_rel; #ifdef CONFIG_TIMER_STATS int start_pid; @@ -123,11 +131,7 @@ struct hrtimer_sleeper { struct task_struct *task; }; -#ifdef CONFIG_64BIT # define HRTIMER_CLOCK_BASE_ALIGN 64 -#else -# define HRTIMER_CLOCK_BASE_ALIGN 32 -#endif /** * struct hrtimer_clock_base - the timer base for a specific clock @@ -136,6 +140,7 @@ struct hrtimer_sleeper { * timer to a base on another cpu. * @clockid: clock id for per_cpu support * @active: red black tree root node for the active timers + * @expired: list head for deferred timers. 
* @get_time: function to retrieve the current time of the clock * @offset: offset of this clock to the monotonic base */ @@ -144,6 +149,7 @@ struct hrtimer_clock_base { int index; clockid_t clockid; struct timerqueue_head active; + struct list_head expired; ktime_t (*get_time)(void); ktime_t offset; } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); @@ -187,6 +193,7 @@ struct hrtimer_cpu_base { raw_spinlock_t lock; seqcount_t seq; struct hrtimer *running; + struct hrtimer *running_soft; unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set_seq; @@ -203,6 +210,9 @@ struct hrtimer_cpu_base { unsigned int nr_hangs; unsigned int max_hang_time; #endif +#ifdef CONFIG_PREEMPT_RT_BASE + wait_queue_head_t wait; +#endif struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; } ____cacheline_aligned; @@ -412,6 +422,13 @@ static inline void hrtimer_restart(struct hrtimer *timer) hrtimer_start_expires(timer, HRTIMER_MODE_ABS); } +/* Softirq preemption could deadlock timer removal */ +#ifdef CONFIG_PREEMPT_RT_BASE + extern void hrtimer_wait_for_timer(const struct hrtimer *timer); +#else +# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) +#endif + /* Query timers: */ extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); @@ -436,9 +453,15 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) * Helper function to check, whether the timer is running the callback * function */ -static inline int hrtimer_callback_running(struct hrtimer *timer) +static inline int hrtimer_callback_running(const struct hrtimer *timer) { - return timer->base->cpu_base->running == timer; + if (timer->base->cpu_base->running == timer) + return 1; +#ifdef CONFIG_PREEMPT_RT_BASE + if (timer->base->cpu_base->running_soft == timer) + return 1; +#endif + return 0; } /* Forward a hrtimer so it expires after now: */ diff --git a/include/linux/idr.h b/include/linux/idr.h index 083d61e92706..5899796f50cb 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h @@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp); * Each idr_preload() should be matched with an invocation of this * function. See idr_preload() for details. 
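/*
 * Annotation: hrtimer_wait_for_timer(), declared in the hrtimer.h hunk
 * above, exists because on RT most hrtimer callbacks run from softirq
 * thread context rather than hard irq context. Spinning until
 * hrtimer_callback_running() clears (as fs/timerfd.c used to do with
 * cpu_relax()) can then deadlock once the spinner preempts that thread;
 * on RT the helper sleeps on cpu_base->wait instead. A hypothetical
 * synchronous-cancel loop:
 */
static void cancel_sync_sketch(struct hrtimer *t)
{
	while (hrtimer_try_to_cancel(t) < 0)	/* -1: callback in flight */
		hrtimer_wait_for_timer(t);	/* sleeps on RT, cpu_relax() on !RT */
}
/*
 * The idr_preload_end() change just below is similar in spirit: on RT
 * ending the preload section is more than a preempt_enable(), so it
 * becomes a real out-of-line function.
 */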
*/ +#ifdef CONFIG_PREEMPT_RT_FULL +void idr_preload_end(void); +#else static inline void idr_preload_end(void) { preempt_enable(); } +#endif /** * idr_find - return pointer for given id diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 325f649d77ff..a56e263f5005 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -150,6 +150,12 @@ extern struct task_group root_task_group; # define INIT_PERF_EVENTS(tsk) #endif +#ifdef CONFIG_PREEMPT_RT_BASE +# define INIT_TIMER_LIST .posix_timer_list = NULL, +#else +# define INIT_TIMER_LIST +#endif + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN # define INIT_VTIME(tsk) \ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ @@ -164,6 +170,7 @@ extern struct task_group root_task_group; #ifdef CONFIG_RT_MUTEXES # define INIT_RT_MUTEXES(tsk) \ .pi_waiters = RB_ROOT, \ + .pi_top_task = NULL, \ .pi_waiters_leftmost = NULL, #else # define INIT_RT_MUTEXES(tsk) @@ -250,6 +257,7 @@ extern struct task_group root_task_group; .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ .timer_slack_ns = 50000, /* 50 usec default slack */ \ + INIT_TIMER_LIST \ .pids = { \ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 72f0721f75e7..480972ae47d3 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -14,6 +14,7 @@ #include <linux/hrtimer.h> #include <linux/kref.h> #include <linux/workqueue.h> +#include <linux/swork.h> #include <linux/atomic.h> #include <asm/ptrace.h> @@ -61,6 +62,7 @@ * interrupt handler after suspending interrupts. For system * wakeup devices users need to implement wakeup detection in * their interrupt handlers. + * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) */ #define IRQF_SHARED 0x00000080 #define IRQF_PROBE_SHARED 0x00000100 @@ -74,6 +76,7 @@ #define IRQF_NO_THREAD 0x00010000 #define IRQF_EARLY_RESUME 0x00020000 #define IRQF_COND_SUSPEND 0x00040000 +#define IRQF_NO_SOFTIRQ_CALL 0x00080000 #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) @@ -196,7 +199,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id); #ifdef CONFIG_LOCKDEP # define local_irq_enable_in_hardirq() do { } while (0) #else -# define local_irq_enable_in_hardirq() local_irq_enable() +# define local_irq_enable_in_hardirq() local_irq_enable_nort() #endif extern void disable_irq_nosync(unsigned int irq); @@ -216,6 +219,7 @@ extern void resume_device_irqs(void); * struct irq_affinity_notify - context for notification of IRQ affinity changes * @irq: Interrupt to which notification applies * @kref: Reference count, for internal use + * @swork: Swork item, for internal use * @work: Work item, for internal use * @notify: Function to be called on change. This will be * called in process context. 
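/*
 * Annotation: IRQF_NO_SOFTIRQ_CALL, added above, only matters on RT,
 * where handlers are force-threaded (see the force_irqthreads hunk just
 * below). An irq thread normally processes the softirqs it raised right
 * after the handler returns; this flag suppresses that so the raised
 * softirqs are left to ksoftirqd. Hypothetical request, in which
 * dev_isr, "exampledev" and dev are placeholders:
 */
static int setup_device_irq(unsigned int irq, irq_handler_t dev_isr, void *dev)
{
	return request_irq(irq, dev_isr,
			   IRQF_NO_SOFTIRQ_CALL,  /* RT: defer softirqs to ksoftirqd */
			   "exampledev", dev);
}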
@@ -227,7 +231,11 @@ extern void resume_device_irqs(void); struct irq_affinity_notify { unsigned int irq; struct kref kref; +#ifdef CONFIG_PREEMPT_RT_BASE + struct swork_event swork; +#else struct work_struct work; +#endif void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); void (*release)(struct kref *ref); }; @@ -406,9 +414,13 @@ extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, bool state); #ifdef CONFIG_IRQ_FORCED_THREADING +# ifndef CONFIG_PREEMPT_RT_BASE extern bool force_irqthreads; +# else +# define force_irqthreads (true) +# endif #else -#define force_irqthreads (0) +#define force_irqthreads (false) #endif #ifndef __ARCH_SET_SOFTIRQ_PENDING @@ -465,9 +477,10 @@ struct softirq_action void (*action)(struct softirq_action *); }; +#ifndef CONFIG_PREEMPT_RT_FULL asmlinkage void do_softirq(void); asmlinkage void __do_softirq(void); - +static inline void thread_do_softirq(void) { do_softirq(); } #ifdef __ARCH_HAS_DO_SOFTIRQ void do_softirq_own_stack(void); #else @@ -476,13 +489,25 @@ static inline void do_softirq_own_stack(void) __do_softirq(); } #endif +#else +extern void thread_do_softirq(void); +#endif extern void open_softirq(int nr, void (*action)(struct softirq_action *)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); +#ifdef CONFIG_PREEMPT_RT_FULL +extern void __raise_softirq_irqoff_ksoft(unsigned int nr); +#else +static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) +{ + __raise_softirq_irqoff(nr); +} +#endif extern void raise_softirq_irqoff(unsigned int nr); extern void raise_softirq(unsigned int nr); +extern void softirq_check_pending_idle(void); DECLARE_PER_CPU(struct task_struct *, ksoftirqd); @@ -504,8 +529,9 @@ static inline struct task_struct *this_cpu_ksoftirqd(void) to be executed on some cpu at least once after this. * If the tasklet is already scheduled, but its execution is still not started, it will be executed only once. - * If this tasklet is already running on another CPU (or schedule is called - from tasklet itself), it is rescheduled for later. + * If this tasklet is already running on another CPU, it is rescheduled + for later. + * Schedule must not be called from the tasklet itself (a lockup occurs) * Tasklet is strictly serialized wrt itself, but not wrt another tasklets. If client needs some intertask synchronization, he makes it with spinlocks. 
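/*
 * Annotation: __raise_softirq_irqoff_ksoft(), declared in the hunk
 * above, appears to be the RT-only way to mark a softirq pending for the
 * ksoftirqd thread rather than for in-context processing; on !RT it
 * simply falls back to __raise_softirq_irqoff(). Sketch of the deferral
 * (NET_RX_SOFTIRQ is an arbitrary example):
 */
static void kick_rx_later(void)
{
	unsigned long flags;

	local_irq_save(flags);	/* the *_irqoff variants demand irqs off */
	__raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
	local_irq_restore(flags);
}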
@@ -530,27 +556,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data } enum { TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ - TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ + TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ + TASKLET_STATE_PENDING /* Tasklet is pending */ }; -#ifdef CONFIG_SMP +#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) +#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) +#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) + +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) static inline int tasklet_trylock(struct tasklet_struct *t) { return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); } +static inline int tasklet_tryunlock(struct tasklet_struct *t) +{ + return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; +} + static inline void tasklet_unlock(struct tasklet_struct *t) { smp_mb__before_atomic(); clear_bit(TASKLET_STATE_RUN, &(t)->state); } -static inline void tasklet_unlock_wait(struct tasklet_struct *t) -{ - while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } -} +extern void tasklet_unlock_wait(struct tasklet_struct *t); + #else #define tasklet_trylock(t) 1 +#define tasklet_tryunlock(t) 1 #define tasklet_unlock_wait(t) do { } while (0) #define tasklet_unlock(t) do { } while (0) #endif @@ -599,12 +634,7 @@ static inline void tasklet_disable(struct tasklet_struct *t) smp_mb(); } -static inline void tasklet_enable(struct tasklet_struct *t) -{ - smp_mb__before_atomic(); - atomic_dec(&t->count); -} - +extern void tasklet_enable(struct tasklet_struct *t); extern void tasklet_kill(struct tasklet_struct *t); extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, @@ -635,6 +665,12 @@ void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) tasklet_kill(&ttimer->tasklet); } +#ifdef CONFIG_PREEMPT_RT_FULL +extern void softirq_early_init(void); +#else +static inline void softirq_early_init(void) { } +#endif + /* * Autoprobing for irqs: * diff --git a/include/linux/irq.h b/include/linux/irq.h index 39e3254e5769..8ebac94fbb9f 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -72,6 +72,7 @@ enum irqchip_irq_state; * IRQ_IS_POLLED - Always polled by another interrupt. Exclude * it from the spurious interrupt detection * mechanism and from core side polling. 
+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) * IRQ_DISABLE_UNLAZY - Disable lazy irq disable */ enum { @@ -99,13 +100,14 @@ enum { IRQ_PER_CPU_DEVID = (1 << 17), IRQ_IS_POLLED = (1 << 18), IRQ_DISABLE_UNLAZY = (1 << 19), + IRQ_NO_SOFTIRQ_CALL = (1 << 20), }; #define IRQF_MODIFY_MASK \ (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ - IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) + IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 47b9ebd4a74f..2543aab05daa 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -16,6 +16,7 @@ #define IRQ_WORK_BUSY 2UL #define IRQ_WORK_FLAGS 3UL #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ +#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ struct irq_work { unsigned long flags; @@ -51,4 +52,10 @@ static inline bool irq_work_needs_cpu(void) { return false; } static inline void irq_work_run(void) { } #endif +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) +void irq_work_tick_soft(void); +#else +static inline void irq_work_tick_soft(void) { } +#endif + #endif /* _LINUX_IRQ_WORK_H */ diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index c9be57931b58..eeeb540971ae 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -66,6 +66,7 @@ struct irq_desc { unsigned int irqs_unhandled; atomic_t threads_handled; int threads_handled_last; + u64 random_ip; raw_spinlock_t lock; struct cpumask *percpu_enabled; const struct cpumask *percpu_affinity; diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 5dd1272d1ab2..9b77034f7c5e 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h @@ -25,8 +25,6 @@ # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) -# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) -# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, #else # define trace_hardirqs_on() do { } while (0) @@ -39,9 +37,15 @@ # define trace_softirqs_enabled(p) 0 # define trace_hardirq_enter() do { } while (0) # define trace_hardirq_exit() do { } while (0) +# define INIT_TRACE_IRQFLAGS +#endif + +#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) +# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) +#else # define lockdep_softirq_enter() do { } while (0) # define lockdep_softirq_exit() do { } while (0) -# define INIT_TRACE_IRQFLAGS #endif #if defined(CONFIG_IRQSOFF_TRACER) || \ @@ -148,4 +152,23 @@ #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) +/* + * local_irq* variants depending on RT/!RT + */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define local_irq_disable_nort() do { } while (0) +# define local_irq_enable_nort() do { } while (0) +# define local_irq_save_nort(flags) local_save_flags(flags) +# define local_irq_restore_nort(flags) (void)(flags) +# define local_irq_disable_rt() local_irq_disable() +# define local_irq_enable_rt() local_irq_enable() 
+#else +# define local_irq_disable_nort() local_irq_disable() +# define local_irq_enable_nort() local_irq_enable() +# define local_irq_save_nort(flags) local_irq_save(flags) +# define local_irq_restore_nort(flags) local_irq_restore(flags) +# define local_irq_disable_rt() do { } while (0) +# define local_irq_enable_rt() do { } while (0) +#endif + #endif diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index dfaa1f4dcb0c..d57dd06544a1 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -347,32 +347,56 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh) static inline void jbd_lock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_State, &bh->b_state); +#else + spin_lock(&bh->b_state_lock); +#endif } static inline int jbd_trylock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_trylock(BH_State, &bh->b_state); +#else + return spin_trylock(&bh->b_state_lock); +#endif } static inline int jbd_is_locked_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE return bit_spin_is_locked(BH_State, &bh->b_state); +#else + return spin_is_locked(&bh->b_state_lock); +#endif } static inline void jbd_unlock_bh_state(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_State, &bh->b_state); +#else + spin_unlock(&bh->b_state_lock); +#endif } static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(BH_JournalHead, &bh->b_state); +#else + spin_lock(&bh->b_journal_head_lock); +#endif } static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_unlock(BH_JournalHead, &bh->b_state); +#else + spin_unlock(&bh->b_journal_head_lock); +#endif } #define J_ASSERT(assert) BUG_ON(!(assert)) diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 410decacff8f..0861bebfc188 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -167,6 +167,7 @@ extern __printf(2, 0) int vkdb_printf(enum kdb_msgsrc src, const char *fmt, extern __printf(1, 2) int kdb_printf(const char *, ...); typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); +#define in_kdb_printk() (kdb_trap_printk) extern void kdb_init(int level); /* Access to kdb specific polling devices */ @@ -201,6 +202,7 @@ extern int kdb_register_flags(char *, kdb_func_t, char *, char *, extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } +#define in_kdb_printk() (0) static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } diff --git a/include/linux/kernel.h b/include/linux/kernel.h index bc6ed52a39b9..7894d55e4998 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -194,6 +194,9 @@ extern int _cond_resched(void); */ # define might_sleep() \ do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) + +# define might_sleep_no_state_check() \ + do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) # define sched_annotate_sleep() (current->task_state_change = 0) #else static inline void ___might_sleep(const char *file, int line, @@ -201,6 +204,7 @@ extern int _cond_resched(void); static inline void __might_sleep(const char *file, int line, int preempt_offset) { } # define might_sleep() do { might_resched(); } while (0) +# define might_sleep_no_state_check() do { might_resched(); } while (0) # define sched_annotate_sleep() do { } while (0) #endif @@ -488,6 +492,7 @@ extern enum system_states { SYSTEM_HALT, SYSTEM_POWER_OFF, SYSTEM_RESTART, + SYSTEM_SUSPEND, } system_state; #define TAINT_PROPRIETARY_MODULE 0 diff --git a/include/linux/list_bl.h b/include/linux/list_bl.h index cb483305e1f5..4e5062316bb6 100644 --- a/include/linux/list_bl.h +++ b/include/linux/list_bl.h @@ -2,6 +2,7 @@ #define _LINUX_LIST_BL_H #include <linux/list.h> +#include <linux/spinlock.h> #include <linux/bit_spinlock.h> /* @@ -32,13 +33,24 @@ struct hlist_bl_head { struct hlist_bl_node *first; +#ifdef CONFIG_PREEMPT_RT_BASE + raw_spinlock_t lock; +#endif }; struct hlist_bl_node { struct hlist_bl_node *next, **pprev; }; -#define INIT_HLIST_BL_HEAD(ptr) \ - ((ptr)->first = NULL) + +#ifdef CONFIG_PREEMPT_RT_BASE +#define INIT_HLIST_BL_HEAD(h) \ +do { \ + (h)->first = NULL; \ + raw_spin_lock_init(&(h)->lock); \ +} while (0) +#else +#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL +#endif static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) { @@ -118,12 +130,26 @@ static inline void hlist_bl_del_init(struct hlist_bl_node *n) static inline void hlist_bl_lock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE bit_spin_lock(0, (unsigned long *)b); +#else + raw_spin_lock(&b->lock); +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __set_bit(0, (unsigned long *)b); +#endif +#endif } static inline void hlist_bl_unlock(struct hlist_bl_head *b) { +#ifndef CONFIG_PREEMPT_RT_BASE __bit_spin_unlock(0, (unsigned long *)b); +#else +#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) + __clear_bit(0, (unsigned long *)b); +#endif + raw_spin_unlock(&b->lock); +#endif } static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) diff --git a/include/linux/locallock.h b/include/linux/locallock.h new file mode 100644 index 000000000000..845c77f1a5ca --- /dev/null +++ b/include/linux/locallock.h @@ -0,0 +1,278 @@ +#ifndef _LINUX_LOCALLOCK_H +#define _LINUX_LOCALLOCK_H + +#include <linux/percpu.h> +#include <linux/spinlock.h> + +#ifdef CONFIG_PREEMPT_RT_BASE + +#ifdef CONFIG_DEBUG_SPINLOCK +# define LL_WARN(cond) WARN_ON(cond) +#else +# define LL_WARN(cond) do { } while (0) +#endif + +/* + * per cpu lock based substitute for local_irq_*() + */ +struct local_irq_lock { + spinlock_t lock; + struct task_struct *owner; + int nestcnt; + unsigned long flags; +}; + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ + DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ + .lock = 
__SPIN_LOCK_UNLOCKED((lvar).lock) } + +#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ + DECLARE_PER_CPU(struct local_irq_lock, lvar) + +#define local_irq_lock_init(lvar) \ + do { \ + int __cpu; \ + for_each_possible_cpu(__cpu) \ + spin_lock_init(&per_cpu(lvar, __cpu).lock); \ + } while (0) + +/* + * spin_lock|trylock|unlock_local flavour that does not migrate disable + * used for __local_lock|trylock|unlock where get_local_var/put_local_var + * already takes care of the migrate_disable/enable + * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. + */ +#ifdef CONFIG_PREEMPT_RT_FULL +# define spin_lock_local(lock) rt_spin_lock__no_mg(lock) +# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock) +# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock) +#else +# define spin_lock_local(lock) spin_lock(lock) +# define spin_trylock_local(lock) spin_trylock(lock) +# define spin_unlock_local(lock) spin_unlock(lock) +#endif + +static inline void __local_lock(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + spin_lock_local(&lv->lock); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + } + lv->nestcnt++; +} + +#define local_lock(lvar) \ + do { __local_lock(&get_local_var(lvar)); } while (0) + +#define local_lock_on(lvar, cpu) \ + do { __local_lock(&per_cpu(lvar, cpu)); } while (0) + +static inline int __local_trylock(struct local_irq_lock *lv) +{ + if (lv->owner != current && spin_trylock_local(&lv->lock)) { + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; + return 1; + } + return 0; +} + +#define local_trylock(lvar) \ + ({ \ + int __locked; \ + __locked = __local_trylock(&get_local_var(lvar)); \ + if (!__locked) \ + put_local_var(lvar); \ + __locked; \ + }) + +static inline void __local_unlock(struct local_irq_lock *lv) +{ + LL_WARN(lv->nestcnt == 0); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return; + + lv->owner = NULL; + spin_unlock_local(&lv->lock); +} + +#define local_unlock(lvar) \ + do { \ + __local_unlock(this_cpu_ptr(&lvar)); \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_on(lvar, cpu) \ + do { __local_unlock(&per_cpu(lvar, cpu)); } while (0) + +static inline void __local_lock_irq(struct local_irq_lock *lv) +{ + spin_lock_irqsave(&lv->lock, lv->flags); + LL_WARN(lv->owner); + LL_WARN(lv->nestcnt); + lv->owner = current; + lv->nestcnt = 1; +} + +#define local_lock_irq(lvar) \ + do { __local_lock_irq(&get_local_var(lvar)); } while (0) + +#define local_lock_irq_on(lvar, cpu) \ + do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) + +static inline void __local_unlock_irq(struct local_irq_lock *lv) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + lv->owner = NULL; + lv->nestcnt = 0; + spin_unlock_irq(&lv->lock); +} + +#define local_unlock_irq(lvar) \ + do { \ + __local_unlock_irq(this_cpu_ptr(&lvar)); \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irq_on(lvar, cpu) \ + do { \ + __local_unlock_irq(&per_cpu(lvar, cpu)); \ + } while (0) + +static inline int __local_lock_irqsave(struct local_irq_lock *lv) +{ + if (lv->owner != current) { + __local_lock_irq(lv); + return 0; + } else { + lv->nestcnt++; + return 1; + } +} + +#define local_lock_irqsave(lvar, _flags) \ + do { \ + if (__local_lock_irqsave(&get_local_var(lvar))) \ + put_local_var(lvar); \ + _flags = __this_cpu_read(lvar.flags); \ + } while (0) + +#define local_lock_irqsave_on(lvar, _flags, cpu) \ + do { \ + __local_lock_irqsave(&per_cpu(lvar, cpu)); \ + _flags = per_cpu(lvar, cpu).flags; \ + 
} while (0) + +static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, + unsigned long flags) +{ + LL_WARN(!lv->nestcnt); + LL_WARN(lv->owner != current); + if (--lv->nestcnt) + return 0; + + lv->owner = NULL; + spin_unlock_irqrestore(&lv->lock, lv->flags); + return 1; +} + +#define local_unlock_irqrestore(lvar, flags) \ + do { \ + if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ + put_local_var(lvar); \ + } while (0) + +#define local_unlock_irqrestore_on(lvar, flags, cpu) \ + do { \ + __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ + } while (0) + +#define local_spin_trylock_irq(lvar, lock) \ + ({ \ + int __locked; \ + local_lock_irq(lvar); \ + __locked = spin_trylock(lock); \ + if (!__locked) \ + local_unlock_irq(lvar); \ + __locked; \ + }) + +#define local_spin_lock_irq(lvar, lock) \ + do { \ + local_lock_irq(lvar); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irq(lvar, lock) \ + do { \ + spin_unlock(lock); \ + local_unlock_irq(lvar); \ + } while (0) + +#define local_spin_lock_irqsave(lvar, lock, flags) \ + do { \ + local_lock_irqsave(lvar, flags); \ + spin_lock(lock); \ + } while (0) + +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + do { \ + spin_unlock(lock); \ + local_unlock_irqrestore(lvar, flags); \ + } while (0) + +#define get_locked_var(lvar, var) \ + (*({ \ + local_lock(lvar); \ + this_cpu_ptr(&var); \ + })) + +#define put_locked_var(lvar, var) local_unlock(lvar); + +#define local_lock_cpu(lvar) \ + ({ \ + local_lock(lvar); \ + smp_processor_id(); \ + }) + +#define local_unlock_cpu(lvar) local_unlock(lvar) + +#else /* PREEMPT_RT_BASE */ + +#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar +#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar + +static inline void local_irq_lock_init(int lvar) { } + +#define local_lock(lvar) preempt_disable() +#define local_unlock(lvar) preempt_enable() +#define local_lock_irq(lvar) local_irq_disable() +#define local_lock_irq_on(lvar, cpu) local_irq_disable() +#define local_unlock_irq(lvar) local_irq_enable() +#define local_unlock_irq_on(lvar, cpu) local_irq_enable() +#define local_lock_irqsave(lvar, flags) local_irq_save(flags) +#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) + +#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) +#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) +#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) +#define local_spin_lock_irqsave(lvar, lock, flags) \ + spin_lock_irqsave(lock, flags) +#define local_spin_unlock_irqrestore(lvar, lock, flags) \ + spin_unlock_irqrestore(lock, flags) + +#define get_locked_var(lvar, var) get_cpu_var(var) +#define put_locked_var(lvar, var) put_cpu_var(var) + +#define local_lock_cpu(lvar) get_cpu() +#define local_unlock_cpu(lvar) put_cpu() + +#endif + +#endif diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e8471c2ca83a..08bde1a7a987 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -11,6 +11,7 @@ #include <linux/completion.h> #include <linux/cpumask.h> #include <linux/uprobes.h> +#include <linux/rcupdate.h> #include <linux/page-flags-layout.h> #include <linux/workqueue.h> #include <asm/page.h> @@ -513,6 +514,9 @@ struct mm_struct { bool tlb_flush_batched; #endif struct uprobes_state uprobes_state; +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head delayed_drop; +#endif #ifdef CONFIG_X86_INTEL_MPX /* address of the bounds directory */ void __user *bd_addr; diff --git 
a/include/linux/module.h b/include/linux/module.h index 0c3207d26ac0..5944baaa3f28 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -496,6 +496,7 @@ static inline int module_is_live(struct module *mod) struct module *__module_text_address(unsigned long addr); struct module *__module_address(unsigned long addr); bool is_module_address(unsigned long addr); +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr); bool is_module_percpu_address(unsigned long addr); bool is_module_text_address(unsigned long addr); @@ -663,6 +664,11 @@ static inline bool is_module_percpu_address(unsigned long addr) return false; } +static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + static inline bool is_module_text_address(unsigned long addr) { return false; diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 2cb7531e7d7a..b3fdfc820216 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -19,6 +19,17 @@ #include <asm/processor.h> #include <linux/osq_lock.h> +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ + , .dep_map = { .name = #lockname } +#else +# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +#endif + +#ifdef CONFIG_PREEMPT_RT_FULL +# include <linux/mutex_rt.h> +#else + /* * Simple, straightforward mutexes with strict semantics: * @@ -99,13 +110,6 @@ do { \ static inline void mutex_destroy(struct mutex *lock) {} #endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ - , .dep_map = { .name = #lockname } -#else -# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -#endif - #define __MUTEX_INITIALIZER(lockname) \ { .count = ATOMIC_INIT(1) \ , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ @@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killable(struct mutex *lock); extern int mutex_trylock(struct mutex *lock); extern void mutex_unlock(struct mutex *lock); +#endif /* !PREEMPT_RT_FULL */ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/mutex_rt.h b/include/linux/mutex_rt.h new file mode 100644 index 000000000000..e0284edec655 --- /dev/null +++ b/include/linux/mutex_rt.h @@ -0,0 +1,89 @@ +#ifndef __LINUX_MUTEX_RT_H +#define __LINUX_MUTEX_RT_H + +#ifndef __LINUX_MUTEX_H +#error "Please include mutex.h" +#endif + +#include <linux/rtmutex.h> + +/* FIXME: Just for __lockfunc */ +#include <linux/spinlock.h> + +struct mutex { + struct rt_mutex lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __MUTEX_INITIALIZER(mutexname) \ + { \ + .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ + __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ + } + +#define DEFINE_MUTEX(mutexname) \ + struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) + +extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); +extern void __lockfunc _mutex_lock(struct mutex *lock); +extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); +extern int __lockfunc _mutex_lock_killable(struct mutex *lock); +extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); +extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); +extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); +extern int __lockfunc _mutex_trylock(struct 
mutex *lock); +extern void __lockfunc _mutex_unlock(struct mutex *lock); + +#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) +#define mutex_lock(l) _mutex_lock(l) +#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) +#define mutex_lock_killable(l) _mutex_lock_killable(l) +#define mutex_trylock(l) _mutex_trylock(l) +#define mutex_unlock(l) _mutex_unlock(l) + +#ifdef CONFIG_DEBUG_MUTEXES +#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) +#else +static inline void mutex_destroy(struct mutex *lock) {} +#endif + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible_nested(l, s) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable_nested(l, s) + +# define mutex_lock_nest_lock(lock, nest_lock) \ +do { \ + typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ + _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ +} while (0) + +#else +# define mutex_lock_nested(l, s) _mutex_lock(l) +# define mutex_lock_interruptible_nested(l, s) \ + _mutex_lock_interruptible(l) +# define mutex_lock_killable_nested(l, s) \ + _mutex_lock_killable(l) +# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) +#endif + +# define mutex_init(mutex) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), #mutex, &__key); \ +} while (0) + +# define __mutex_init(mutex, name, key) \ +do { \ + rt_mutex_init(&(mutex)->lock); \ + __mutex_do_init((mutex), name, key); \ +} while (0) + +#endif diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 23db1ae37464..67af02914bf8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handler_result_t; typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb); void __napi_schedule(struct napi_struct *n); + +/* + * When PREEMPT_RT_FULL is defined, all device interrupt handlers + * run as threads, and they can also be preempted (without PREEMPT_RT + * interrupt threads can not be preempted). Which means that calling + * __napi_schedule_irqoff() from an interrupt handler can be preempted + * and can corrupt the napi->poll_list. 
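+ * + * On PREEMPT_RT_FULL the define below therefore maps __napi_schedule_irqoff() to plain __napi_schedule(), which takes the irq-safe locking path.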
+ */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define __napi_schedule_irqoff(n) __napi_schedule(n) +#else void __napi_schedule_irqoff(struct napi_struct *n); +#endif static inline bool napi_disable_pending(struct napi_struct *n) { @@ -2464,14 +2476,53 @@ void netdev_freemem(struct net_device *dev); void synchronize_net(void); int init_dummy_netdev(struct net_device *dev); -DECLARE_PER_CPU(int, xmit_recursion); #define XMIT_RECURSION_LIMIT 10 +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int dev_recursion_level(void) +{ + return current->xmit_recursion; +} + +static inline int xmit_rec_read(void) +{ + return current->xmit_recursion; +} + +static inline void xmit_rec_inc(void) +{ + current->xmit_recursion++; +} + +static inline void xmit_rec_dec(void) +{ + current->xmit_recursion--; +} + +#else + +DECLARE_PER_CPU(int, xmit_recursion); static inline int dev_recursion_level(void) { return this_cpu_read(xmit_recursion); } +static inline int xmit_rec_read(void) +{ + return __this_cpu_read(xmit_recursion); +} + +static inline void xmit_rec_inc(void) +{ + __this_cpu_inc(xmit_recursion); +} + +static inline void xmit_rec_dec(void) +{ + __this_cpu_dec(xmit_recursion); +} +#endif + struct net_device *dev_get_by_index(struct net *net, int ifindex); struct net_device *__dev_get_by_index(struct net *net, int ifindex); struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex); @@ -2856,6 +2907,7 @@ struct softnet_data { unsigned int dropped; struct sk_buff_head input_pkt_queue; struct napi_struct backlog; + struct sk_buff_head tofree_queue; }; diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 2ad1a2b289b5..b4d10155af54 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/static_key.h> +#include <linux/locallock.h> #include <uapi/linux/netfilter/x_tables.h> /* Test a struct->invflags and a boolean for inequality */ @@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_info *info); */ DECLARE_PER_CPU(seqcount_t, xt_recseq); +DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); + /* xt_tee_enabled - true if x_tables needs to handle reentrancy * * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. @@ -320,6 +323,9 @@ static inline unsigned int xt_write_recseq_begin(void) { unsigned int addend; + /* RT protection */ + local_lock(xt_write_lock); + /* * Low order bit of sequence is set if we already * called xt_write_recseq_begin(). 
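For illustration only, a minimal sketch (not part of the patch) of the locallock.h pattern that the xt_write_lock change above relies on; the names stats_lock, stats_count and stats_inc are hypothetical:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_LOCAL_IRQ_LOCK(stats_lock);
static DEFINE_PER_CPU(unsigned long, stats_count);

static void stats_inc(void)
{
	/* On PREEMPT_RT this takes a per-CPU sleeping spinlock with
	 * migration disabled; on !RT it reduces to preempt_disable(). */
	local_lock(stats_lock);
	this_cpu_inc(stats_count);
	local_unlock(stats_lock);
}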
@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(unsigned int addend) /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ smp_wmb(); __this_cpu_add(xt_recseq.sequence, addend); + local_unlock(xt_write_lock); } /* diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 810124b33327..d54ca43d571f 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -165,7 +165,11 @@ struct nfs_inode { /* Readers: in-flight sillydelete RPC calls */ /* Writers: rmdir */ +#ifdef CONFIG_PREEMPT_RT_BASE + struct semaphore rmdir_sem; +#else struct rw_semaphore rmdir_sem; +#endif #if IS_ENABLED(CONFIG_NFS_V4) struct nfs4_cached_acl *nfs4_acl; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 3bf867a0c3b3..71c6bdd14c8a 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1490,7 +1490,7 @@ struct nfs_unlinkdata { struct nfs_removeargs args; struct nfs_removeres res; struct dentry *dentry; - wait_queue_head_t wq; + struct swait_queue_head wq; struct rpc_cred *cred; struct nfs_fattr dir_attr; long timeout; diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 4149868de4e6..babe5b9bcb91 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h @@ -6,7 +6,7 @@ * * Alan Cox <Alan.Cox@linux.org> */ - + #ifndef _LINUX_NOTIFIER_H #define _LINUX_NOTIFIER_H #include <linux/errno.h> @@ -42,9 +42,7 @@ * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. * As compensation, srcu_notifier_chain_unregister() is rather expensive. * SRCU notifier chains should be used when the chain will be called very - * often but notifier_blocks will seldom be removed. Also, SRCU notifier - * chains are slightly more difficult to use because they require special - * runtime initialization. + * often but notifier_blocks will seldom be removed. */ struct notifier_block; @@ -90,7 +88,7 @@ struct srcu_notifier_head { (name)->head = NULL; \ } while (0) -/* srcu_notifier_heads must be initialized and cleaned up dynamically */ +/* srcu_notifier_heads must be cleaned up dynamically */ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); #define srcu_cleanup_notifier_head(name) \ cleanup_srcu_struct(&(name)->srcu); @@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); .head = NULL } #define RAW_NOTIFIER_INIT(name) { \ .head = NULL } -/* srcu_notifier_heads cannot be initialized statically */ + +#define SRCU_NOTIFIER_INIT(name, pcpu) \ + { \ + .mutex = __MUTEX_INITIALIZER(name.mutex), \ + .head = NULL, \ + .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ + } #define ATOMIC_NOTIFIER_HEAD(name) \ struct atomic_notifier_head name = \ @@ -115,6 +119,18 @@ extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); struct raw_notifier_head name = \ RAW_NOTIFIER_INIT(name) +#define _SRCU_NOTIFIER_HEAD(name, mod) \ + static DEFINE_PER_CPU(struct srcu_struct_array, \ + name##_head_srcu_array); \ + mod struct srcu_notifier_head name = \ + SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) + +#define SRCU_NOTIFIER_HEAD(name) \ + _SRCU_NOTIFIER_HEAD(name, ) + +#define SRCU_NOTIFIER_HEAD_STATIC(name) \ + _SRCU_NOTIFIER_HEAD(name, static) + #ifdef __KERNEL__ extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, @@ -184,12 +200,12 @@ static inline int notifier_to_errno(int ret) /* * Declared notifiers so far. 
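A hedged sketch of how the new static SRCU notifier head might be used; my_notifier_head, my_event_cb and my_nb are hypothetical names, while srcu_notifier_chain_register() is the existing registration call:

#include <linux/init.h>
#include <linux/notifier.h>

SRCU_NOTIFIER_HEAD_STATIC(my_notifier_head);	/* no srcu_init_notifier_head() needed */

static int my_event_cb(struct notifier_block *nb, unsigned long action, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_event_cb };

static int __init my_setup(void)
{
	return srcu_notifier_chain_register(&my_notifier_head, &my_nb);
}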
I can imagine quite a few more chains - * over time (eg laptop power reset chains, reboot chain (to clean + * over time (eg laptop power reset chains, reboot chain (to clean * device units up), device [un]mount chain, module load/unload chain, - * low memory chain, screenblank chain (for plug in modular screenblankers) + * low memory chain, screenblank chain (for plug in modular screenblankers) * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... */ - + /* CPU notfiers are defined in include/linux/cpu.h. */ /* netdevice notifiers are defined in include/linux/netdevice.h */ diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 5b2e6159b744..ea940f451606 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -4,7 +4,7 @@ #include <linux/atomic.h> #include <linux/rwsem.h> #include <linux/percpu.h> -#include <linux/wait.h> +#include <linux/swait.h> #include <linux/rcu_sync.h> #include <linux/lockdep.h> @@ -12,7 +12,7 @@ struct percpu_rw_semaphore { struct rcu_sync rss; unsigned int __percpu *read_count; struct rw_semaphore rw_sem; - wait_queue_head_t writer; + struct swait_queue_head writer; int readers_block; }; @@ -22,13 +22,13 @@ static struct percpu_rw_semaphore name = { \ .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \ .read_count = &__percpu_rwsem_rc_##name, \ .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \ - .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ + .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer), \ } extern int __percpu_down_read(struct percpu_rw_semaphore *, int); extern void __percpu_up_read(struct percpu_rw_semaphore *); -static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) +static inline void percpu_down_read(struct percpu_rw_semaphore *sem) { might_sleep(); @@ -46,16 +46,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * __this_cpu_inc(*sem->read_count); if (unlikely(!rcu_sync_is_idle(&sem->rss))) __percpu_down_read(sem, false); /* Unconditional memory barrier */ - barrier(); /* - * The barrier() prevents the compiler from + * The preempt_enable() prevents the compiler from * bleeding the critical section out. */ -} - -static inline void percpu_down_read(struct percpu_rw_semaphore *sem) -{ - percpu_down_read_preempt_disable(sem); preempt_enable(); } @@ -82,13 +76,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) return ret; } -static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) +static inline void percpu_up_read(struct percpu_rw_semaphore *sem) { - /* - * The barrier() prevents the compiler from - * bleeding the critical section out. - */ - barrier(); + preempt_disable(); /* * Same as in percpu_down_read(). 
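* (The preempt_disable() above pairs with the preempt_enable() at the end of the function and, like the barrier() it replaces, keeps the compiler from bleeding the critical section out.)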
*/ @@ -101,12 +91,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); } -static inline void percpu_up_read(struct percpu_rw_semaphore *sem) -{ - preempt_disable(); - percpu_up_read_preempt_enable(sem); -} - extern void percpu_down_write(struct percpu_rw_semaphore *); extern void percpu_up_write(struct percpu_rw_semaphore *); diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 56939d3f6e53..b988bf40ad3e 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -18,6 +18,35 @@ #define PERCPU_MODULE_RESERVE 0 #endif +#ifdef CONFIG_PREEMPT_RT_FULL + +#define get_local_var(var) (*({ \ + migrate_disable(); \ + this_cpu_ptr(&var); })) + +#define put_local_var(var) do { \ + (void)&(var); \ + migrate_enable(); \ +} while (0) + +# define get_local_ptr(var) ({ \ + migrate_disable(); \ + this_cpu_ptr(var); }) + +# define put_local_ptr(var) do { \ + (void)(var); \ + migrate_enable(); \ +} while (0) + +#else + +#define get_local_var(var) get_cpu_var(var) +#define put_local_var(var) put_cpu_var(var) +#define get_local_ptr(var) get_cpu_ptr(var) +#define put_local_ptr(var) put_cpu_ptr(var) + +#endif + /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) @@ -110,6 +139,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, #endif extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); +extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr); extern bool is_kernel_percpu_address(unsigned long addr); #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) diff --git a/include/linux/pid.h b/include/linux/pid.h index 97b745ddece5..01a5460a0c85 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h @@ -2,6 +2,7 @@ #define _LINUX_PID_H #include <linux/rcupdate.h> +#include <linux/atomic.h> enum pid_type { diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 75e4e30677f1..1cfb1cb72354 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -50,7 +50,11 @@ #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) #define NMI_OFFSET (1UL << NMI_SHIFT) -#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#ifndef CONFIG_PREEMPT_RT_FULL +# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) +#else +# define SOFTIRQ_DISABLE_OFFSET (0) +#endif /* We use the MSB mostly because its available */ #define PREEMPT_NEED_RESCHED 0x80000000 @@ -59,9 +63,15 @@ #include <asm/preempt.h> #define hardirq_count() (preempt_count() & HARDIRQ_MASK) -#define softirq_count() (preempt_count() & SOFTIRQ_MASK) #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ | NMI_MASK)) +#ifndef CONFIG_PREEMPT_RT_FULL +# define softirq_count() (preempt_count() & SOFTIRQ_MASK) +# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) +#else +# define softirq_count() (0UL) +extern int in_serving_softirq(void); +#endif /* * Are we doing bottom half or hardware interrupt processing? @@ -72,7 +82,6 @@ #define in_irq() (hardirq_count()) #define in_softirq() (softirq_count()) #define in_interrupt() (irq_count()) -#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) /* * Are we in NMI context? 
@@ -91,7 +100,11 @@ /* * The preempt_count offset after spin_lock() */ +#if !defined(CONFIG_PREEMPT_RT_FULL) #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET +#else +#define PREEMPT_LOCK_OFFSET 0 +#endif /* * The preempt_count offset needed for things like: @@ -140,6 +153,20 @@ extern void preempt_count_sub(int val); #define preempt_count_inc() preempt_count_add(1) #define preempt_count_dec() preempt_count_sub(1) +#ifdef CONFIG_PREEMPT_LAZY +#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) +#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) +#define inc_preempt_lazy_count() add_preempt_lazy_count(1) +#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) +#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) +#else +#define add_preempt_lazy_count(val) do { } while (0) +#define sub_preempt_lazy_count(val) do { } while (0) +#define inc_preempt_lazy_count() do { } while (0) +#define dec_preempt_lazy_count() do { } while (0) +#define preempt_lazy_count() (0) +#endif + #ifdef CONFIG_PREEMPT_COUNT #define preempt_disable() \ @@ -148,13 +175,25 @@ do { \ barrier(); \ } while (0) +#define preempt_lazy_disable() \ +do { \ + inc_preempt_lazy_count(); \ + barrier(); \ +} while (0) + #define sched_preempt_enable_no_resched() \ do { \ barrier(); \ preempt_count_dec(); \ } while (0) -#define preempt_enable_no_resched() sched_preempt_enable_no_resched() +#ifdef CONFIG_PREEMPT_RT_BASE +# define preempt_enable_no_resched() sched_preempt_enable_no_resched() +# define preempt_check_resched_rt() preempt_check_resched() +#else +# define preempt_enable_no_resched() preempt_enable() +# define preempt_check_resched_rt() barrier(); +#endif #define preemptible() (preempt_count() == 0 && !irqs_disabled()) @@ -179,6 +218,13 @@ do { \ __preempt_schedule(); \ } while (0) +#define preempt_lazy_enable() \ +do { \ + dec_preempt_lazy_count(); \ + barrier(); \ + preempt_check_resched(); \ +} while (0) + #else /* !CONFIG_PREEMPT */ #define preempt_enable() \ do { \ @@ -224,6 +270,7 @@ do { \ #define preempt_disable_notrace() barrier() #define preempt_enable_no_resched_notrace() barrier() #define preempt_enable_notrace() barrier() +#define preempt_check_resched_rt() barrier() #define preemptible() 0 #endif /* CONFIG_PREEMPT_COUNT */ @@ -244,10 +291,31 @@ do { \ } while (0) #define preempt_fold_need_resched() \ do { \ - if (tif_need_resched()) \ + if (tif_need_resched_now()) \ set_preempt_need_resched(); \ } while (0) +#ifdef CONFIG_PREEMPT_RT_FULL +# define preempt_disable_rt() preempt_disable() +# define preempt_enable_rt() preempt_enable() +# define preempt_disable_nort() barrier() +# define preempt_enable_nort() barrier() +# ifdef CONFIG_SMP + extern void migrate_disable(void); + extern void migrate_enable(void); +# else /* CONFIG_SMP */ +# define migrate_disable() barrier() +# define migrate_enable() barrier() +# endif /* CONFIG_SMP */ +#else +# define preempt_disable_rt() barrier() +# define preempt_enable_rt() barrier() +# define preempt_disable_nort() preempt_disable() +# define preempt_enable_nort() preempt_enable() +# define migrate_disable() preempt_disable() +# define migrate_enable() preempt_enable() +#endif + #ifdef CONFIG_PREEMPT_NOTIFIERS struct preempt_notifier; diff --git a/include/linux/printk.h b/include/linux/printk.h index eac1af8502bb..37e647af0b0b 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -126,9 +126,11 @@ struct va_format { #ifdef CONFIG_EARLY_PRINTK extern asmlinkage __printf(1, 2) 
void early_printk(const char *fmt, ...); +extern void printk_kill(void); #else static inline __printf(1, 2) __cold void early_printk(const char *s, ...) { } +static inline void printk_kill(void) { } #endif #ifdef CONFIG_PRINTK_NMI diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index af3581b8a451..277295039c8f 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h @@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, int radix_tree_preload(gfp_t gfp_mask); int radix_tree_maybe_preload(gfp_t gfp_mask); int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order); +void radix_tree_preload_end(void); + void radix_tree_init(void); void *radix_tree_tag_set(struct radix_tree_root *root, unsigned long index, unsigned int tag); @@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_tagged(struct radix_tree_root *root, int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag); unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item); -static inline void radix_tree_preload_end(void) -{ - preempt_enable(); -} - /** * struct radix_tree_iter - radix tree iterator state * diff --git a/include/linux/random.h b/include/linux/random.h index 16ab429735a7..9d0fecb5b6c2 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -31,7 +31,7 @@ static inline void add_latent_entropy(void) {} extern void add_input_randomness(unsigned int type, unsigned int code, unsigned int value) __latent_entropy; -extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; +extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) __latent_entropy; extern void get_random_bytes(void *buf, int nbytes); extern int add_random_ready_callback(struct random_ready_callback *rdy); diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index e585018498d5..25c64474fc27 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -31,7 +31,7 @@ #include <linux/kernel.h> #include <linux/stddef.h> -#include <linux/rcupdate.h> +#include <linux/rcu_assign_pointer.h> struct rb_node { unsigned long __rb_parent_color; diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h index d076183e49be..36bfb4dd57ae 100644 --- a/include/linux/rbtree_augmented.h +++ b/include/linux/rbtree_augmented.h @@ -26,6 +26,7 @@ #include <linux/compiler.h> #include <linux/rbtree.h> +#include <linux/rcupdate.h> /* * Please note - only struct rb_augment_callbacks and the prototypes for diff --git a/include/linux/rcu_assign_pointer.h b/include/linux/rcu_assign_pointer.h new file mode 100644 index 000000000000..7066962a4379 --- /dev/null +++ b/include/linux/rcu_assign_pointer.h @@ -0,0 +1,54 @@ +#ifndef __LINUX_RCU_ASSIGN_POINTER_H__ +#define __LINUX_RCU_ASSIGN_POINTER_H__ +#include <linux/compiler.h> +#include <asm/barrier.h> + +/** + * RCU_INITIALIZER() - statically initialize an RCU-protected global variable + * @v: The value to statically initialize with. + */ +#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) + +/** + * rcu_assign_pointer() - assign to RCU-protected pointer + * @p: pointer to assign to + * @v: value to assign (publish) + * + * Assigns the specified value to the specified RCU-protected + * pointer, ensuring that any concurrent RCU readers will see + * any prior initialization. 
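+ * + * Illustrative use, assuming a hypothetical RCU-protected global pointer gp and a freshly allocated structure p: + * + *	p->a = 1;	(initialize first) + *	rcu_assign_pointer(gp, p);	(then publish)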
+ * + * Inserts memory barriers on architectures that require them + * (which is most of them), and also prevents the compiler from + * reordering the code that initializes the structure after the pointer + * assignment. More importantly, this call documents which pointers + * will be dereferenced by RCU read-side code. + * + * In some special cases, you may use RCU_INIT_POINTER() instead + * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due + * to the fact that it does not constrain either the CPU or the compiler. + * That said, using RCU_INIT_POINTER() when you should have used + * rcu_assign_pointer() is a very bad thing that results in + * impossible-to-diagnose memory corruption. So please be careful. + * See the RCU_INIT_POINTER() comment header for details. + * + * Note that rcu_assign_pointer() evaluates each of its arguments only + * once, appearances notwithstanding. One of the "extra" evaluations + * is in typeof() and the other visible only to sparse (__CHECKER__), + * neither of which actually execute the argument. As with most cpp + * macros, this execute-arguments-only-once property is important, so + * please be careful when making changes to rcu_assign_pointer() and the + * other macros that it invokes. + */ +#define rcu_assign_pointer(p, v) \ +({ \ + uintptr_t _r_a_p__v = (uintptr_t)(v); \ + \ + if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ + WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ + else \ + smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ + _r_a_p__v; \ +}) + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 01f71e1d2e94..30cc001d0d5a 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -46,6 +46,7 @@ #include <linux/compiler.h> #include <linux/ktime.h> #include <linux/irqflags.h> +#include <linux/rcu_assign_pointer.h> #include <asm/barrier.h> @@ -178,6 +179,9 @@ void call_rcu(struct rcu_head *head, #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +#ifdef CONFIG_PREEMPT_RT_FULL +#define call_rcu_bh call_rcu +#else /** * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. * @head: structure to be used for queueing the RCU updates. @@ -201,6 +205,7 @@ void call_rcu(struct rcu_head *head, */ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func); +#endif /** * call_rcu_sched() - Queue an RCU for invocation after sched grace period. @@ -301,6 +306,11 @@ void synchronize_rcu(void); * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. */ #define rcu_preempt_depth() (current->rcu_read_lock_nesting) +#ifndef CONFIG_PREEMPT_RT_FULL +#define sched_rcu_preempt_depth() rcu_preempt_depth() +#else +static inline int sched_rcu_preempt_depth(void) { return 0; } +#endif #else /* #ifdef CONFIG_PREEMPT_RCU */ @@ -326,6 +336,8 @@ static inline int rcu_preempt_depth(void) return 0; } +#define sched_rcu_preempt_depth() rcu_preempt_depth() + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ /* Internal to kernel */ @@ -505,7 +517,14 @@ extern struct lockdep_map rcu_callback_map; int debug_lockdep_rcu_enabled(void); int rcu_read_lock_held(void); +#ifdef CONFIG_PREEMPT_RT_FULL +static inline int rcu_read_lock_bh_held(void) +{ + return rcu_read_lock_held(); +} +#else int rcu_read_lock_bh_held(void); +#endif /** * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? 
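To illustrate the PREEMPT_RT_FULL aliasing of call_rcu_bh above, a minimal sketch; struct my_obj and its helpers are hypothetical:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void my_obj_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_obj, rcu));
}

static void my_obj_release(struct my_obj *obj)
{
	/* On PREEMPT_RT_FULL this queues on the normal RCU flavour,
	 * because call_rcu_bh is #defined to call_rcu. */
	call_rcu_bh(&obj->rcu, my_obj_free);
}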
@@ -626,54 +645,6 @@ static inline void rcu_preempt_sleep_check(void) }) /** - * RCU_INITIALIZER() - statically initialize an RCU-protected global variable - * @v: The value to statically initialize with. - */ -#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) - -/** - * rcu_assign_pointer() - assign to RCU-protected pointer - * @p: pointer to assign to - * @v: value to assign (publish) - * - * Assigns the specified value to the specified RCU-protected - * pointer, ensuring that any concurrent RCU readers will see - * any prior initialization. - * - * Inserts memory barriers on architectures that require them - * (which is most of them), and also prevents the compiler from - * reordering the code that initializes the structure after the pointer - * assignment. More importantly, this call documents which pointers - * will be dereferenced by RCU read-side code. - * - * In some special cases, you may use RCU_INIT_POINTER() instead - * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due - * to the fact that it does not constrain either the CPU or the compiler. - * That said, using RCU_INIT_POINTER() when you should have used - * rcu_assign_pointer() is a very bad thing that results in - * impossible-to-diagnose memory corruption. So please be careful. - * See the RCU_INIT_POINTER() comment header for details. - * - * Note that rcu_assign_pointer() evaluates each of its arguments only - * once, appearances notwithstanding. One of the "extra" evaluations - * is in typeof() and the other visible only to sparse (__CHECKER__), - * neither of which actually execute the argument. As with most cpp - * macros, this execute-arguments-only-once property is important, so - * please be careful when making changes to rcu_assign_pointer() and the - * other macros that it invokes. 
- */ -#define rcu_assign_pointer(p, v) \ -({ \ - uintptr_t _r_a_p__v = (uintptr_t)(v); \ - \ - if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ - WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ - else \ - smp_store_release(&p, RCU_INITIALIZER((typeof(p))_r_a_p__v)); \ - _r_a_p__v; \ -}) - -/** * rcu_access_pointer() - fetch RCU pointer with no dereferencing * @p: The pointer to read * @@ -951,10 +922,14 @@ static inline void rcu_read_unlock(void) static inline void rcu_read_lock_bh(void) { local_bh_disable(); +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_lock(); +#else __acquire(RCU_BH); rcu_lock_acquire(&rcu_bh_lock_map); RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_lock_bh() used illegally while idle"); +#endif } /* @@ -964,10 +939,14 @@ static inline void rcu_read_lock_bh(void) */ static inline void rcu_read_unlock_bh(void) { +#ifdef CONFIG_PREEMPT_RT_FULL + rcu_read_unlock(); +#else RCU_LOCKDEP_WARN(!rcu_is_watching(), "rcu_read_unlock_bh() used illegally while idle"); rcu_lock_release(&rcu_bh_lock_map); __release(RCU_BH); +#endif local_bh_enable(); } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 63a4e4cf40a5..08ab12df2863 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -44,7 +44,11 @@ static inline void rcu_virt_note_context_switch(int cpu) rcu_note_context_switch(); } +#ifdef CONFIG_PREEMPT_RT_FULL +# define synchronize_rcu_bh synchronize_rcu +#else void synchronize_rcu_bh(void); +#endif void synchronize_sched_expedited(void); void synchronize_rcu_expedited(void); @@ -72,7 +76,11 @@ static inline void synchronize_rcu_bh_expedited(void) } void rcu_barrier(void); +#ifdef CONFIG_PREEMPT_RT_FULL +# define rcu_barrier_bh rcu_barrier +#else void rcu_barrier_bh(void); +#endif void rcu_barrier_sched(void); unsigned long get_state_synchronize_rcu(void); void cond_synchronize_rcu(unsigned long oldstate); @@ -82,17 +90,14 @@ void cond_synchronize_sched(unsigned long oldstate); extern unsigned long rcutorture_testseq; extern unsigned long rcutorture_vernum; unsigned long rcu_batches_started(void); -unsigned long rcu_batches_started_bh(void); unsigned long rcu_batches_started_sched(void); unsigned long rcu_batches_completed(void); -unsigned long rcu_batches_completed_bh(void); unsigned long rcu_batches_completed_sched(void); unsigned long rcu_exp_batches_completed(void); unsigned long rcu_exp_batches_completed_sched(void); void show_rcu_gp_kthreads(void); void rcu_force_quiescent_state(void); -void rcu_bh_force_quiescent_state(void); void rcu_sched_force_quiescent_state(void); void rcu_idle_enter(void); @@ -109,6 +114,16 @@ extern int rcu_scheduler_active __read_mostly; bool rcu_is_watching(void); +#ifndef CONFIG_PREEMPT_RT_FULL +void rcu_bh_force_quiescent_state(void); +unsigned long rcu_batches_started_bh(void); +unsigned long rcu_batches_completed_bh(void); +#else +# define rcu_bh_force_quiescent_state rcu_force_quiescent_state +# define rcu_batches_completed_bh rcu_batches_completed +# define rcu_batches_started_bh rcu_batches_completed +#endif + void rcu_all_qs(void); /* RCUtree hotplug events */ diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 1abba5ce2a2f..294a8b4875f1 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -13,11 +13,15 @@ #define __LINUX_RT_MUTEX_H #include <linux/linkage.h> +#include <linux/spinlock_types_raw.h> #include <linux/rbtree.h> -#include <linux/spinlock_types.h> extern int max_lock_depth; /* for sysctl */ +#ifdef CONFIG_DEBUG_MUTEXES +#include <linux/debug_locks.h> +#endif 
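A short sketch of what the rcu_read_lock_bh() change above means for a reader on PREEMPT_RT_FULL; walk_my_list() and the protected structure are hypothetical:

static void walk_my_list(void)
{
	rcu_read_lock_bh();	/* RT: local_bh_disable() + rcu_read_lock() */
	/* ... iterate an RCU-bh protected structure ... */
	rcu_read_unlock_bh();	/* RT: rcu_read_unlock() + local_bh_enable() */
}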
+ /** * The rt_mutex structure * @@ -31,8 +35,8 @@ struct rt_mutex { struct rb_root waiters; struct rb_node *waiters_leftmost; struct task_struct *owner; -#ifdef CONFIG_DEBUG_RT_MUTEXES int save_state; +#ifdef CONFIG_DEBUG_RT_MUTEXES const char *name, *file; int line; void *magic; @@ -55,22 +59,33 @@ struct hrtimer_sleeper; # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) #endif +# define rt_mutex_init(mutex) \ + do { \ + raw_spin_lock_init(&(mutex)->wait_lock); \ + __rt_mutex_init(mutex, #mutex); \ + } while (0) + #ifdef CONFIG_DEBUG_RT_MUTEXES # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ , .name = #mutexname, .file = __FILE__, .line = __LINE__ -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) extern void rt_mutex_debug_task_free(struct task_struct *tsk); #else # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) # define rt_mutex_debug_task_free(t) do { } while (0) #endif -#define __RT_MUTEX_INITIALIZER(mutexname) \ - { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ +#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ , .waiters = RB_ROOT \ , .owner = NULL \ - __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} + __DEBUG_RT_MUTEX_INITIALIZER(mutexname) + +#define __RT_MUTEX_INITIALIZER(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } + +#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ + { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ + , .save_state = 1 } #define DEFINE_RT_MUTEX(mutexname) \ struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) @@ -90,7 +105,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); +extern int rt_mutex_lock_state(struct rt_mutex *lock, int state); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); +extern int rt_mutex_lock_killable(struct rt_mutex *lock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout); diff --git a/include/linux/rwlock_rt.h b/include/linux/rwlock_rt.h new file mode 100644 index 000000000000..49ed2d45d3be --- /dev/null +++ b/include/linux/rwlock_rt.h @@ -0,0 +1,99 @@ +#ifndef __LINUX_RWLOCK_RT_H +#define __LINUX_RWLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. 
Use spinlock.h +#endif + +#define rwlock_init(rwl) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(rwl)->lock); \ + __rt_rwlock_init(rwl, #rwl, &__key); \ +} while (0) + +extern void __lockfunc rt_write_lock(rwlock_t *rwlock); +extern void __lockfunc rt_read_lock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); +extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); +extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); +extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); +extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); +extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); +extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); + +#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) +#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) + +#define write_trylock_irqsave(lock, flags) \ + __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) + +#define read_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = rt_read_lock_irqsave(lock); \ + } while (0) + +#define write_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = rt_write_lock_irqsave(lock); \ + } while (0) + +#define read_lock(lock) rt_read_lock(lock) + +#define read_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_read_lock(lock); \ + } while (0) + +#define read_lock_irq(lock) read_lock(lock) + +#define write_lock(lock) rt_write_lock(lock) + +#define write_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_write_lock(lock); \ + } while (0) + +#define write_lock_irq(lock) write_lock(lock) + +#define read_unlock(lock) rt_read_unlock(lock) + +#define read_unlock_bh(lock) \ + do { \ + rt_read_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define read_unlock_irq(lock) read_unlock(lock) + +#define write_unlock(lock) rt_write_unlock(lock) + +#define write_unlock_bh(lock) \ + do { \ + rt_write_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define write_unlock_irq(lock) write_unlock(lock) + +#define read_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_read_unlock(lock); \ + } while (0) + +#define write_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + rt_write_unlock(lock); \ + } while (0) + +#endif diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h index cc0072e93e36..5317cd957292 100644 --- a/include/linux/rwlock_types.h +++ b/include/linux/rwlock_types.h @@ -1,6 +1,10 @@ #ifndef __LINUX_RWLOCK_TYPES_H #define __LINUX_RWLOCK_TYPES_H +#if !defined(__LINUX_SPINLOCK_TYPES_H) +# error "Do not include directly, include spinlock_types.h" +#endif + /* * include/linux/rwlock_types.h - generic rwlock type definitions * and initializers diff --git a/include/linux/rwlock_types_rt.h b/include/linux/rwlock_types_rt.h new file mode 100644 index 000000000000..51b28d775fe1 --- /dev/null +++ b/include/linux/rwlock_types_rt.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_RWLOCK_TYPES_RT_H +#define __LINUX_RWLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +/* + * rwlocks - rtmutex which allows single reader recursion + */ +typedef struct { + struct rt_mutex lock; + int read_depth; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} rwlock_t; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define RW_DEP_MAP_INIT(lockname) +#endif + +#define __RW_LOCK_UNLOCKED(name) \ + { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ + RW_DEP_MAP_INIT(name) } + +#define DEFINE_RWLOCK(name) \ + rwlock_t name = __RW_LOCK_UNLOCKED(name) + +#endif diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index dd1d14250340..aa2ac1f65c2d 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -19,6 +19,10 @@ #include <linux/osq_lock.h> #endif +#ifdef CONFIG_PREEMPT_RT_FULL +#include <linux/rwsem_rt.h> +#else /* PREEMPT_RT_FULL */ + struct rw_semaphore; #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK @@ -106,6 +110,13 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#endif /* !PREEMPT_RT_FULL */ + +/* + * The functions below are the same for all rwsem implementations including + * the RT specific variant. + */ + /* * lock for reading */ diff --git a/include/linux/rwsem_rt.h b/include/linux/rwsem_rt.h new file mode 100644 index 000000000000..2ffbf093ae92 --- /dev/null +++ b/include/linux/rwsem_rt.h @@ -0,0 +1,67 @@ +#ifndef _LINUX_RWSEM_RT_H +#define _LINUX_RWSEM_RT_H + +#ifndef _LINUX_RWSEM_H +#error "Include rwsem.h" +#endif + +#include <linux/rtmutex.h> +#include <linux/swait.h> + +#define READER_BIAS (1U << 31) +#define WRITER_BIAS (1U << 30) + +struct rw_semaphore { + atomic_t readers; + struct rt_mutex rtmutex; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +}; + +#define __RWSEM_INITIALIZER(name) \ +{ \ + .readers = ATOMIC_INIT(READER_BIAS), \ + .rtmutex = __RT_MUTEX_INITIALIZER(name.rtmutex), \ + RW_DEP_MAP_INIT(name) \ +} + +#define DECLARE_RWSEM(lockname) \ + struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) + +extern void __rwsem_init(struct rw_semaphore *rwsem, const char *name, + struct lock_class_key *key); + +#define __init_rwsem(sem, name, key) \ +do { \ + rt_mutex_init(&(sem)->rtmutex); \ + __rwsem_init((sem), (name), (key)); \ +} while (0) + +#define init_rwsem(sem) \ +do { \ + static struct lock_class_key __key; \ + \ + __init_rwsem((sem), #sem, &__key); \ +} while (0) + +static inline int rwsem_is_locked(struct rw_semaphore *sem) +{ + return atomic_read(&sem->readers) != READER_BIAS; +} + +static inline int rwsem_is_contended(struct rw_semaphore *sem) +{ + return atomic_read(&sem->readers) > 0; +} + +extern void __down_read(struct rw_semaphore *sem); +extern int __down_read_trylock(struct rw_semaphore *sem); +extern void __down_write(struct rw_semaphore *sem); +extern int __must_check __down_write_killable(struct rw_semaphore *sem); +extern int __down_write_trylock(struct rw_semaphore *sem); +extern void __up_read(struct rw_semaphore *sem); +extern void __up_write(struct rw_semaphore *sem); +extern void __downgrade_write(struct rw_semaphore *sem); + +#endif diff --git a/include/linux/sched.h b/include/linux/sched.h index a4d0afc009a7..e775696b480a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -26,6 +26,7 @@ struct sched_param { #include <linux/nodemask.h> #include <linux/mm_types.h> #include <linux/preempt.h> +#include <asm/kmap_types.h> #include <asm/page.h> #include 
<asm/ptrace.h> @@ -236,17 +237,13 @@ extern char ___assert_task_state[1 - 2*!!( /* Convenience macros for the sake of wake_up */ #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE) -#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED) /* get_task_state() */ #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) -#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) -#define task_is_stopped_or_traced(task) \ - ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) #define task_contributes_to_load(task) \ ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ (task->flags & PF_FROZEN) == 0 && \ @@ -312,6 +309,11 @@ extern char ___assert_task_state[1 - 2*!!( #endif +#define __set_current_state_no_track(state_value) \ + do { current->state = (state_value); } while (0) +#define set_current_state_no_track(state_value) \ + set_mb(current->state, (state_value)) + /* Task command name length */ #define TASK_COMM_LEN 16 @@ -1022,9 +1024,31 @@ struct wake_q_head { #define WAKE_Q(name) \ struct wake_q_head name = { WAKE_Q_TAIL, &name.first } -extern void wake_q_add(struct wake_q_head *head, - struct task_struct *task); -extern void wake_up_q(struct wake_q_head *head); +extern void __wake_q_add(struct wake_q_head *head, + struct task_struct *task, bool sleeper); +static inline void wake_q_add(struct wake_q_head *head, + struct task_struct *task) +{ + __wake_q_add(head, task, false); +} + +static inline void wake_q_add_sleeper(struct wake_q_head *head, + struct task_struct *task) +{ + __wake_q_add(head, task, true); +} + +extern void __wake_up_q(struct wake_q_head *head, bool sleeper); + +static inline void wake_up_q(struct wake_q_head *head) +{ + __wake_up_q(head, false); +} + +static inline void wake_up_q_sleeper(struct wake_q_head *head) +{ + __wake_up_q(head, true); +} /* * sched-domains (multiprocessor balancing) declarations: @@ -1491,6 +1515,7 @@ struct task_struct { struct thread_info thread_info; #endif volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ + volatile long saved_state; /* saved state for "spinlock sleepers" */ void *stack; atomic_t usage; unsigned int flags; /* per process flags, defined below */ @@ -1530,6 +1555,13 @@ struct task_struct { #endif unsigned int policy; +#ifdef CONFIG_PREEMPT_RT_FULL + int migrate_disable; + int migrate_disable_update; +# ifdef CONFIG_SCHED_DEBUG + int migrate_disable_atomic; +# endif +#endif int nr_cpus_allowed; cpumask_t cpus_allowed; @@ -1668,6 +1700,9 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; +#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *posix_timer_list; +#endif /* process credentials */ const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */ @@ -1699,10 +1734,15 @@ struct task_struct { /* signal handlers */ struct signal_struct *signal; struct sighand_struct *sighand; + struct sigqueue *sigqueue_cache; sigset_t blocked, real_blocked; sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ struct sigpending pending; +#ifdef CONFIG_PREEMPT_RT_FULL + /* TODO: move me into ->restart_block ? 
*/ + struct siginfo forced_info; +#endif unsigned long sas_ss_sp; size_t sas_ss_size; @@ -1728,11 +1768,14 @@ struct task_struct { raw_spinlock_t pi_lock; struct wake_q_node wake_q; + struct wake_q_node wake_q_sleeper; #ifdef CONFIG_RT_MUTEXES /* PI waiters blocked on a rt_mutex held by this task */ struct rb_root pi_waiters; struct rb_node *pi_waiters_leftmost; + /* Updated under owner's pi_lock and rq lock */ + struct task_struct *pi_top_task; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; #endif @@ -1931,6 +1974,12 @@ struct task_struct { /* bitmask and counter of trace recursion */ unsigned long trace_recursion; #endif /* CONFIG_TRACING */ +#ifdef CONFIG_WAKEUP_LATENCY_HIST + u64 preempt_timestamp_hist; +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + long timer_offset; +#endif +#endif #ifdef CONFIG_KCOV /* Coverage collection mode enabled for this task (0 if disabled). */ enum kcov_mode kcov_mode; @@ -1956,9 +2005,23 @@ struct task_struct { unsigned int sequential_io; unsigned int sequential_io_avg; #endif +#ifdef CONFIG_PREEMPT_RT_BASE + struct rcu_head put_rcu; + int softirq_nestcnt; + unsigned int softirqs_raised; +#endif +#ifdef CONFIG_PREEMPT_RT_FULL +# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 + int kmap_idx; + pte_t kmap_pte[KM_TYPE_NR]; +# endif +#endif #ifdef CONFIG_DEBUG_ATOMIC_SLEEP unsigned long task_state_change; #endif +#ifdef CONFIG_PREEMPT_RT_FULL + int xmit_recursion; +#endif int pagefault_disabled; #ifdef CONFIG_MMU struct task_struct *oom_reaper_list; @@ -1998,14 +2061,6 @@ static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) } #endif -/* Future-safe accessor for struct task_struct's cpus_allowed. */ -#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) - -static inline int tsk_nr_cpus_allowed(struct task_struct *p) -{ - return p->nr_cpus_allowed; -} - #define TNF_MIGRATED 0x01 #define TNF_NO_GROUP 0x02 #define TNF_SHARED 0x04 @@ -2225,6 +2280,15 @@ extern struct pid *cad_pid; extern void free_task(struct task_struct *tsk); #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __put_task_struct_cb(struct rcu_head *rhp); + +static inline void put_task_struct(struct task_struct *t) +{ + if (atomic_dec_and_test(&t->usage)) + call_rcu(&t->put_rcu, __put_task_struct_cb); +} +#else extern void __put_task_struct(struct task_struct *t); static inline void put_task_struct(struct task_struct *t) @@ -2232,6 +2296,7 @@ static inline void put_task_struct(struct task_struct *t) if (atomic_dec_and_test(&t->usage)) __put_task_struct(t); } +#endif struct task_struct *task_rcu_dereference(struct task_struct **ptask); struct task_struct *try_get_task_struct(struct task_struct **ptask); @@ -2273,6 +2338,7 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, /* * Per process flags */ +#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ #define PF_EXITING 0x00000004 /* getting shut down */ #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ @@ -2441,6 +2507,10 @@ extern void do_set_cpus_allowed(struct task_struct *p, extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask); +int migrate_me(void); +void tell_sched_cpu_down_begin(int cpu); +void tell_sched_cpu_down_done(int cpu); + #else static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) @@ -2453,6 +2523,9 @@ static inline 
int set_cpus_allowed_ptr(struct task_struct *p, return -EINVAL; return 0; } +static inline int migrate_me(void) { return 0; } +static inline void tell_sched_cpu_down_begin(int cpu) { } +static inline void tell_sched_cpu_down_done(int cpu) { } #endif #ifdef CONFIG_NO_HZ_COMMON @@ -2691,6 +2764,7 @@ extern void xtime_update(unsigned long ticks); extern int wake_up_state(struct task_struct *tsk, unsigned int state); extern int wake_up_process(struct task_struct *tsk); +extern int wake_up_lock_sleeper(struct task_struct * tsk); extern void wake_up_new_task(struct task_struct *tsk); #ifdef CONFIG_SMP extern void kick_process(struct task_struct *tsk); @@ -2899,6 +2973,17 @@ static inline void mmdrop(struct mm_struct *mm) __mmdrop(mm); } +#ifdef CONFIG_PREEMPT_RT_BASE +extern void __mmdrop_delayed(struct rcu_head *rhp); +static inline void mmdrop_delayed(struct mm_struct *mm) +{ + if (atomic_dec_and_test(&mm->mm_count)) + call_rcu(&mm->delayed_drop, __mmdrop_delayed); +} +#else +# define mmdrop_delayed(mm) mmdrop(mm) +#endif + static inline void mmdrop_async_fn(struct work_struct *work) { struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); @@ -3291,6 +3376,43 @@ static inline int test_tsk_need_resched(struct task_struct *tsk) return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); } +#ifdef CONFIG_PREEMPT_LAZY +static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) +{ + set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) +{ + clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); +} + +static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) +{ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); +} + +static inline int need_resched_lazy(void) +{ + return test_thread_flag(TIF_NEED_RESCHED_LAZY); +} + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#else +static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } +static inline int need_resched_lazy(void) { return 0; } + +static inline int need_resched_now(void) +{ + return test_thread_flag(TIF_NEED_RESCHED); +} + +#endif + static inline int restart_syscall(void) { set_tsk_thread_flag(current, TIF_SIGPENDING); @@ -3322,6 +3444,51 @@ static inline int signal_pending_state(long state, struct task_struct *p) return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); } +static inline bool __task_is_stopped_or_traced(struct task_struct *task) +{ + if (task->state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) + return true; +#endif + return false; +} + +static inline bool task_is_stopped_or_traced(struct task_struct *task) +{ + bool traced_stopped; + +#ifdef CONFIG_PREEMPT_RT_FULL + unsigned long flags; + + raw_spin_lock_irqsave(&task->pi_lock, flags); + traced_stopped = __task_is_stopped_or_traced(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); +#else + traced_stopped = __task_is_stopped_or_traced(task); +#endif + return traced_stopped; +} + +static inline bool task_is_traced(struct task_struct *task) +{ + bool traced = false; + + if (task->state & __TASK_TRACED) + return true; +#ifdef CONFIG_PREEMPT_RT_FULL + /* in case the task is sleeping on tasklist_lock */ + raw_spin_lock_irq(&task->pi_lock); + if (task->state & __TASK_TRACED) + traced = true; + else if (task->saved_state & __TASK_TRACED) + traced = true; + 
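+ /* ->saved_state holds the state a "spinlock sleeper" had before blocking on an RT lock substitution (see the task_struct change above), so stopped/traced is detected even while the task waits on such a lock. */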
raw_spin_unlock_irq(&task->pi_lock); +#endif + return traced; +} + /* * cond_resched() and cond_resched_lock(): latency reduction via * explicit rescheduling in places that are safe. The return @@ -3347,12 +3514,16 @@ extern int __cond_resched_lock(spinlock_t *lock); __cond_resched_lock(lock); \ }) +#ifndef CONFIG_PREEMPT_RT_FULL extern int __cond_resched_softirq(void); #define cond_resched_softirq() ({ \ ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ __cond_resched_softirq(); \ }) +#else +# define cond_resched_softirq() cond_resched() +#endif static inline void cond_resched_rcu(void) { @@ -3527,6 +3698,31 @@ static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) #endif /* CONFIG_SMP */ +static inline int __migrate_disabled(struct task_struct *p) +{ +#ifdef CONFIG_PREEMPT_RT_FULL + return p->migrate_disable; +#else + return 0; +#endif +} + +/* Future-safe accessor for struct task_struct's cpus_allowed. */ +static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) +{ + if (__migrate_disabled(p)) + return cpumask_of(task_cpu(p)); + + return &p->cpus_allowed; +} + +static inline int tsk_nr_cpus_allowed(struct task_struct *p) +{ + if (__migrate_disabled(p)) + return 1; + return p->nr_cpus_allowed; +} + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); extern long sched_getaffinity(pid_t pid, struct cpumask *mask); diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a30b172df6e1..db3e91f2bc03 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -16,27 +16,20 @@ static inline int rt_task(struct task_struct *p) } #ifdef CONFIG_RT_MUTEXES -extern int rt_mutex_getprio(struct task_struct *p); -extern void rt_mutex_setprio(struct task_struct *p, int prio); -extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio); -extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); +/* + * Must hold either p->pi_lock or task_rq(p)->lock. 
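+ * pi_top_task is updated under both of these locks (see the task_struct comment above), so holding either one is sufficient for a stable read.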
+ */ +static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p) +{ + return p->pi_top_task; +} +extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task); extern void rt_mutex_adjust_pi(struct task_struct *p); static inline bool tsk_is_pi_blocked(struct task_struct *tsk) { return tsk->pi_blocked_on != NULL; } #else -static inline int rt_mutex_getprio(struct task_struct *p) -{ - return p->normal_prio; -} - -static inline int rt_mutex_get_effective_prio(struct task_struct *task, - int newprio) -{ - return newprio; -} - static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) { return NULL; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index ead97654c4e9..3d7223ffdd3b 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -220,20 +220,30 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) return __read_seqcount_retry(s, start); } - - -static inline void raw_write_seqcount_begin(seqcount_t *s) +static inline void __raw_write_seqcount_begin(seqcount_t *s) { s->sequence++; smp_wmb(); } -static inline void raw_write_seqcount_end(seqcount_t *s) +static inline void raw_write_seqcount_begin(seqcount_t *s) +{ + preempt_disable_rt(); + __raw_write_seqcount_begin(s); +} + +static inline void __raw_write_seqcount_end(seqcount_t *s) { smp_wmb(); s->sequence++; } +static inline void raw_write_seqcount_end(seqcount_t *s) +{ + __raw_write_seqcount_end(s); + preempt_enable_rt(); +} + /** * raw_write_seqcount_barrier - do a seq write barrier * @s: pointer to seqcount_t @@ -428,10 +438,32 @@ typedef struct { /* * Read side functions for starting and finalizing a read side section. */ +#ifndef CONFIG_PREEMPT_RT_FULL static inline unsigned read_seqbegin(const seqlock_t *sl) { return read_seqcount_begin(&sl->seqcount); } +#else +/* + * Starvation safe read side for RT + */ +static inline unsigned read_seqbegin(seqlock_t *sl) +{ + unsigned ret; + +repeat: + ret = ACCESS_ONCE(sl->seqcount.sequence); + if (unlikely(ret & 1)) { + /* + * Take the lock and let the writer proceed (i.e. evtl + * boost it), otherwise we could loop here forever. 
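+ * ("evtl" is shorthand for the German "eventuell", i.e. possibly.)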
+ */ + spin_unlock_wait(&sl->lock); + goto repeat; + } + return ret; +} +#endif static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) { @@ -446,36 +478,45 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) static inline void write_seqlock(seqlock_t *sl) { spin_lock(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); +} + +static inline int try_write_seqlock(seqlock_t *sl) +{ + if (spin_trylock(&sl->lock)) { + __raw_write_seqcount_begin(&sl->seqcount); + return 1; + } + return 0; } static inline void write_sequnlock(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock(&sl->lock); } static inline void write_seqlock_bh(seqlock_t *sl) { spin_lock_bh(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_bh(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_bh(&sl->lock); } static inline void write_seqlock_irq(seqlock_t *sl) { spin_lock_irq(&sl->lock); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); } static inline void write_sequnlock_irq(seqlock_t *sl) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_irq(&sl->lock); } @@ -484,7 +525,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) unsigned long flags; spin_lock_irqsave(&sl->lock, flags); - write_seqcount_begin(&sl->seqcount); + __raw_write_seqcount_begin(&sl->seqcount); return flags; } @@ -494,7 +535,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl) static inline void write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) { - write_seqcount_end(&sl->seqcount); + __raw_write_seqcount_end(&sl->seqcount); spin_unlock_irqrestore(&sl->lock, flags); } diff --git a/include/linux/signal.h b/include/linux/signal.h index b63f63eaa39c..295540fdfc72 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -233,6 +233,7 @@ static inline void init_sigpending(struct sigpending *sig) } extern void flush_sigqueue(struct sigpending *queue); +extern void flush_task_sigqueue(struct task_struct *tsk); /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ static inline int valid_signal(unsigned long sig) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 32810f279f8e..0db6e31161f6 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -284,6 +284,7 @@ struct sk_buff_head { __u32 qlen; spinlock_t lock; + raw_spinlock_t raw_lock; }; struct sk_buff; @@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(struct sk_buff_head *list) __skb_queue_head_init(list); } +static inline void skb_queue_head_init_raw(struct sk_buff_head *list) +{ + raw_spin_lock_init(&list->raw_lock); + __skb_queue_head_init(list); +} + static inline void skb_queue_head_init_class(struct sk_buff_head *list, struct lock_class_key *class) { diff --git a/include/linux/smp.h b/include/linux/smp.h index 8e0cb7a0f836..891c533724f5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus; extern void __init setup_nr_cpu_ids(void); extern void __init smp_init(void); +extern int __boot_cpu_id; + +static inline int get_boot_cpu_id(void) +{ + return __boot_cpu_id; +} + #else /* !SMP */ static inline void smp_send_stop(void) { } @@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); } static inline void smp_init(void) { } #endif +static inline int get_boot_cpu_id(void) +{ + return 0; +} + #endif /* !SMP */ /* @@ -185,6 +197,9 @@ static inline void smp_init(void) { } #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) #define put_cpu() preempt_enable() +#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) +#define put_cpu_light() migrate_enable() + /* * Callback to arch code if there's nosmp or maxcpus=0 on the * boot command line: diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 47dd0cebd204..b241cc044bd3 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h @@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) /* Include rwlock functions */ -#include <linux/rwlock.h> +#ifdef CONFIG_PREEMPT_RT_FULL +# include <linux/rwlock_rt.h> +#else +# include <linux/rwlock.h> +#endif /* * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: @@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock) # include <linux/spinlock_api_up.h> #endif +#ifdef CONFIG_PREEMPT_RT_FULL +# include <linux/spinlock_rt.h> +#else /* PREEMPT_RT_FULL */ + /* * Map the spin_lock functions to the raw variants for PREEMPT_RT=n */ @@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); #define atomic_dec_and_lock(atomic, lock) \ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) +#endif /* !PREEMPT_RT_FULL */ + #endif /* __LINUX_SPINLOCK_H */ diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index 5344268e6e62..043263f30e81 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h @@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) return 0; } -#include <linux/rwlock_api_smp.h> +#ifndef CONFIG_PREEMPT_RT_FULL +# include <linux/rwlock_api_smp.h> +#endif #endif /* __LINUX_SPINLOCK_API_SMP_H */ diff --git a/include/linux/spinlock_rt.h b/include/linux/spinlock_rt.h new file mode 100644 index 000000000000..43ca841b913a --- /dev/null +++ b/include/linux/spinlock_rt.h @@ -0,0 +1,162 @@ +#ifndef __LINUX_SPINLOCK_RT_H +#define 
__LINUX_SPINLOCK_RT_H + +#ifndef __LINUX_SPINLOCK_H +#error Do not include directly. Use spinlock.h +#endif + +#include <linux/bug.h> + +extern void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); + +#define spin_lock_init(slock) \ +do { \ + static struct lock_class_key __key; \ + \ + rt_mutex_init(&(slock)->lock); \ + __rt_spin_lock_init(slock, #slock, &__key); \ +} while (0) + +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock); +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock); +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock); + +extern void __lockfunc rt_spin_lock(spinlock_t *lock); +extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); +extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); +extern void __lockfunc rt_spin_unlock(spinlock_t *lock); +extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); +extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); +extern int __lockfunc rt_spin_trylock(spinlock_t *lock); +extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); + +/* + * lockdep-less calls, for derived types like rwlock: + * (for trylock they can use rt_mutex_trylock() directly. + */ +extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); +extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); + +#define spin_lock(lock) rt_spin_lock(lock) + +#define spin_lock_bh(lock) \ + do { \ + local_bh_disable(); \ + rt_spin_lock(lock); \ + } while (0) + +#define spin_lock_irq(lock) spin_lock(lock) + +#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) + +#define spin_trylock(lock) \ +({ \ + int __locked; \ + __locked = spin_do_trylock(lock); \ + __locked; \ +}) + +#ifdef CONFIG_LOCKDEP +# define spin_lock_nested(lock, subclass) \ + do { \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) + +#define spin_lock_bh_nested(lock, subclass) \ + do { \ + local_bh_disable(); \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + rt_spin_lock_nested(lock, subclass); \ + } while (0) +#else +# define spin_lock_nested(lock, subclass) spin_lock(lock) +# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) + +# define spin_lock_irqsave_nested(lock, flags, subclass) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) +#endif + +#define spin_lock_irqsave(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = 0; \ + spin_lock(lock); \ + } while (0) + +static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) +{ + unsigned long flags = 0; +#ifdef CONFIG_TRACE_IRQFLAGS + flags = rt_spin_lock_trace_flags(lock); +#else + spin_lock(lock); /* lock_local */ +#endif + return flags; +} + +/* FIXME: we need rt_spin_lock_nest_lock */ +#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) + +#define spin_unlock(lock) rt_spin_unlock(lock) + +#define spin_unlock_bh(lock) \ + do { \ + rt_spin_unlock(lock); \ + local_bh_enable(); \ + } while (0) + +#define spin_unlock_irq(lock) spin_unlock(lock) + +#define spin_unlock_irqrestore(lock, flags) \ + do { \ + typecheck(unsigned long, flags); \ + (void) flags; \ + spin_unlock(lock); \ + } while (0) + +#define spin_trylock_bh(lock) 
__cond_lock(lock, rt_spin_trylock_bh(lock)) +#define spin_trylock_irq(lock) spin_trylock(lock) + +#define spin_trylock_irqsave(lock, flags) \ + rt_spin_trylock_irqsave(lock, &(flags)) + +#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) + +#ifdef CONFIG_GENERIC_LOCKBREAK +# define spin_is_contended(lock) ((lock)->break_lock) +#else +# define spin_is_contended(lock) (((void)(lock), 0)) +#endif + +static inline int spin_can_lock(spinlock_t *lock) +{ + return !rt_mutex_is_locked(&lock->lock); +} + +static inline int spin_is_locked(spinlock_t *lock) +{ + return rt_mutex_is_locked(&lock->lock); +} + +static inline void assert_spin_locked(spinlock_t *lock) +{ + BUG_ON(!spin_is_locked(lock)); +} + +#define atomic_dec_and_lock(atomic, lock) \ + atomic_dec_and_spin_lock(atomic, lock) + +#endif diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index 73548eb13a5d..10bac715ea96 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h @@ -9,80 +9,15 @@ * Released under the General Public License (GPL). */ -#if defined(CONFIG_SMP) -# include <asm/spinlock_types.h> -#else -# include <linux/spinlock_types_up.h> -#endif - -#include <linux/lockdep.h> - -typedef struct raw_spinlock { - arch_spinlock_t raw_lock; -#ifdef CONFIG_GENERIC_LOCKBREAK - unsigned int break_lock; -#endif -#ifdef CONFIG_DEBUG_SPINLOCK - unsigned int magic, owner_cpu; - void *owner; -#endif -#ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lockdep_map dep_map; -#endif -} raw_spinlock_t; - -#define SPINLOCK_MAGIC 0xdead4ead - -#define SPINLOCK_OWNER_INIT ((void *)-1L) - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -#else -# define SPIN_DEP_MAP_INIT(lockname) -#endif +#include <linux/spinlock_types_raw.h> -#ifdef CONFIG_DEBUG_SPINLOCK -# define SPIN_DEBUG_INIT(lockname) \ - .magic = SPINLOCK_MAGIC, \ - .owner_cpu = -1, \ - .owner = SPINLOCK_OWNER_INIT, +#ifndef CONFIG_PREEMPT_RT_FULL +# include <linux/spinlock_types_nort.h> +# include <linux/rwlock_types.h> #else -# define SPIN_DEBUG_INIT(lockname) +# include <linux/rtmutex.h> +# include <linux/spinlock_types_rt.h> +# include <linux/rwlock_types_rt.h> #endif -#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ - { \ - .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ - SPIN_DEBUG_INIT(lockname) \ - SPIN_DEP_MAP_INIT(lockname) } - -#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ - (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) - -typedef struct spinlock { - union { - struct raw_spinlock rlock; - -#ifdef CONFIG_DEBUG_LOCK_ALLOC -# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) - struct { - u8 __padding[LOCK_PADSIZE]; - struct lockdep_map dep_map; - }; -#endif - }; -} spinlock_t; - -#define __SPIN_LOCK_INITIALIZER(lockname) \ - { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } - -#define __SPIN_LOCK_UNLOCKED(lockname) \ - (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) - -#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) - -#include <linux/rwlock_types.h> - #endif /* __LINUX_SPINLOCK_TYPES_H */ diff --git a/include/linux/spinlock_types_nort.h b/include/linux/spinlock_types_nort.h new file mode 100644 index 000000000000..f1dac1fb1d6a --- /dev/null +++ b/include/linux/spinlock_types_nort.h @@ -0,0 +1,33 @@ +#ifndef __LINUX_SPINLOCK_TYPES_NORT_H +#define __LINUX_SPINLOCK_TYPES_NORT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +/* + * The non RT version maps spinlocks to raw_spinlocks + */ +typedef struct spinlock { + union { + struct raw_spinlock rlock; + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) + struct { + u8 __padding[LOCK_PADSIZE]; + struct lockdep_map dep_map; + }; +#endif + }; +} spinlock_t; + +#define __SPIN_LOCK_INITIALIZER(lockname) \ + { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } + +#define __SPIN_LOCK_UNLOCKED(lockname) \ + (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) + +#endif diff --git a/include/linux/spinlock_types_raw.h b/include/linux/spinlock_types_raw.h new file mode 100644 index 000000000000..edffc4d53fc9 --- /dev/null +++ b/include/linux/spinlock_types_raw.h @@ -0,0 +1,56 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RAW_H +#define __LINUX_SPINLOCK_TYPES_RAW_H + +#if defined(CONFIG_SMP) +# include <asm/spinlock_types.h> +#else +# include <linux/spinlock_types_up.h> +#endif + +#include <linux/lockdep.h> + +typedef struct raw_spinlock { + arch_spinlock_t raw_lock; +#ifdef CONFIG_GENERIC_LOCKBREAK + unsigned int break_lock; +#endif +#ifdef CONFIG_DEBUG_SPINLOCK + unsigned int magic, owner_cpu; + void *owner; +#endif +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} raw_spinlock_t; + +#define SPINLOCK_MAGIC 0xdead4ead + +#define SPINLOCK_OWNER_INIT ((void *)-1L) + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +#else +# define SPIN_DEP_MAP_INIT(lockname) +#endif + +#ifdef CONFIG_DEBUG_SPINLOCK +# define SPIN_DEBUG_INIT(lockname) \ + .magic = SPINLOCK_MAGIC, \ + .owner_cpu = -1, \ + .owner = SPINLOCK_OWNER_INIT, +#else +# define SPIN_DEBUG_INIT(lockname) +#endif + +#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ + { \ + .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ + SPIN_DEBUG_INIT(lockname) \ + SPIN_DEP_MAP_INIT(lockname) } + +#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ + (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) + +#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) + +#endif diff --git a/include/linux/spinlock_types_rt.h b/include/linux/spinlock_types_rt.h new file mode 100644 index 000000000000..3e3d8c5f7a9a --- /dev/null +++ b/include/linux/spinlock_types_rt.h @@ -0,0 +1,48 @@ +#ifndef __LINUX_SPINLOCK_TYPES_RT_H +#define __LINUX_SPINLOCK_TYPES_RT_H + +#ifndef __LINUX_SPINLOCK_TYPES_H +#error "Do not include directly. 
Include spinlock_types.h instead" +#endif + +#include <linux/cache.h> + +/* + * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: + */ +typedef struct spinlock { + struct rt_mutex lock; + unsigned int break_lock; +#ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lockdep_map dep_map; +#endif +} spinlock_t; + +#ifdef CONFIG_DEBUG_RT_MUTEXES +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + .file = __FILE__, \ + .line = __LINE__ , \ + } +#else +# define __RT_SPIN_INITIALIZER(name) \ + { \ + .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ + .save_state = 1, \ + } +#endif + +/* +.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) +*/ + +#define __SPIN_LOCK_UNLOCKED(name) \ + { .lock = __RT_SPIN_INITIALIZER(name.lock), \ + SPIN_DEP_MAP_INIT(name) } + +#define DEFINE_SPINLOCK(name) \ + spinlock_t name = __SPIN_LOCK_UNLOCKED(name) + +#endif diff --git a/include/linux/srcu.h b/include/linux/srcu.h index dc8eb63c6568..e793d3a257da 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h @@ -84,10 +84,10 @@ int init_srcu_struct(struct srcu_struct *sp); void process_srcu(struct work_struct *work); -#define __SRCU_STRUCT_INIT(name) \ +#define __SRCU_STRUCT_INIT(name, pcpu_name) \ { \ .completed = -300, \ - .per_cpu_ref = &name##_srcu_array, \ + .per_cpu_ref = &pcpu_name, \ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ .running = false, \ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ @@ -119,7 +119,7 @@ void process_srcu(struct work_struct *work); */ #define __DEFINE_SRCU(name, is_static) \ static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ - is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) + is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array) #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d9718378a8be..e81e6dc7dcb1 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -193,6 +193,12 @@ struct platform_freeze_ops { void (*end)(void); }; +#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) +extern bool pm_in_action; +#else +# define pm_in_action false +#endif + #ifdef CONFIG_SUSPEND /** * suspend_set_ops - set platform dependent suspend operations diff --git a/include/linux/swait.h b/include/linux/swait.h index c1f9c62a8a50..83f004a72320 100644 --- a/include/linux/swait.h +++ b/include/linux/swait.h @@ -87,6 +87,7 @@ static inline int swait_active(struct swait_queue_head *q) extern void swake_up(struct swait_queue_head *q); extern void swake_up_all(struct swait_queue_head *q); extern void swake_up_locked(struct swait_queue_head *q); +extern void swake_up_all_locked(struct swait_queue_head *q); extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); diff --git a/include/linux/swap.h b/include/linux/swap.h index 55ff5593c193..52bf5477dc92 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -11,6 +11,7 @@ #include <linux/fs.h> #include <linux/atomic.h> #include <linux/page-flags.h> +#include <linux/locallock.h> #include <asm/page.h> struct notifier_block; @@ -247,7 +248,8 @@ struct swap_info_struct { void *workingset_eviction(struct address_space *mapping, struct page *page); bool workingset_refault(void *shadow); void 
workingset_activation(struct page *page); -extern struct list_lru workingset_shadow_nodes; +extern struct list_lru __workingset_shadow_nodes; +DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); static inline unsigned int workingset_node_pages(struct radix_tree_node *node) { @@ -292,6 +294,7 @@ extern unsigned long nr_free_pagecache_pages(void); /* linux/mm/swap.c */ +DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); extern void lru_cache_add(struct page *); extern void lru_cache_add_anon(struct page *page); extern void lru_cache_add_file(struct page *page); diff --git a/include/linux/swork.h b/include/linux/swork.h new file mode 100644 index 000000000000..f175fa9a6016 --- /dev/null +++ b/include/linux/swork.h @@ -0,0 +1,24 @@ +#ifndef _LINUX_SWORK_H +#define _LINUX_SWORK_H + +#include <linux/list.h> + +struct swork_event { + struct list_head item; + unsigned long flags; + void (*func)(struct swork_event *); +}; + +static inline void INIT_SWORK(struct swork_event *event, + void (*func)(struct swork_event *)) +{ + event->flags = 0; + event->func = func; +} + +bool swork_queue(struct swork_event *sev); + +int swork_get(void); +void swork_put(void); + +#endif /* _LINUX_SWORK_H */ diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 2873baf5372a..eb1a108f17ca 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag) #define test_thread_flag(flag) \ test_ti_thread_flag(current_thread_info(), flag) -#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#ifdef CONFIG_PREEMPT_LAZY +#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ + test_thread_flag(TIF_NEED_RESCHED_LAZY)) +#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) +#define tif_need_resched_lazy() (test_thread_flag(TIF_NEED_RESCHED_LAZY)) + +#else +#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) +#define tif_need_resched_lazy() 0 +#endif #ifndef CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES static inline int arch_within_stack_frames(const void * const stack, diff --git a/include/linux/timer.h b/include/linux/timer.h index 51d601f192d4..83cea629efe1 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -241,7 +241,7 @@ extern void add_timer(struct timer_list *timer); extern int try_to_del_timer_sync(struct timer_list *timer); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) extern int del_timer_sync(struct timer_list *timer); #else # define del_timer_sync(t) del_timer(t) diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index ba57266d9e80..5c36934ec2bc 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -56,6 +56,9 @@ struct trace_entry { unsigned char flags; unsigned char preempt_count; int pid; + unsigned short migrate_disable; + unsigned short padding; + unsigned char preempt_lazy_count; }; #define TRACE_EVENT_TYPE_MAX \ diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index f30c187ed785..83bf0f798426 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -24,6 +24,7 @@ static __always_inline void pagefault_disabled_dec(void) */ static inline void pagefault_disable(void) { + migrate_disable(); pagefault_disabled_inc(); /* * make sure to have issued the store before a pagefault @@ -40,6 +41,7 @@ static inline void pagefault_enable(void) */ barrier(); pagefault_disabled_dec(); +
migrate_enable(); } /* diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 4a29c75b146e..0a294e950df8 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -27,6 +27,7 @@ #include <linux/errno.h> #include <linux/rbtree.h> #include <linux/types.h> +#include <linux/wait.h> struct vm_area_struct; struct mm_struct; diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 613771909b6e..e28c5a43229d 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, vm_event_states); */ static inline void __count_vm_event(enum vm_event_item item) { + preempt_disable_rt(); raw_cpu_inc(vm_event_states.event[item]); + preempt_enable_rt(); } static inline void count_vm_event(enum vm_event_item item) @@ -43,7 +45,9 @@ static inline void count_vm_event(enum vm_event_item item) static inline void __count_vm_events(enum vm_event_item item, long delta) { + preempt_disable_rt(); raw_cpu_add(vm_event_states.event[item], delta); + preempt_enable_rt(); } static inline void count_vm_events(enum vm_event_item item, long delta) diff --git a/include/linux/wait.h b/include/linux/wait.h index 2408e8d5c05c..db50d6609195 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -8,6 +8,7 @@ #include <linux/spinlock.h> #include <asm/current.h> #include <uapi/linux/wait.h> +#include <linux/atomic.h> typedef struct __wait_queue wait_queue_t; typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); diff --git a/include/net/dst.h b/include/net/dst.h index ddcff17615da..a1fc787b1a8c 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -452,7 +452,7 @@ static inline void dst_confirm(struct dst_entry *dst) static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, struct sk_buff *skb) { - const struct hh_cache *hh; + struct hh_cache *hh; if (dst->pending_confirm) { unsigned long now = jiffies; diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h index 231e121cc7d9..d125222b979d 100644 --- a/include/net/gen_stats.h +++ b/include/net/gen_stats.h @@ -5,6 +5,7 @@ #include <linux/socket.h> #include <linux/rtnetlink.h> #include <linux/pkt_sched.h> +#include <net/net_seq_lock.h> struct gnet_stats_basic_cpu { struct gnet_stats_basic_packed bstats; @@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type, spinlock_t *lock, struct gnet_dump *d, int padattr); -int gnet_stats_copy_basic(const seqcount_t *running, +int gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_dump *d, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); -void __gnet_stats_copy_basic(const seqcount_t *running, +void __gnet_stats_copy_basic(net_seqlock_t *running, struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu, struct gnet_stats_basic_packed *b); @@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); void gen_kill_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_rate_est64 *rate_est); int gen_replace_estimator(struct gnet_stats_basic_packed *bstats, struct gnet_stats_basic_cpu __percpu *cpu_bstats, struct gnet_stats_rate_est64 *rate_est, spinlock_t *stats_lock, - seqcount_t *running, struct nlattr *opt); + net_seqlock_t *running, struct nlattr *opt); bool 
gen_estimator_active(const struct gnet_stats_basic_packed *bstats, const struct gnet_stats_rate_est64 *rate_est); #endif diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 8b683841e574..bf656008f6e7 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h @@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb) } #endif -static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) +static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) { unsigned int seq; int hh_len; @@ -501,7 +501,7 @@ struct neighbour_cb { #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) -static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, +static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, const struct net_device *dev) { unsigned int seq; diff --git a/include/net/net_seq_lock.h b/include/net/net_seq_lock.h new file mode 100644 index 000000000000..a7034298a82a --- /dev/null +++ b/include/net/net_seq_lock.h @@ -0,0 +1,15 @@ +#ifndef __NET_NET_SEQ_LOCK_H__ +#define __NET_NET_SEQ_LOCK_H__ + +#ifdef CONFIG_PREEMPT_RT_BASE +# define net_seqlock_t seqlock_t +# define net_seq_begin(__r) read_seqbegin(__r) +# define net_seq_retry(__r, __s) read_seqretry(__r, __s) + +#else +# define net_seqlock_t seqcount_t +# define net_seq_begin(__r) read_seqcount_begin(__r) +# define net_seq_retry(__r, __s) read_seqcount_retry(__r, __s) +#endif + +#endif diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index 7adf4386ac8f..d3fd5c357268 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -69,6 +69,7 @@ struct netns_ipv4 { int sysctl_icmp_echo_ignore_all; int sysctl_icmp_echo_ignore_broadcasts; + int sysctl_icmp_echo_sysrq; int sysctl_icmp_ignore_bogus_error_responses; int sysctl_icmp_ratelimit; int sysctl_icmp_ratemask; diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index f18fc1a0321f..5d2c9b89c168 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h @@ -10,6 +10,7 @@ #include <linux/dynamic_queue_limits.h> #include <net/gen_stats.h> #include <net/rtnetlink.h> +#include <net/net_seq_lock.h> struct Qdisc_ops; struct qdisc_walker; @@ -86,7 +87,7 @@ struct Qdisc { struct sk_buff *gso_skb ____cacheline_aligned_in_smp; struct qdisc_skb_head q; struct gnet_stats_basic_packed bstats; - seqcount_t running; + net_seqlock_t running; struct gnet_stats_queue qstats; unsigned long state; struct Qdisc *next_sched; @@ -98,13 +99,22 @@ struct Qdisc { spinlock_t busylock ____cacheline_aligned_in_smp; }; -static inline bool qdisc_is_running(const struct Qdisc *qdisc) +static inline bool qdisc_is_running(struct Qdisc *qdisc) { +#ifdef CONFIG_PREEMPT_RT_BASE + return spin_is_locked(&qdisc->running.lock) ? true : false; +#else return (raw_read_seqcount(&qdisc->running) & 1) ? 
true : false; +#endif } static inline bool qdisc_run_begin(struct Qdisc *qdisc) { +#ifdef CONFIG_PREEMPT_RT_BASE + if (try_write_seqlock(&qdisc->running)) + return true; + return false; +#else if (qdisc_is_running(qdisc)) return false; /* Variant of write_seqcount_begin() telling lockdep a trylock @@ -113,11 +123,16 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc) raw_write_seqcount_begin(&qdisc->running); seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_); return true; +#endif } static inline void qdisc_run_end(struct Qdisc *qdisc) { +#ifdef CONFIG_PREEMPT_RT_BASE + write_sequnlock(&qdisc->running); +#else write_seqcount_end(&qdisc->running); +#endif } static inline bool qdisc_may_bulk(const struct Qdisc *qdisc) @@ -308,7 +323,7 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc) return qdisc_lock(root); } -static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) +static inline net_seqlock_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc) { struct Qdisc *root = qdisc_root_sleeping(qdisc); diff --git a/include/trace/events/hist.h b/include/trace/events/hist.h new file mode 100644 index 000000000000..f7710de1b1f3 --- /dev/null +++ b/include/trace/events/hist.h @@ -0,0 +1,73 @@ +#undef TRACE_SYSTEM +#define TRACE_SYSTEM hist + +#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_HIST_H + +#include "latency_hist.h" +#include <linux/tracepoint.h> + +#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) +#define trace_preemptirqsoff_hist(a, b) +#define trace_preemptirqsoff_hist_rcuidle(a, b) +#else +TRACE_EVENT(preemptirqsoff_hist, + + TP_PROTO(int reason, int starthist), + + TP_ARGS(reason, starthist), + + TP_STRUCT__entry( + __field(int, reason) + __field(int, starthist) + ), + + TP_fast_assign( + __entry->reason = reason; + __entry->starthist = starthist; + ), + + TP_printk("reason=%s starthist=%s", getaction(__entry->reason), + __entry->starthist ? "start" : "stop") +); +#endif + +#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST +#define trace_hrtimer_interrupt(a, b, c, d) +#else +TRACE_EVENT(hrtimer_interrupt, + + TP_PROTO(int cpu, long long offset, struct task_struct *curr, + struct task_struct *task), + + TP_ARGS(cpu, offset, curr, task), + + TP_STRUCT__entry( + __field(int, cpu) + __field(long long, offset) + __array(char, ccomm, TASK_COMM_LEN) + __field(int, cprio) + __array(char, tcomm, TASK_COMM_LEN) + __field(int, tprio) + ), + + TP_fast_assign( + __entry->cpu = cpu; + __entry->offset = offset; + memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); + __entry->cprio = curr->prio; + memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>", + task != NULL ? TASK_COMM_LEN : 7); + __entry->tprio = task != NULL ? 
task->prio : -1; + ), + + TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", + __entry->cpu, __entry->offset, __entry->ccomm, + __entry->cprio, __entry->tcomm, __entry->tprio) +); +#endif + +#endif /* _TRACE_HIST_H */ + +/* This part must be outside protection */ +#include <trace/define_trace.h> diff --git a/include/trace/events/latency_hist.h b/include/trace/events/latency_hist.h new file mode 100644 index 000000000000..d3f2fbd560b1 --- /dev/null +++ b/include/trace/events/latency_hist.h @@ -0,0 +1,29 @@ +#ifndef _LATENCY_HIST_H +#define _LATENCY_HIST_H + +enum hist_action { + IRQS_ON, + PREEMPT_ON, + TRACE_STOP, + IRQS_OFF, + PREEMPT_OFF, + TRACE_START, +}; + +static char *actions[] = { + "IRQS_ON", + "PREEMPT_ON", + "TRACE_STOP", + "IRQS_OFF", + "PREEMPT_OFF", + "TRACE_START", +}; + +static inline char *getaction(int action) +{ + if (action >= 0 && action < sizeof(actions)/sizeof(actions[0])) + return actions[action]; + return "unknown"; +} + +#endif /* _LATENCY_HIST_H */ diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9b90c57517a9..516ae88cddf4 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template, TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; - __entry->prio = p->prio; + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ __entry->success = 1; /* rudiment, kill when possible */ __entry->target_cpu = task_cpu(p); ), @@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch, memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN); __entry->next_pid = next->pid; __entry->next_prio = next->prio; + /* XXX SCHED_DEADLINE */ ), TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", @@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task, TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; - __entry->prio = p->prio; + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ __entry->orig_cpu = task_cpu(p); __entry->dest_cpu = dest_cpu; ), @@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_template, TP_fast_assign( memcpy(__entry->comm, p->comm, TASK_COMM_LEN); __entry->pid = p->pid; - __entry->prio = p->prio; + __entry->prio = p->prio; /* XXX SCHED_DEADLINE */ ), TP_printk("comm=%s pid=%d prio=%d", @@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait, TP_fast_assign( memcpy(__entry->comm, current->comm, TASK_COMM_LEN); __entry->pid = pid_nr(pid); - __entry->prio = current->prio; + __entry->prio = current->prio; /* XXX SCHED_DEADLINE */ ), TP_printk("comm=%s pid=%d prio=%d", @@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime, */ TRACE_EVENT(sched_pi_setprio, - TP_PROTO(struct task_struct *tsk, int newprio), + TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task), - TP_ARGS(tsk, newprio), + TP_ARGS(tsk, pi_task), TP_STRUCT__entry( __array( char, comm, TASK_COMM_LEN ) @@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio, memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); __entry->pid = tsk->pid; __entry->oldprio = tsk->prio; - __entry->newprio = newprio; + __entry->newprio = pi_task ?
pi_task->prio : tsk->prio; + /* XXX SCHED_DEADLINE bits missing */ ), TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", diff --git a/init/Kconfig b/init/Kconfig index 34407f15e6d3..2ce33a32e65d 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -506,7 +506,7 @@ config TINY_RCU config RCU_EXPERT bool "Make expert-level adjustments to RCU configuration" - default n + default y if PREEMPT_RT_FULL help This option needs to be enabled if you wish to make expert-level adjustments to RCU configuration. By default, @@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ_COMMON && SMP && RCU_EXPERT + depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL default n help This option permits CPUs to enter dynticks-idle state even if @@ -650,7 +650,7 @@ config TREE_RCU_TRACE config RCU_BOOST bool "Enable RCU priority boosting" depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT - default n + default y if PREEMPT_RT_FULL help This option boosts the priority of preempted RCU readers that block the current preemptible RCU grace period for too long. @@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL endchoice -config RCU_EXPEDITE_BOOT - bool - default n - help - This option enables expedited grace periods at boot time, - as if rcu_expedite_gp() had been invoked early in boot. - The corresponding rcu_unexpedite_gp() is invoked from - rcu_end_inkernel_boot(), which is intended to be invoked - at the end of the kernel-only boot sequence, just before - init is exec'ed. - - Accept the default if unsure. - endmenu # "RCU Subsystem" config BUILD_BIN2C @@ -1064,6 +1051,7 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on CGROUP_SCHED + depends on !PREEMPT_RT_FULL default n help This feature lets you explicitly allocate real CPU bandwidth @@ -1772,6 +1760,7 @@ choice config SLAB bool "SLAB" + depends on !PREEMPT_RT_FULL select HAVE_HARDENED_USERCOPY_ALLOCATOR help The regular slab allocator that is established and known to work @@ -1792,6 +1781,7 @@ config SLUB config SLOB depends on EXPERT bool "SLOB (Simple Allocator)" + depends on !PREEMPT_RT_FULL help SLOB replaces the stock allocator with a drastically simpler allocator. 
SLOB is generally more space efficient but @@ -1810,7 +1800,7 @@ config SLAB_FREELIST_RANDOM config SLUB_CPU_PARTIAL default y - depends on SLUB && SMP + depends on SLUB && SMP && !PREEMPT_RT_FULL bool "SLUB per cpu partial cache" help Per cpu partial caches accellerate objects allocation and freeing diff --git a/init/Makefile b/init/Makefile index c4fb45525d08..821190dfaa75 100644 --- a/init/Makefile +++ b/init/Makefile @@ -35,4 +35,4 @@ silent_chk_compile.h = : include/generated/compile.h: FORCE @$($(quiet)chk_compile.h) $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ - "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" + "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" diff --git a/init/main.c b/init/main.c index ae3996ae9bac..6470deef01c9 100644 --- a/init/main.c +++ b/init/main.c @@ -507,6 +507,7 @@ asmlinkage __visible void __init start_kernel(void) setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); + softirq_early_init(); boot_cpu_state_init(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ diff --git a/ipc/sem.c b/ipc/sem.c index 10b94bc59d4a..b8360eaacc7a 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -712,6 +712,13 @@ undo: static void wake_up_sem_queue_prepare(struct list_head *pt, struct sem_queue *q, int error) { +#ifdef CONFIG_PREEMPT_RT_BASE + struct task_struct *p = q->sleeper; + get_task_struct(p); + q->status = error; + wake_up_process(p); + put_task_struct(p); +#else if (list_empty(pt)) { /* * Hold preempt off so that we don't get preempted and have the @@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, q->pid = error; list_add_tail(&q->list, pt); +#endif } /** @@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(struct list_head *pt, */ static void wake_up_sem_queue_do(struct list_head *pt) { +#ifndef CONFIG_PREEMPT_RT_BASE struct sem_queue *q, *t; int did_something; @@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct list_head *pt) } if (did_something) preempt_enable(); +#endif } static void unlink_queue(struct sem_array *sma, struct sem_queue *q) diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks index ebdb0043203a..b9e6aa7e5aa6 100644 --- a/kernel/Kconfig.locks +++ b/kernel/Kconfig.locks @@ -225,11 +225,11 @@ config ARCH_SUPPORTS_ATOMIC_RMW config MUTEX_SPIN_ON_OWNER def_bool y - depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW + depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL config RWSEM_SPIN_ON_OWNER def_bool y - depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW + depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL config LOCK_SPIN_ON_OWNER def_bool y diff --git a/kernel/Kconfig.preempt b/kernel/Kconfig.preempt index 3f9c97419f02..11dbe26a8279 100644 --- a/kernel/Kconfig.preempt +++ b/kernel/Kconfig.preempt @@ -1,3 +1,16 @@ +config PREEMPT + bool + select PREEMPT_COUNT + +config PREEMPT_RT_BASE + bool + select PREEMPT + +config HAVE_PREEMPT_LAZY + bool + +config PREEMPT_LAZY + def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL choice prompt "Preemption Model" @@ -33,9 +46,9 @@ config PREEMPT_VOLUNTARY Select this if you are building a kernel for a desktop system. 
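A note on the ipc/sem.c change above: the non-RT code defers the actual wakeups to wake_up_sem_queue_do(), which relies on a preempt-off section that PREEMPT_RT cannot tolerate. A minimal sketch of the RT path's direct-wakeup pattern (illustrative only, not part of the patch text; the get/put pair is what keeps the sleeper's task_struct alive while its queue entry may already be freed):

	/* Hypothetical helper mirroring the RT branch of
	 * wake_up_sem_queue_prepare(): pin the task, publish the
	 * status the sleeper polls, then wake it immediately. */
	static void rt_wake_sem_sleeper(struct sem_queue *q, int error)
	{
		struct task_struct *p = q->sleeper;

		get_task_struct(p);	/* q may be freed once status is visible */
		q->status = error;	/* sleeper re-checks this after schedule() */
		wake_up_process(p);
		put_task_struct(p);	/* drop the pin; the task may now exit */
	}
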
-config PREEMPT +config PREEMPT__LL bool "Preemptible Kernel (Low-Latency Desktop)" - select PREEMPT_COUNT + select PREEMPT select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK help This option reduces the latency of the kernel by making @@ -52,6 +65,22 @@ config PREEMPT embedded system with latency requirements in the milliseconds range. +config PREEMPT_RTB + bool "Preemptible Kernel (Basic RT)" + select PREEMPT_RT_BASE + help + This option is basically the same as (Low-Latency Desktop) but + enables changes which are preliminary for the full preemptible + RT kernel. + +config PREEMPT_RT_FULL + bool "Fully Preemptible Kernel (RT)" + depends on IRQ_FORCED_THREADING + select PREEMPT_RT_BASE + select PREEMPT_RCU + help + All and everything + endchoice config PREEMPT_COUNT diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 4c233437ee1a..6c3c9f298f22 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_head *rcu_head) queue_work(cgroup_destroy_wq, &css->destroy_work); } -static void css_release_work_fn(struct work_struct *work) +static void css_release_work_fn(struct swork_event *sev) { struct cgroup_subsys_state *css = - container_of(work, struct cgroup_subsys_state, destroy_work); + container_of(sev, struct cgroup_subsys_state, destroy_swork); struct cgroup_subsys *ss = css->ss; struct cgroup *cgrp = css->cgroup; @@ -5087,8 +5087,8 @@ static void css_release(struct percpu_ref *ref) struct cgroup_subsys_state *css = container_of(ref, struct cgroup_subsys_state, refcnt); - INIT_WORK(&css->destroy_work, css_release_work_fn); - queue_work(cgroup_destroy_wq, &css->destroy_work); + INIT_SWORK(&css->destroy_swork, css_release_work_fn); + swork_queue(&css->destroy_swork); } static void init_and_link_css(struct cgroup_subsys_state *css, @@ -5749,6 +5749,7 @@ static int __init cgroup_wq_init(void) */ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); BUG_ON(!cgroup_destroy_wq); + BUG_ON(swork_get()); /* * Used to destroy pidlists and separate to serve as flush domain. diff --git a/kernel/cpu.c b/kernel/cpu.c index 26a4f74bff83..010db3c943cd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -239,6 +239,289 @@ static struct { #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) +/** + * hotplug_pcp - per cpu hotplug descriptor + * @unplug: set when pin_current_cpu() needs to sync tasks + * @sync_tsk: the task that waits for tasks to finish pinned sections + * @refcount: counter of tasks in pinned sections + * @grab_lock: set when the tasks entering pinned sections should wait + * @synced: notifier for @sync_tsk to tell cpu_down it's finished + * @mutex: the mutex to make tasks wait (used when @grab_lock is true) + * @mutex_init: zero if the mutex hasn't been initialized yet. + * + * Although @unplug and @sync_tsk may point to the same task, the @unplug + * is used as a flag and still exists after @sync_tsk has exited and + * @sync_tsk set to NULL. + */ +struct hotplug_pcp { + struct task_struct *unplug; + struct task_struct *sync_tsk; + int refcount; + int grab_lock; + struct completion synced; + struct completion unplug_wait; +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * Note, on PREEMPT_RT, the hotplug lock must save the state of + * the task, otherwise the mutex will cause the task to fail + * to sleep when required. (Because it's called from migrate_disable()) + * + * The spinlock_t on PREEMPT_RT is a mutex that saves the task's + * state. 
+ */ + spinlock_t lock; +#else + struct mutex mutex; +#endif + int mutex_init; +}; + +#ifdef CONFIG_PREEMPT_RT_FULL +# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock) +# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock) +#else +# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) +# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) +#endif + +static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); + +/** + * pin_current_cpu - Prevent the current cpu from being unplugged + * + * Lightweight version of get_online_cpus() to prevent cpu from being + * unplugged when code runs in a migration disabled region. + * + * Must be called with preemption disabled (preempt_count = 1)! + */ +void pin_current_cpu(void) +{ + struct hotplug_pcp *hp; + int force = 0; + +retry: + hp = this_cpu_ptr(&hotplug_pcp); + + if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || + hp->unplug == current) { + hp->refcount++; + return; + } + if (hp->grab_lock) { + preempt_enable(); + hotplug_lock(hp); + hotplug_unlock(hp); + } else { + preempt_enable(); + /* + * Try to push this task off of this CPU. + */ + if (!migrate_me()) { + preempt_disable(); + hp = this_cpu_ptr(&hotplug_pcp); + if (!hp->grab_lock) { + /* + * Just let it continue it's already pinned + * or about to sleep. + */ + force = 1; + goto retry; + } + preempt_enable(); + } + } + preempt_disable(); + goto retry; +} + +/** + * unpin_current_cpu - Allow unplug of current cpu + * + * Must be called with preemption or interrupts disabled! + */ +void unpin_current_cpu(void) +{ + struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); + + WARN_ON(hp->refcount <= 0); + + /* This is safe. sync_unplug_thread is pinned to this cpu */ + if (!--hp->refcount && hp->unplug && hp->unplug != current) + wake_up_process(hp->unplug); +} + +static void wait_for_pinned_cpus(struct hotplug_pcp *hp) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + while (hp->refcount) { + schedule_preempt_disabled(); + set_current_state(TASK_UNINTERRUPTIBLE); + } +} + +static int sync_unplug_thread(void *data) +{ + struct hotplug_pcp *hp = data; + + wait_for_completion(&hp->unplug_wait); + preempt_disable(); + hp->unplug = current; + wait_for_pinned_cpus(hp); + + /* + * This thread will synchronize the cpu_down() with threads + * that have pinned the CPU. When the pinned CPU count reaches + * zero, we inform the cpu_down code to continue to the next step. + */ + set_current_state(TASK_UNINTERRUPTIBLE); + preempt_enable(); + complete(&hp->synced); + + /* + * If all succeeds, the next step will need tasks to wait till + * the CPU is offline before continuing. To do this, the grab_lock + * is set and tasks going into pin_current_cpu() will block on the + * mutex. But we still need to wait for those that are already in + * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() + * will kick this thread out. + */ + while (!hp->grab_lock && !kthread_should_stop()) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + + /* Make sure grab_lock is seen before we see a stale completion */ + smp_mb(); + + /* + * Now just before cpu_down() enters stop machine, we need to make + * sure all tasks that are in pinned CPU sections are out, and new + * tasks will now grab the lock, keeping them from entering pinned + * CPU sections. 
+ */ + if (!kthread_should_stop()) { + preempt_disable(); + wait_for_pinned_cpus(hp); + preempt_enable(); + complete(&hp->synced); + } + + set_current_state(TASK_UNINTERRUPTIBLE); + while (!kthread_should_stop()) { + schedule(); + set_current_state(TASK_UNINTERRUPTIBLE); + } + set_current_state(TASK_RUNNING); + + /* + * Force this thread off this CPU as it's going down and + * we don't want any more work on this CPU. + */ + current->flags &= ~PF_NO_SETAFFINITY; + set_cpus_allowed_ptr(current, cpu_present_mask); + migrate_me(); + return 0; +} + +static void __cpu_unplug_sync(struct hotplug_pcp *hp) +{ + wake_up_process(hp->sync_tsk); + wait_for_completion(&hp->synced); +} + +static void __cpu_unplug_wait(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + complete(&hp->unplug_wait); + wait_for_completion(&hp->synced); +} + +/* + * Start the sync_unplug_thread on the target cpu and wait for it to + * complete. + */ +static int cpu_unplug_begin(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + int err; + + /* Protected by cpu_hotplug.lock */ + if (!hp->mutex_init) { +#ifdef CONFIG_PREEMPT_RT_FULL + spin_lock_init(&hp->lock); +#else + mutex_init(&hp->mutex); +#endif + hp->mutex_init = 1; + } + + /* Inform the scheduler to migrate tasks off this CPU */ + tell_sched_cpu_down_begin(cpu); + + init_completion(&hp->synced); + init_completion(&hp->unplug_wait); + + hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); + if (IS_ERR(hp->sync_tsk)) { + err = PTR_ERR(hp->sync_tsk); + hp->sync_tsk = NULL; + return err; + } + kthread_bind(hp->sync_tsk, cpu); + + /* + * Wait for tasks to get out of the pinned sections, + * it's still OK if new tasks enter. Some CPU notifiers will + * wait for tasks that are going to enter these sections and + * we must not have them block. + */ + wake_up_process(hp->sync_tsk); + return 0; +} + +static void cpu_unplug_sync(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + init_completion(&hp->synced); + /* The completion needs to be initialized before setting grab_lock */ + smp_wmb(); + + /* Grab the mutex before setting grab_lock */ + hotplug_lock(hp); + hp->grab_lock = 1; + + /* + * The CPU notifiers have been completed. + * Wait for tasks to get out of pinned CPU sections and have new + * tasks block until the CPU is completely down. + */ + __cpu_unplug_sync(hp); + + /* All done with the sync thread */ + kthread_stop(hp->sync_tsk); + hp->sync_tsk = NULL; +} + +static void cpu_unplug_done(unsigned int cpu) +{ + struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); + + hp->unplug = NULL; + /* Let all tasks know cpu unplug is finished before cleaning up */ + smp_wmb(); + + if (hp->sync_tsk) + kthread_stop(hp->sync_tsk); + + if (hp->grab_lock) { + hotplug_unlock(hp); + /* protected by cpu_hotplug.lock */ + hp->grab_lock = 0; + } + tell_sched_cpu_down_done(cpu); +} void get_online_cpus(void) { @@ -802,10 +1085,14 @@ static int takedown_cpu(unsigned int cpu) struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int err; + __cpu_unplug_wait(cpu); /* Park the smpboot threads */ kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread); smpboot_park_threads(cpu); + /* Notifiers are done. Don't let any more tasks pin this CPU. */ + cpu_unplug_sync(cpu); + /* * Prevent irq alloc/free while the dying cpu reorganizes the * interrupt affinities.
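The pin_current_cpu()/unpin_current_cpu() pair introduced above brackets migration-disabled regions so a CPU cannot be unplugged while tasks are pinned to it. A minimal usage sketch (hypothetical caller; per the comments above, pin_current_cpu() expects to be entered with preemption already disabled):

	/* Enter a section during which this CPU must stay online.
	 * pin_current_cpu() either bumps hp->refcount or, if an
	 * unplug is in flight, blocks/migrates the caller first. */
	preempt_disable();
	pin_current_cpu();
	do_per_cpu_work();	/* hypothetical work bound to this CPU */
	unpin_current_cpu();	/* last unpin wakes the sync_unplug thread */
	preempt_enable();
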
@@ -890,6 +1177,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu); int prev_state, ret = 0; bool hasdied = false; + int mycpu; + cpumask_var_t cpumask; + cpumask_var_t cpumask_org; if (num_online_cpus() == 1) return -EBUSY; @@ -897,7 +1187,34 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, if (!cpu_present(cpu)) return -EINVAL; + /* Move the downtaker off the unplug cpu */ + if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) + return -ENOMEM; + if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { + free_cpumask_var(cpumask); + return -ENOMEM; + } + + cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); + cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); + set_cpus_allowed_ptr(current, cpumask); + free_cpumask_var(cpumask); + migrate_disable(); + mycpu = smp_processor_id(); + if (mycpu == cpu) { + printk(KERN_ERR "Yuck! Still on unplug CPU!\n"); + migrate_enable(); + ret = -EBUSY; + goto restore_cpus; + } + + migrate_enable(); cpu_hotplug_begin(); + ret = cpu_unplug_begin(cpu); + if (ret) { + printk("cpu_unplug_begin(%d) failed\n", cpu); + goto out_cancel; + } cpuhp_tasks_frozen = tasks_frozen; @@ -936,10 +1253,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen, hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE; out: + cpu_unplug_done(cpu); +out_cancel: cpu_hotplug_done(); /* This post dead nonsense must die */ if (!ret && hasdied) cpu_notify_nofail(CPU_POST_DEAD, cpu); +restore_cpus: + set_cpus_allowed_ptr(current, cpumask_org); + free_cpumask_var(cpumask_org); return ret; } @@ -1242,6 +1564,8 @@ core_initcall(cpu_hotplug_pm_sync_init); #endif /* CONFIG_PM_SLEEP_SMP */ +int __boot_cpu_id; + #endif /* CONFIG_SMP */ /* Boot processor state steps */ @@ -1926,6 +2250,10 @@ void __init boot_cpu_init(void) set_cpu_active(cpu, true); set_cpu_present(cpu, true); set_cpu_possible(cpu, true); + +#ifdef CONFIG_SMP + __boot_cpu_id = cpu; +#endif } /* diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 511b1dd8ff09..1dd63833ecdc 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -285,7 +285,7 @@ static struct cpuset top_cpuset = { */ static DEFINE_MUTEX(cpuset_mutex); -static DEFINE_SPINLOCK(callback_lock); +static DEFINE_RAW_SPINLOCK(callback_lock); static struct workqueue_struct *cpuset_migrate_mm_wq; @@ -908,9 +908,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cp->effective_cpus, new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); @@ -975,9 +975,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) return retval; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->cpus_allowed as a temp variable */ update_cpumasks_hier(cs, trialcs->cpus_allowed); @@ -1177,9 +1177,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems) continue; rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cp->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock);
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) && !nodes_equal(cp->mems_allowed, cp->effective_mems)); @@ -1247,9 +1247,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs, if (retval < 0) goto done; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = trialcs->mems_allowed; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* use trialcs->mems_allowed as a temp variable */ update_nodemasks_hier(cs, &trialcs->mems_allowed); @@ -1340,9 +1340,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) || (is_spread_page(cs) != is_spread_page(trialcs))); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->flags = trialcs->flags; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) rebuild_sched_domains_locked(); @@ -1757,7 +1757,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) cpuset_filetype_t type = seq_cft(sf)->private; int ret = 0; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); switch (type) { case FILE_CPULIST: @@ -1776,7 +1776,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v) ret = -EINVAL; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); return ret; } @@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) cpuset_inc(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { cpumask_copy(cs->effective_cpus, parent->effective_cpus); cs->effective_mems = parent->effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) goto out_unlock; @@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css) } rcu_read_unlock(); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cs->mems_allowed = parent->mems_allowed; cs->effective_mems = parent->mems_allowed; cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); cpumask_copy(cs->effective_cpus, parent->cpus_allowed); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); out_unlock: mutex_unlock(&cpuset_mutex); return 0; @@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css) static void cpuset_bind(struct cgroup_subsys_state *root_css) { mutex_lock(&cpuset_mutex); - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) { cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); @@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css) top_cpuset.mems_allowed = top_cpuset.effective_mems; } - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); mutex_unlock(&cpuset_mutex); } @@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs, { bool is_empty; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->cpus_allowed, new_cpus); cpumask_copy(cs->effective_cpus, new_cpus); cs->mems_allowed = *new_mems; cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* * Don't call update_tasks_cpumask() if the cpuset becomes empty, @@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs, if (nodes_empty(*new_mems)) *new_mems 
= parent_cs(cs)->effective_mems; - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); cpumask_copy(cs->effective_cpus, new_cpus); cs->effective_mems = *new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); if (cpus_updated) update_tasks_cpumask(cs); @@ -2317,21 +2317,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work) /* synchronize cpus_allowed to cpu_active_mask */ if (cpus_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); cpumask_copy(top_cpuset.effective_cpus, &new_cpus); - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); /* we don't mess with cpumasks of tasks in top_cpuset */ } /* synchronize mems_allowed to N_MEMORY */ if (mems_updated) { - spin_lock_irq(&callback_lock); + raw_spin_lock_irq(&callback_lock); if (!on_dfl) top_cpuset.mems_allowed = new_mems; top_cpuset.effective_mems = new_mems; - spin_unlock_irq(&callback_lock); + raw_spin_unlock_irq(&callback_lock); update_tasks_nodemask(&top_cpuset); } @@ -2436,11 +2436,11 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) { unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_cpus(task_cs(tsk), pmask); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); } void cpuset_cpus_allowed_fallback(struct task_struct *tsk) @@ -2488,11 +2488,11 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) nodemask_t mask; unsigned long flags; - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); guarantee_online_mems(task_cs(tsk), &mask); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return mask; } @@ -2584,14 +2584,14 @@ bool __cpuset_node_allowed(int node, gfp_t gfp_mask) return true; /* Not hardwall and node outside mems_allowed: scan up cpusets */ - spin_lock_irqsave(&callback_lock, flags); + raw_spin_lock_irqsave(&callback_lock, flags); rcu_read_lock(); cs = nearest_hardwall_ancestor(task_cs(current)); allowed = node_isset(node, cs->mems_allowed); rcu_read_unlock(); - spin_unlock_irqrestore(&callback_lock, flags); + raw_spin_unlock_irqrestore(&callback_lock, flags); return allowed; } diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c index fc1ef736253c..83c666537a7a 100644 --- a/kernel/debug/kdb/kdb_io.c +++ b/kernel/debug/kdb/kdb_io.c @@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) int linecount; int colcount; int logging, saved_loglevel = 0; - int saved_trap_printk; int got_printf_lock = 0; int retlen = 0; int fnd, len; @@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap) unsigned long uninitialized_var(flags); preempt_disable(); - saved_trap_printk = kdb_trap_printk; - kdb_trap_printk = 0; /* Serialize kdb_printf if multiple cpus try to write at once. * But if any cpu goes recursive in kdb, just print the output, @@ -855,7 +852,6 @@ kdb_print_out: } else { __release(kdb_printf_lock); } - kdb_trap_printk = saved_trap_printk; preempt_enable(); return retlen; } @@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...) 
va_list ap; int r; + kdb_trap_printk++; va_start(ap, fmt); r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); va_end(ap); + kdb_trap_printk--; return r; } diff --git a/kernel/events/core.c b/kernel/events/core.c index 36ff2d93f222..554aebf7e88b 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu) raw_spin_lock_init(&cpuctx->hrtimer_lock); hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); timer->function = perf_mux_hrtimer_handler; + timer->irqsafe = 1; } static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) @@ -8390,6 +8391,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hwc->hrtimer.function = perf_swevent_hrtimer; + hwc->hrtimer.irqsafe = 1; /* * Since hrtimers have a fixed rate, we can do a static freq->period diff --git a/kernel/exit.c b/kernel/exit.c index 3076f3089919..fb2ebcf3ca7c 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -143,7 +143,7 @@ static void __exit_signal(struct task_struct *tsk) * Do this under ->siglock, we can race with another thread * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. */ - flush_sigqueue(&tsk->pending); + flush_task_sigqueue(tsk); tsk->sighand = NULL; spin_unlock(&sighand->siglock); diff --git a/kernel/fork.c b/kernel/fork.c index 9321b1ad3335..276acd8acf0a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -76,6 +76,7 @@ #include <linux/compiler.h> #include <linux/sysctl.h> #include <linux/kcov.h> +#include <linux/kprobes.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> @@ -376,13 +377,24 @@ static inline void put_signal_struct(struct signal_struct *sig) if (atomic_dec_and_test(&sig->sigcnt)) free_signal_struct(sig); } - +#ifdef CONFIG_PREEMPT_RT_BASE +static +#endif void __put_task_struct(struct task_struct *tsk) { WARN_ON(!tsk->exit_state); WARN_ON(atomic_read(&tsk->usage)); WARN_ON(tsk == current); + /* + * Remove function-return probe instances associated with this + * task and put them back on the free list. + */ + kprobe_flush_task(tsk); + + /* Task is done with its stack. */ + put_task_stack(tsk); + cgroup_free(tsk); task_numa_free(tsk); security_task_free(tsk); @@ -393,7 +405,18 @@ void __put_task_struct(struct task_struct *tsk) if (!profile_handoff_task(tsk)) free_task(tsk); } +#ifndef CONFIG_PREEMPT_RT_BASE EXPORT_SYMBOL_GPL(__put_task_struct); +#else +void __put_task_struct_cb(struct rcu_head *rhp) +{ + struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); + + __put_task_struct(tsk); + +} +EXPORT_SYMBOL_GPL(__put_task_struct_cb); +#endif void __init __weak arch_task_cache_init(void) { } @@ -535,6 +558,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node) tsk->splice_pipe = NULL; tsk->task_frag.page = NULL; tsk->wake_q.next = NULL; + tsk->wake_q_sleeper.next = NULL; account_kernel_stack(tsk, 1); @@ -861,6 +885,19 @@ void __mmdrop(struct mm_struct *mm) } EXPORT_SYMBOL_GPL(__mmdrop); +#ifdef CONFIG_PREEMPT_RT_BASE +/* + * RCU callback for delayed mm drop. Not strictly rcu, but we don't + * want another facility to make this work. 
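
The #ifdef CONFIG_PREEMPT_RT_BASE hunks above route the final put of a
task_struct through an RCU callback (__put_task_struct_cb), and the
__mmdrop_delayed() helper below does the same for mm_struct. As the
comment says, the point is not RCU semantics; the patch borrows the
callback machinery so the destructor runs from a schedulable context
instead of wherever the last reference happens to be dropped. A minimal
sketch of the container_of-based deferral (struct and names are
illustrative, not from the patch):

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct widget {
		struct rcu_head rcu;	/* embedded callback head */
		/* payload ... */
	};

	static void widget_free_rcu(struct rcu_head *rhp)
	{
		/* recover the enclosing object from the rcu_head */
		struct widget *w = container_of(rhp, struct widget, rcu);

		kfree(w);
	}

	static void widget_put_final(struct widget *w)
	{
		/* safe from atomic context; the free runs later */
		call_rcu(&w->rcu, widget_free_rcu);
	}
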
+ */ +void __mmdrop_delayed(struct rcu_head *rhp) +{ + struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); + + __mmdrop(mm); +} +#endif + static inline void __mmput(struct mm_struct *mm) { VM_BUG_ON(atomic_read(&mm->mm_users)); @@ -1426,6 +1463,7 @@ static void rt_mutex_init_task(struct task_struct *p) #ifdef CONFIG_RT_MUTEXES p->pi_waiters = RB_ROOT; p->pi_waiters_leftmost = NULL; + p->pi_top_task = NULL; p->pi_blocked_on = NULL; #endif } @@ -1435,6 +1473,9 @@ static void rt_mutex_init_task(struct task_struct *p) */ static void posix_cpu_timers_init(struct task_struct *tsk) { +#ifdef CONFIG_PREEMPT_RT_BASE + tsk->posix_timer_list = NULL; +#endif tsk->cputime_expires.prof_exp = 0; tsk->cputime_expires.virt_exp = 0; tsk->cputime_expires.sched_exp = 0; @@ -1561,6 +1602,7 @@ static __latent_entropy struct task_struct *copy_process( spin_lock_init(&p->alloc_lock); init_sigpending(&p->pending); + p->sigqueue_cache = NULL; p->utime = p->stime = p->gtime = 0; p->utimescaled = p->stimescaled = 0; diff --git a/kernel/futex.c b/kernel/futex.c index 88bad86180ac..2e074d63e8fa 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -801,7 +801,7 @@ static int refill_pi_state_cache(void) return 0; } -static struct futex_pi_state * alloc_pi_state(void) +static struct futex_pi_state *alloc_pi_state(void) { struct futex_pi_state *pi_state = current->pi_state_cache; @@ -811,6 +811,11 @@ static struct futex_pi_state * alloc_pi_state(void) return pi_state; } +static void get_pi_state(struct futex_pi_state *pi_state) +{ + WARN_ON_ONCE(!atomic_inc_not_zero(&pi_state->refcount)); +} + /* * Drops a reference to the pi_state object and frees or caches it * when the last reference is gone. @@ -855,7 +860,7 @@ static void put_pi_state(struct futex_pi_state *pi_state) * Look up the task based on what TID userspace gave us. * We dont trust it. */ -static struct task_struct * futex_find_get_task(pid_t pid) +static struct task_struct *futex_find_get_task(pid_t pid) { struct task_struct *p; @@ -905,7 +910,9 @@ void exit_pi_state_list(struct task_struct *curr) * task still owns the PI-state: */ if (head->next != next) { + raw_spin_unlock_irq(&curr->pi_lock); spin_unlock(&hb->lock); + raw_spin_lock_irq(&curr->pi_lock); continue; } @@ -915,10 +922,12 @@ void exit_pi_state_list(struct task_struct *curr) pi_state->owner = NULL; raw_spin_unlock_irq(&curr->pi_lock); - rt_mutex_unlock(&pi_state->pi_mutex); - + get_pi_state(pi_state); spin_unlock(&hb->lock); + rt_mutex_futex_unlock(&pi_state->pi_mutex); + put_pi_state(pi_state); + raw_spin_lock_irq(&curr->pi_lock); } raw_spin_unlock_irq(&curr->pi_lock); @@ -972,6 +981,39 @@ void exit_pi_state_list(struct task_struct *curr) * * [10] There is no transient state which leaves owner and user space * TID out of sync. + * + * + * Serialization and lifetime rules: + * + * hb->lock: + * + * hb -> futex_q, relation + * futex_q -> pi_state, relation + * + * (cannot be raw because hb can contain arbitrary amount + * of futex_q's) + * + * pi_mutex->wait_lock: + * + * {uval, pi_state} + * + * (and pi_mutex 'obviously') + * + * p->pi_lock: + * + * p->pi_state_list -> pi_state->list, relation + * + * pi_state->refcount: + * + * pi_state lifetime + * + * + * Lock order: + * + * hb->lock + * pi_mutex->wait_lock + * p->pi_lock + * */ /* @@ -979,10 +1021,13 @@ void exit_pi_state_list(struct task_struct *curr) * the pi_state against the user space value. If correct, attach to * it. 
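
The new get_pi_state() helper above takes an additional reference with
atomic_inc_not_zero() and warns if the count had already reached zero:
incrementing a dead refcount would resurrect an object that may already
be freed, so the helper is only legal for callers guaranteed to hold a
reference. The generic shape of that rule, as a sketch (names are
illustrative):

	#include <linux/atomic.h>

	struct obj {
		atomic_t refcount;
	};

	static void obj_get(struct obj *o)
	{
		/*
		 * Callers must already hold a reference, so the count
		 * can never legitimately be zero here; if it is, that
		 * is a use-after-free in the making.
		 */
		WARN_ON_ONCE(!atomic_inc_not_zero(&o->refcount));
	}
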
*/ -static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, +static int attach_to_pi_state(u32 __user *uaddr, u32 uval, + struct futex_pi_state *pi_state, struct futex_pi_state **ps) { pid_t pid = uval & FUTEX_TID_MASK; + u32 uval2; + int ret; /* * Userspace might have messed up non-PI and PI futexes [3] @@ -990,9 +1035,39 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, if (unlikely(!pi_state)) return -EINVAL; + /* + * We get here with hb->lock held, and having found a + * futex_top_waiter(). This means that futex_lock_pi() of said futex_q + * has dropped the hb->lock in between queue_me() and unqueue_me_pi(), + * which in turn means that futex_lock_pi() still has a reference on + * our pi_state. + * + * The waiter holding a reference on @pi_state also protects against + * the unlocked put_pi_state() in futex_unlock_pi(), futex_lock_pi() + * and futex_wait_requeue_pi() as it cannot go to 0 and consequently + * free pi_state before we can take a reference ourselves. + */ WARN_ON(!atomic_read(&pi_state->refcount)); /* + * Now that we have a pi_state, we can acquire wait_lock + * and do the state validation. + */ + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + /* + * Since {uval, pi_state} is serialized by wait_lock, and our current + * uval was read without holding it, it can have changed. Verify it + * still is what we expect it to be, otherwise retry the entire + * operation. + */ + if (get_futex_value_locked(&uval2, uaddr)) + goto out_efault; + + if (uval != uval2) + goto out_eagain; + + /* * Handle the owner died case: */ if (uval & FUTEX_OWNER_DIED) { @@ -1007,11 +1082,11 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, * is not 0. Inconsistent state. [5] */ if (pid) - return -EINVAL; + goto out_einval; /* * Take a ref on the state and return success. [4] */ - goto out_state; + goto out_attach; } /* @@ -1023,14 +1098,14 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, * Take a ref on the state and return success. [6] */ if (!pid) - goto out_state; + goto out_attach; } else { /* * If the owner died bit is not set, then the pi_state * must have an owner. [7] */ if (!pi_state->owner) - return -EINVAL; + goto out_einval; } /* @@ -1039,11 +1114,29 @@ static int attach_to_pi_state(u32 uval, struct futex_pi_state *pi_state, * user space TID. [9/10] */ if (pid != task_pid_vnr(pi_state->owner)) - return -EINVAL; -out_state: - atomic_inc(&pi_state->refcount); + goto out_einval; + +out_attach: + get_pi_state(pi_state); + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); *ps = pi_state; return 0; + +out_einval: + ret = -EINVAL; + goto out_error; + +out_eagain: + ret = -EAGAIN; + goto out_error; + +out_efault: + ret = -EFAULT; + goto out_error; + +out_error: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + return ret; } /* @@ -1094,6 +1187,9 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, /* * No existing pi state. First waiter. [2] + * + * This creates pi_state, we have hb->lock held, this means nothing can + * observe this state, wait_lock is irrelevant. 
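
Because attach_to_pi_state() read the futex word before it could take
wait_lock, the rework above re-reads the word under the lock and bails
out with -EAGAIN when it changed, so the owner/TID checks only ever run
against a value that is stable under wait_lock. Condensed from the hunk
(the error labels drop wait_lock before returning):

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);

	if (get_futex_value_locked(&uval2, uaddr))
		goto out_efault;	/* page not present: caller faults it in */

	if (uval != uval2)
		goto out_eagain;	/* word changed: retry the whole lookup */

	/* owner-died / TID consistency checks run on a verified value */
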
*/ pi_state = alloc_pi_state(); @@ -1118,17 +1214,18 @@ static int attach_to_pi_owner(u32 uval, union futex_key *key, return 0; } -static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, +static int lookup_pi_state(u32 __user *uaddr, u32 uval, + struct futex_hash_bucket *hb, union futex_key *key, struct futex_pi_state **ps) { - struct futex_q *match = futex_top_waiter(hb, key); + struct futex_q *top_waiter = futex_top_waiter(hb, key); /* * If there is a waiter on that futex, validate it and * attach to the pi_state when the validation succeeds. */ - if (match) - return attach_to_pi_state(uval, match->pi_state, ps); + if (top_waiter) + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); /* * We are the first waiter - try to look up the owner based on @@ -1147,7 +1244,7 @@ static int lock_pi_update_atomic(u32 __user *uaddr, u32 uval, u32 newval) if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) return -EFAULT; - /*If user space value changed, let the caller retry */ + /* If user space value changed, let the caller retry */ return curval != uval ? -EAGAIN : 0; } @@ -1175,7 +1272,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct task_struct *task, int set_waiters) { u32 uval, newval, vpid = task_pid_vnr(task); - struct futex_q *match; + struct futex_q *top_waiter; int ret; /* @@ -1201,9 +1298,9 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * Lookup existing state first. If it exists, try to attach to * its pi_state. */ - match = futex_top_waiter(hb, key); - if (match) - return attach_to_pi_state(uval, match->pi_state, ps); + top_waiter = futex_top_waiter(hb, key); + if (top_waiter) + return attach_to_pi_state(uaddr, uval, top_waiter->pi_state, ps); /* * No waiter and user TID is 0. We are here because the @@ -1284,50 +1381,45 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) wake_q_add(wake_q, p); __unqueue_futex(q); /* - * The waiting task can free the futex_q as soon as - * q->lock_ptr = NULL is written, without taking any locks. A - * memory barrier is required here to prevent the following - * store to lock_ptr from getting ahead of the plist_del. + * The waiting task can free the futex_q as soon as q->lock_ptr = NULL + * is written, without taking any locks. This is possible in the event + * of a spurious wakeup, for example. A memory barrier is required here + * to prevent the following store to lock_ptr from getting ahead of the + * plist_del in __unqueue_futex(). */ - smp_wmb(); - q->lock_ptr = NULL; + smp_store_release(&q->lock_ptr, NULL); } -static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, - struct futex_hash_bucket *hb) +/* + * Caller must hold a reference on @pi_state. + */ +static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) { - struct task_struct *new_owner; - struct futex_pi_state *pi_state = this->pi_state; u32 uninitialized_var(curval), newval; + struct task_struct *new_owner; + bool postunlock = false; WAKE_Q(wake_q); - bool deboost; + WAKE_Q(wake_sleeper_q); int ret = 0; - if (!pi_state) - return -EINVAL; - - /* - * If current does not own the pi_state then the futex is - * inconsistent and user space fiddled with the futex value. 
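
In mark_wake_futex() above, the open-coded smp_wmb() plus plain store
is replaced by smp_store_release(), which orders everything written
before it (here the plist_del() in __unqueue_futex()) ahead of the NULL
store becoming visible. A generic release/acquire pairing, as a
fragment (struct and names are illustrative):

	struct msg {
		int payload;
		int ready;
	};

	/* producer: payload is published before ready */
	m->payload = 42;
	smp_store_release(&m->ready, 1);

	/* consumer: the acquire pairs with the release above */
	if (smp_load_acquire(&m->ready))
		pr_info("%d\n", m->payload);
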
- */ - if (pi_state->owner != current) - return -EINVAL; - - raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); + if (WARN_ON_ONCE(!new_owner)) { + /* + * As per the comment in futex_unlock_pi() this should not happen. + * + * When this happens, give up our locks and try again, giving + * the futex_lock_pi() instance time to complete, either by + * waiting on the rtmutex or removing itself from the futex + * queue. + */ + ret = -EAGAIN; + goto out_unlock; + } /* - * It is possible that the next waiter (the one that brought - * this owner to the kernel) timed out and is no longer - * waiting on the lock. - */ - if (!new_owner) - new_owner = this->task; - - /* - * We pass it to the next owner. The WAITERS bit is always - * kept enabled while there is PI state around. We cleanup the - * owner died bit, because we are the owner. + * We pass it to the next owner. The WAITERS bit is always kept + * enabled while there is PI state around. We cleanup the owner + * died bit, because we are the owner. */ newval = FUTEX_WAITERS | task_pid_vnr(new_owner); @@ -1336,6 +1428,7 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) { ret = -EFAULT; + } else if (curval != uval) { /* * If a unconditional UNLOCK_PI operation (user space did not @@ -1348,10 +1441,14 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, else ret = -EINVAL; } - if (ret) { - raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - return ret; - } + + if (ret) + goto out_unlock; + + /* + * This is a point of no return; once we modify the uval there is no + * going back and subsequent operations must not fail. + */ raw_spin_lock(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); @@ -1364,22 +1461,15 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this, pi_state->owner = new_owner; raw_spin_unlock(&new_owner->pi_lock); + postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, + &wake_sleeper_q); +out_unlock: raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); + if (postunlock) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); - /* - * First unlock HB so the waiter does not spin on it once he got woken - * up. Second wake up the waiter before the priority is adjusted. If we - * deboost first (and lose our higher priority), then the task might get - * scheduled away before the wake up can take place. - */ - spin_unlock(&hb->lock); - wake_up_q(&wake_q); - if (deboost) - rt_mutex_adjust_prio(current); - - return 0; + return ret; } /* @@ -1825,7 +1915,7 @@ retry_private: * If that call succeeds then we have pi_state and an * initial refcount on it. */ - ret = lookup_pi_state(ret, hb2, &key2, &pi_state); + ret = lookup_pi_state(uaddr2, ret, hb2, &key2, &pi_state); } switch (ret) { @@ -1908,7 +1998,7 @@ retry_private: * refcount on the pi_state and store the pointer in * the futex_q object of the waiter. */ - atomic_inc(&pi_state->refcount); + get_pi_state(pi_state); this->pi_state = pi_state; ret = rt_mutex_start_proxy_lock(&pi_state->pi_mutex, this->rt_waiter, @@ -1925,6 +2015,16 @@ retry_private: requeue_pi_wake_futex(this, &key2, hb2); drop_count++; continue; + } else if (ret == -EAGAIN) { + /* + * Waiter was woken by timeout or + * signal and has set pi_blocked_on to + * PI_WAKEUP_INPROGRESS before we + * tried to enqueue it on the rtmutex. 
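
Note the shape of the reworked wake_futex_pi(): while wait_lock is held
it only collects wakeups on wake queues, and the actual wake plus
priority deboost happen in rt_mutex_postunlock() after every lock is
dropped. Waking first, then deboosting without locks held, closes the
window where the current task loses its boosted priority and gets
preempted before it can wake the new owner. Condensed (wake_sleeper_q
is the RT-patch queue for sleeping-spinlock waiters):

	WAKE_Q(wake_q);
	WAKE_Q(wake_sleeper_q);
	bool postunlock;

	raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
	/* ... pick new owner, rewrite uval, move pi_state ownership ... */
	postunlock = __rt_mutex_futex_unlock(&pi_state->pi_mutex,
					     &wake_q, &wake_sleeper_q);
	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
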
+ */ + this->pi_state = NULL; + put_pi_state(pi_state); + continue; } else if (ret) { /* * rt_mutex_start_proxy_lock() detected a @@ -2008,20 +2108,7 @@ queue_unlock(struct futex_hash_bucket *hb) hb_waiters_dec(hb); } -/** - * queue_me() - Enqueue the futex_q on the futex_hash_bucket - * @q: The futex_q to enqueue - * @hb: The destination hash bucket - * - * The hb->lock must be held by the caller, and is released here. A call to - * queue_me() is typically paired with exactly one call to unqueue_me(). The - * exceptions involve the PI related operations, which may use unqueue_me_pi() - * or nothing if the unqueue is done as part of the wake process and the unqueue - * state is implicit in the state of woken task (see futex_wait_requeue_pi() for - * an example). - */ -static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) - __releases(&hb->lock) +static inline void __queue_me(struct futex_q *q, struct futex_hash_bucket *hb) { int prio; @@ -2038,6 +2125,24 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) plist_node_init(&q->list, prio); plist_add(&q->list, &hb->chain); q->task = current; +} + +/** + * queue_me() - Enqueue the futex_q on the futex_hash_bucket + * @q: The futex_q to enqueue + * @hb: The destination hash bucket + * + * The hb->lock must be held by the caller, and is released here. A call to + * queue_me() is typically paired with exactly one call to unqueue_me(). The + * exceptions involve the PI related operations, which may use unqueue_me_pi() + * or nothing if the unqueue is done as part of the wake process and the unqueue + * state is implicit in the state of woken task (see futex_wait_requeue_pi() for + * an example). + */ +static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) + __releases(&hb->lock) +{ + __queue_me(q, hb); spin_unlock(&hb->lock); } @@ -2124,10 +2229,13 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, { u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS; struct futex_pi_state *pi_state = q->pi_state; - struct task_struct *oldowner = pi_state->owner; u32 uval, uninitialized_var(curval), newval; + struct task_struct *oldowner; int ret; + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + + oldowner = pi_state->owner; /* Owner died? */ if (!pi_state->owner) newtid |= FUTEX_OWNER_DIED; @@ -2135,7 +2243,8 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, /* * We are here either because we stole the rtmutex from the * previous highest priority waiter or we are the highest priority - * waiter but failed to get the rtmutex the first time. + * waiter but have failed to get the rtmutex the first time. + * * We have to replace the newowner TID in the user space variable. * This must be atomic as we have to preserve the owner died bit here. * @@ -2143,17 +2252,16 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, * because we can fault here. Imagine swapped out pages or a fork * that marked all the anonymous memory readonly for cow. * - * Modifying pi_state _before_ the user space value would - * leave the pi_state in an inconsistent state when we fault - * here, because we need to drop the hash bucket lock to - * handle the fault. This might be observed in the PID check - * in lookup_pi_state. + * Modifying pi_state _before_ the user space value would leave the + * pi_state in an inconsistent state when we fault here, because we + * need to drop the locks to handle the fault. 
This might be observed + * in the PID check in lookup_pi_state. */ retry: if (get_futex_value_locked(&uval, uaddr)) goto handle_fault; - while (1) { + for (;;) { newval = (uval & FUTEX_OWNER_DIED) | newtid; if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) @@ -2168,47 +2276,60 @@ retry: * itself. */ if (pi_state->owner != NULL) { - raw_spin_lock_irq(&pi_state->owner->pi_lock); + raw_spin_lock(&pi_state->owner->pi_lock); WARN_ON(list_empty(&pi_state->list)); list_del_init(&pi_state->list); - raw_spin_unlock_irq(&pi_state->owner->pi_lock); + raw_spin_unlock(&pi_state->owner->pi_lock); } pi_state->owner = newowner; - raw_spin_lock_irq(&newowner->pi_lock); + raw_spin_lock(&newowner->pi_lock); WARN_ON(!list_empty(&pi_state->list)); list_add(&pi_state->list, &newowner->pi_state_list); - raw_spin_unlock_irq(&newowner->pi_lock); + raw_spin_unlock(&newowner->pi_lock); + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + return 0; /* - * To handle the page fault we need to drop the hash bucket - * lock here. That gives the other task (either the highest priority - * waiter itself or the task which stole the rtmutex) the - * chance to try the fixup of the pi_state. So once we are - * back from handling the fault we need to check the pi_state - * after reacquiring the hash bucket lock and before trying to - * do another fixup. When the fixup has been done already we - * simply return. + * To handle the page fault we need to drop the locks here. That gives + * the other task (either the highest priority waiter itself or the + * task which stole the rtmutex) the chance to try the fixup of the + * pi_state. So once we are back from handling the fault we need to + * check the pi_state after reacquiring the locks and before trying to + * do another fixup. When the fixup has been done already we simply + * return. + * + * Note: we hold both hb->lock and pi_mutex->wait_lock. We can safely + * drop hb->lock since the caller owns the hb -> futex_q relation. + * Dropping the pi_mutex->wait_lock requires the state revalidate. */ handle_fault: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); spin_unlock(q->lock_ptr); ret = fault_in_user_writeable(uaddr); spin_lock(q->lock_ptr); + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); /* * Check if someone else fixed it for us: */ - if (pi_state->owner != oldowner) - return 0; + if (pi_state->owner != oldowner) { + ret = 0; + goto out_unlock; + } if (ret) - return ret; + goto out_unlock; goto retry; + +out_unlock: + raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + return ret; } static long futex_wait_restart(struct restart_block *restart); @@ -2230,13 +2351,16 @@ static long futex_wait_restart(struct restart_block *restart); */ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) { - struct task_struct *owner; int ret = 0; if (locked) { /* * Got the lock. We might not be the anticipated owner if we * did a lock-steal - fix up the PI-state in that case: + * + * We can safely read pi_state->owner without holding wait_lock + * because we now own the rt_mutex, only the owner will attempt + * to change it. */ if (q->pi_state->owner != current) ret = fixup_pi_state_owner(uaddr, q, current); @@ -2244,43 +2368,15 @@ static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) } /* - * Catch the rare case, where the lock was released when we were on the - * way back before we locked the hash bucket. - */ - if (q->pi_state->owner == current) { - /* - * Try to get the rt_mutex now. 
This might fail as some other - * task acquired the rt_mutex after we removed ourself from the - * rt_mutex waiters list. - */ - if (rt_mutex_trylock(&q->pi_state->pi_mutex)) { - locked = 1; - goto out; - } - - /* - * pi_state is incorrect, some other task did a lock steal and - * we returned due to timeout or signal without taking the - * rt_mutex. Too late. - */ - raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); - owner = rt_mutex_owner(&q->pi_state->pi_mutex); - if (!owner) - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); - raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); - ret = fixup_pi_state_owner(uaddr, q, owner); - goto out; - } - - /* * Paranoia check. If we did not take the lock, then we should not be * the owner of the rt_mutex. */ - if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) + if (rt_mutex_owner(&q->pi_state->pi_mutex) == current) { printk(KERN_ERR "fixup_owner: ret = %d pi-mutex: %p " "pi-state %p\n", ret, q->pi_state->pi_mutex.owner, q->pi_state->owner); + } out: return ret ? ret : locked; @@ -2504,6 +2600,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ktime_t *time, int trylock) { struct hrtimer_sleeper timeout, *to = NULL; + struct futex_pi_state *pi_state = NULL; + struct rt_mutex_waiter rt_waiter; struct futex_hash_bucket *hb; struct futex_q q = futex_q_init; int res, ret; @@ -2556,25 +2654,77 @@ retry_private: } } + WARN_ON(!q.pi_state); + /* * Only actually queue now that the atomic ops are done: */ - queue_me(&q, hb); + __queue_me(&q, hb); - WARN_ON(!q.pi_state); - /* - * Block on the PI mutex: - */ - if (!trylock) { - ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to); - } else { - ret = rt_mutex_trylock(&q.pi_state->pi_mutex); + if (trylock) { + ret = rt_mutex_futex_trylock(&q.pi_state->pi_mutex); /* Fixup the trylock return value: */ ret = ret ? 0 : -EWOULDBLOCK; + goto no_block; + } + + rt_mutex_init_waiter(&rt_waiter, false); + + /* + * On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not + * hold it while doing rt_mutex_start_proxy(), because then it will + * include hb->lock in the blocking chain, even through we'll not in + * fact hold it while blocking. This will lead it to report -EDEADLK + * and BUG when futex_unlock_pi() interleaves with this. + * + * Therefore acquire wait_lock while holding hb->lock, but drop the + * latter before calling rt_mutex_start_proxy_lock(). This still fully + * serializes against futex_unlock_pi() as that does the exact same + * lock handoff sequence. + */ + raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock); + /* + * the migrate_disable() here disables migration in the in_atomic() fast + * path which is enabled again in the following spin_unlock(). We have + * one migrate_disable() pending in the slow-path which is reversed + * after the raw_spin_unlock_irq() where we leave the atomic context. 
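
The lock handoff these comments describe is implemented just below;
condensed, with error paths omitted (pi_mutex stands for
&q.pi_state->pi_mutex):

	raw_spin_lock_irq(&pi_mutex->wait_lock);  /* pin the rtmutex state */
	spin_unlock(q.lock_ptr);                  /* drop hb->lock first */
	ret = __rt_mutex_start_proxy_lock(pi_mutex, &rt_waiter, current);
	raw_spin_unlock_irq(&pi_mutex->wait_lock);

	ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);

	spin_lock(q.lock_ptr);                    /* reacquire before cleanup */
	if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter))
		ret = 0;                          /* acquired it after all */

Blocking on the rtmutex only after hb->lock is dropped keeps hb->lock
out of the PI blocking chain, which matters on PREEMPT_RT_FULL where
hb->lock is itself a sleeping lock.
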
+ */ + migrate_disable(); + + spin_unlock(q.lock_ptr); + ret = __rt_mutex_start_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter, current); + raw_spin_unlock_irq(&q.pi_state->pi_mutex.wait_lock); + migrate_enable(); + + if (ret) { + if (ret == 1) + ret = 0; + + spin_lock(q.lock_ptr); + goto no_block; } + + if (unlikely(to)) + hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS); + + ret = rt_mutex_wait_proxy_lock(&q.pi_state->pi_mutex, to, &rt_waiter); + spin_lock(q.lock_ptr); /* + * If we failed to acquire the lock (signal/timeout), we must + * first acquire the hb->lock before removing the lock from the + * rt_mutex waitqueue, such that we can keep the hb and rt_mutex + * wait lists consistent. + * + * In particular; it is important that futex_unlock_pi() can not + * observe this inconsistency. + */ + if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter)) + ret = 0; + +no_block: + /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. */ @@ -2590,12 +2740,19 @@ retry_private: * If fixup_owner() faulted and was unable to handle the fault, unlock * it and return the fault to userspace. */ - if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) - rt_mutex_unlock(&q.pi_state->pi_mutex); + if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current)) { + pi_state = q.pi_state; + get_pi_state(pi_state); + } /* Unqueue and drop the lock */ unqueue_me_pi(&q); + if (pi_state) { + rt_mutex_futex_unlock(&pi_state->pi_mutex); + put_pi_state(pi_state); + } + goto out_put_key; out_unlock_put_key: @@ -2604,8 +2761,10 @@ out_unlock_put_key: out_put_key: put_futex_key(&q.key); out: - if (to) + if (to) { + hrtimer_cancel(&to->timer); destroy_hrtimer_on_stack(&to->timer); + } return ret != -EINTR ? ret : -ERESTARTNOINTR; uaddr_faulted: @@ -2632,7 +2791,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current); union futex_key key = FUTEX_KEY_INIT; struct futex_hash_bucket *hb; - struct futex_q *match; + struct futex_q *top_waiter; int ret; retry: @@ -2656,12 +2815,48 @@ retry: * all and we at least want to know if user space fiddled * with the futex value instead of blindly unlocking. */ - match = futex_top_waiter(hb, &key); - if (match) { - ret = wake_futex_pi(uaddr, uval, match, hb); + top_waiter = futex_top_waiter(hb, &key); + if (top_waiter) { + struct futex_pi_state *pi_state = top_waiter->pi_state; + + ret = -EINVAL; + if (!pi_state) + goto out_unlock; + + /* + * If current does not own the pi_state then the futex is + * inconsistent and user space fiddled with the futex value. + */ + if (pi_state->owner != current) + goto out_unlock; + + get_pi_state(pi_state); + /* + * By taking wait_lock while still holding hb->lock, we ensure + * there is no point where we hold neither; and therefore + * wake_futex_pi() must observe a state consistent with what we + * observed. + */ + raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + /* + * Magic trickery for now to make the RT migrate disable + * logic happy. The following spin_unlock() happens with + * interrupts disabled so the internal migrate_enable() + * won't undo the migrate_disable() which was issued when + * locking hb->lock. + */ + migrate_disable(); + spin_unlock(&hb->lock); + + /* Drops pi_state->pi_mutex.wait_lock */ + ret = wake_futex_pi(uaddr, uval, pi_state); + + migrate_enable(); + + put_pi_state(pi_state); + /* - * In case of success wake_futex_pi dropped the hash - * bucket lock. + * Success, we're done! 
No tricky corner cases. */ if (!ret) goto out_putkey; @@ -2676,7 +2871,6 @@ retry: * setting the FUTEX_WAITERS bit. Try again. */ if (ret == -EAGAIN) { - spin_unlock(&hb->lock); put_futex_key(&key); goto retry; } @@ -2684,7 +2878,7 @@ retry: * wake_futex_pi has detected invalid state. Tell user * space. */ - goto out_unlock; + goto out_putkey; } /* @@ -2694,8 +2888,10 @@ retry: * preserve the WAITERS bit not the OWNER_DIED one. We are the * owner. */ - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) + if (cmpxchg_futex_value_locked(&curval, uaddr, uval, 0)) { + spin_unlock(&hb->lock); goto pi_faulted; + } /* * If uval has changed, let user space handle it. @@ -2709,7 +2905,6 @@ out_putkey: return ret; pi_faulted: - spin_unlock(&hb->lock); put_futex_key(&key); ret = fault_in_user_writeable(uaddr); @@ -2813,8 +3008,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, u32 __user *uaddr2) { struct hrtimer_sleeper timeout, *to = NULL; + struct futex_pi_state *pi_state = NULL; struct rt_mutex_waiter rt_waiter; - struct futex_hash_bucket *hb; + struct futex_hash_bucket *hb, *hb2; union futex_key key2 = FUTEX_KEY_INIT; struct futex_q q = futex_q_init; int res, ret; @@ -2839,10 +3035,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * The waiter is allocated on our stack, manipulated by the requeue * code while we sleep on uaddr. */ - debug_rt_mutex_init_waiter(&rt_waiter); - RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); - RB_CLEAR_NODE(&rt_waiter.tree_entry); - rt_waiter.task = NULL; + rt_mutex_init_waiter(&rt_waiter, false); ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); if (unlikely(ret != 0)) @@ -2873,20 +3066,55 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, /* Queue the futex_q, drop the hb lock, wait for wakeup. */ futex_wait_queue_me(hb, &q, to); - spin_lock(&hb->lock); - ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); - spin_unlock(&hb->lock); - if (ret) - goto out_put_keys; + /* + * On RT we must avoid races with requeue and trying to block + * on two mutexes (hb->lock and uaddr2's rtmutex) by + * serializing access to pi_blocked_on with pi_lock. + */ + raw_spin_lock_irq(¤t->pi_lock); + if (current->pi_blocked_on) { + /* + * We have been requeued or are in the process of + * being requeued. + */ + raw_spin_unlock_irq(¤t->pi_lock); + } else { + /* + * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS + * prevents a concurrent requeue from moving us to the + * uaddr2 rtmutex. After that we can safely acquire + * (and possibly block on) hb->lock. + */ + current->pi_blocked_on = PI_WAKEUP_INPROGRESS; + raw_spin_unlock_irq(¤t->pi_lock); + + spin_lock(&hb->lock); + + /* + * Clean up pi_blocked_on. We might leak it otherwise + * when we succeeded with the hb->lock in the fast + * path. + */ + raw_spin_lock_irq(¤t->pi_lock); + current->pi_blocked_on = NULL; + raw_spin_unlock_irq(¤t->pi_lock); + + ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); + spin_unlock(&hb->lock); + if (ret) + goto out_put_keys; + } /* - * In order for us to be here, we know our q.key == key2, and since - * we took the hb->lock above, we also know that futex_requeue() has - * completed and we no longer have to concern ourselves with a wakeup - * race with the atomic proxy lock acquisition by the requeue code. The - * futex_requeue dropped our key1 reference and incremented our key2 - * reference count. 
+ * In order to be here, we have either been requeued, are in + * the process of being requeued, or requeue successfully + * acquired uaddr2 on our behalf. If pi_blocked_on was + * non-null above, we may be racing with a requeue. Do not + * rely on q->lock_ptr to be hb2->lock until after blocking on + * hb->lock or hb2->lock. The futex_requeue dropped our key1 + * reference and incremented our key2 reference count. */ + hb2 = hash_futex(&key2); /* Check if the requeue code acquired the second futex for us. */ if (!q.rt_waiter) { @@ -2895,16 +3123,19 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * did a lock-steal - fix up the PI-state in that case. */ if (q.pi_state && (q.pi_state->owner != current)) { - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); ret = fixup_pi_state_owner(uaddr2, &q, current); - if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) - rt_mutex_unlock(&q.pi_state->pi_mutex); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { + pi_state = q.pi_state; + get_pi_state(pi_state); + } /* * Drop the reference to the pi state which * the requeue_pi() code acquired for us. */ put_pi_state(q.pi_state); - spin_unlock(q.lock_ptr); + spin_unlock(&hb2->lock); } } else { struct rt_mutex *pi_mutex; @@ -2916,10 +3147,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, */ WARN_ON(!q.pi_state); pi_mutex = &q.pi_state->pi_mutex; - ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter); - debug_rt_mutex_free_waiter(&rt_waiter); + ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter); - spin_lock(q.lock_ptr); + spin_lock(&hb2->lock); + BUG_ON(&hb2->lock != q.lock_ptr); + if (ret && !rt_mutex_cleanup_proxy_lock(pi_mutex, &rt_waiter)) + ret = 0; + + debug_rt_mutex_free_waiter(&rt_waiter); /* * Fixup the pi_state owner and possibly acquire the lock if we * haven't already. @@ -2937,13 +3172,20 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, * the fault, unlock the rt_mutex and return the fault to * userspace. */ - if (ret && rt_mutex_owner(pi_mutex) == current) - rt_mutex_unlock(pi_mutex); + if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) { + pi_state = q.pi_state; + get_pi_state(pi_state); + } /* Unqueue and drop the lock. */ unqueue_me_pi(&q); } + if (pi_state) { + rt_mutex_futex_unlock(&pi_state->pi_mutex); + put_pi_state(pi_state); + } + if (ret == -EINTR) { /* * We've already been requeued, but cannot restart by calling diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index d3f24905852c..f87aa8fdcc51 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -181,10 +181,16 @@ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) { irqreturn_t retval; unsigned int flags = 0; + struct pt_regs *regs = get_irq_regs(); + u64 ip = regs ? 
instruction_pointer(regs) : 0; retval = __handle_irq_event_percpu(desc, &flags); - add_interrupt_randomness(desc->irq_data.irq, flags); +#ifdef CONFIG_PREEMPT_RT_FULL + desc->random_ip = ip; +#else + add_interrupt_randomness(desc->irq_data.irq, flags, ip); +#endif if (!noirqdebug) note_interrupt(desc, retval); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index ea41820ab12e..5994867526f3 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -22,6 +22,7 @@ #include "internals.h" #ifdef CONFIG_IRQ_FORCED_THREADING +# ifndef CONFIG_PREEMPT_RT_BASE __read_mostly bool force_irqthreads; static int __init setup_forced_irqthreads(char *arg) @@ -30,6 +31,7 @@ static int __init setup_forced_irqthreads(char *arg) return 0; } early_param("threadirqs", setup_forced_irqthreads); +# endif #endif static void __synchronize_hardirq(struct irq_desc *desc) @@ -233,7 +235,12 @@ int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask, if (desc->affinity_notify) { kref_get(&desc->affinity_notify->kref); + +#ifdef CONFIG_PREEMPT_RT_BASE + swork_queue(&desc->affinity_notify->swork); +#else schedule_work(&desc->affinity_notify->work); +#endif } irqd_set(data, IRQD_AFFINITY_SET); @@ -271,10 +278,8 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) } EXPORT_SYMBOL_GPL(irq_set_affinity_hint); -static void irq_affinity_notify(struct work_struct *work) +static void _irq_affinity_notify(struct irq_affinity_notify *notify) { - struct irq_affinity_notify *notify = - container_of(work, struct irq_affinity_notify, work); struct irq_desc *desc = irq_to_desc(notify->irq); cpumask_var_t cpumask; unsigned long flags; @@ -296,6 +301,35 @@ out: kref_put(¬ify->kref, notify->release); } +#ifdef CONFIG_PREEMPT_RT_BASE +static void init_helper_thread(void) +{ + static int init_sworker_once; + + if (init_sworker_once) + return; + if (WARN_ON(swork_get())) + return; + init_sworker_once = 1; +} + +static void irq_affinity_notify(struct swork_event *swork) +{ + struct irq_affinity_notify *notify = + container_of(swork, struct irq_affinity_notify, swork); + _irq_affinity_notify(notify); +} + +#else + +static void irq_affinity_notify(struct work_struct *work) +{ + struct irq_affinity_notify *notify = + container_of(work, struct irq_affinity_notify, work); + _irq_affinity_notify(notify); +} +#endif + /** * irq_set_affinity_notifier - control notification of IRQ affinity changes * @irq: Interrupt for which to enable/disable notification @@ -324,7 +358,12 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) if (notify) { notify->irq = irq; kref_init(¬ify->kref); +#ifdef CONFIG_PREEMPT_RT_BASE + INIT_SWORK(¬ify->swork, irq_affinity_notify); + init_helper_thread(); +#else INIT_WORK(¬ify->work, irq_affinity_notify); +#endif } raw_spin_lock_irqsave(&desc->lock, flags); @@ -879,7 +918,15 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) local_bh_disable(); ret = action->thread_fn(action->irq, action->dev_id); irq_finalize_oneshot(desc, action); - local_bh_enable(); + /* + * Interrupts which have real time requirements can be set up + * to avoid softirq processing in the thread handler. This is + * safe as these interrupts do not raise soft interrupts. 
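
IRQ_NO_SOFTIRQ_CALL and the matching IRQF_NO_SOFTIRQ_CALL request flag
are additions of this patch set (they do not exist in mainline): a
threaded handler flagged this way ends with _local_bh_enable() and so
skips running pending softirqs in its own context, which is only safe
because such handlers raise no softirqs themselves. A driver with hard
real-time constraints would opt in roughly like this (handler and names
are made up):

	static irqreturn_t my_rt_thread_fn(int irq, void *dev_id)
	{
		/* must not raise softirqs: no tasklets, no napi_schedule() */
		return IRQ_HANDLED;
	}

	err = request_threaded_irq(irq, NULL, my_rt_thread_fn,
				   IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				   "my-rt-dev", dev);
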
+ */ + if (irq_settings_no_softirq_call(desc)) + _local_bh_enable(); + else + local_bh_enable(); return ret; } @@ -976,6 +1023,12 @@ static int irq_thread(void *data) if (action_ret == IRQ_WAKE_THREAD) irq_wake_secondary(desc, action); +#ifdef CONFIG_PREEMPT_RT_FULL + migrate_disable(); + add_interrupt_randomness(action->irq, 0, + desc->random_ip ^ (unsigned long) action); + migrate_enable(); +#endif wake_threads_waitq(desc); } @@ -1338,6 +1391,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) irqd_set(&desc->irq_data, IRQD_NO_BALANCING); } + if (new->flags & IRQF_NO_SOFTIRQ_CALL) + irq_settings_set_no_softirq_call(desc); + /* Set default affinity mask once everything is setup */ setup_affinity(desc, mask); @@ -2063,7 +2119,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state); * This call sets the internal irqchip state of an interrupt, * depending on the value of @which. * - * This function should be called with preemption disabled if the + * This function should be called with migration disabled if the * interrupt controller has per-cpu registers. */ int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 320579d89091..2df2d4445b1e 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -16,6 +16,7 @@ enum { _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, _IRQ_IS_POLLED = IRQ_IS_POLLED, _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, + _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; @@ -30,6 +31,7 @@ enum { #define IRQ_PER_CPU_DEVID GOT_YOU_MORON #define IRQ_IS_POLLED GOT_YOU_MORON #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON +#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON #undef IRQF_MODIFY_MASK #define IRQF_MODIFY_MASK GOT_YOU_MORON @@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); } +static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) +{ + return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; +} + +static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) +{ + desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; +} + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { return desc->status_use_accessors & _IRQ_PER_CPU; diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 5707f97a3e6a..73f38dc7a7fb 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true"); static int __init irqfixup_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 1; printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); printk(KERN_WARNING "This may impact system performance.\n"); @@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644); static int __init irqpoll_setup(char *str) { +#ifdef CONFIG_PREEMPT_RT_BASE + pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); + return 1; +#endif irqfixup = 2; printk(KERN_WARNING "Misrouted IRQ fixup and polling support " "enabled\n"); diff --git a/kernel/irq_work.c b/kernel/irq_work.c index bcf107ce0854..2899ba0d23d1 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -17,6 +17,7 @@ #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/smp.h> +#include <linux/interrupt.h> #include <asm/processor.h> @@ -65,6 +66,8 @@ void __weak 
arch_irq_work_raise(void) */ bool irq_work_queue_on(struct irq_work *work, int cpu) { + struct llist_head *list; + /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(cpu)); @@ -75,7 +78,12 @@ bool irq_work_queue_on(struct irq_work *work, int cpu) if (!irq_work_claim(work)) return false; - if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) + list = &per_cpu(lazy_list, cpu); + else + list = &per_cpu(raised_list, cpu); + + if (llist_add(&work->llnode, list)) arch_send_call_function_single_ipi(cpu); return true; @@ -86,6 +94,9 @@ EXPORT_SYMBOL_GPL(irq_work_queue_on); /* Enqueue the irq work @work on the current CPU */ bool irq_work_queue(struct irq_work *work) { + struct llist_head *list; + bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); + /* Only queue if not already pending */ if (!irq_work_claim(work)) return false; @@ -93,13 +104,15 @@ bool irq_work_queue(struct irq_work *work) /* Queue the entry and raise the IPI if needed. */ preempt_disable(); - /* If the work is "lazy", handle it from next tick if any */ - if (work->flags & IRQ_WORK_LAZY) { - if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && - tick_nohz_tick_stopped()) - arch_irq_work_raise(); - } else { - if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) + lazy_work = work->flags & IRQ_WORK_LAZY; + + if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) + list = this_cpu_ptr(&lazy_list); + else + list = this_cpu_ptr(&raised_list); + + if (llist_add(&work->llnode, list)) { + if (!lazy_work || tick_nohz_tick_stopped()) arch_irq_work_raise(); } @@ -116,9 +129,8 @@ bool irq_work_needs_cpu(void) raised = this_cpu_ptr(&raised_list); lazy = this_cpu_ptr(&lazy_list); - if (llist_empty(raised) || arch_irq_work_has_interrupt()) - if (llist_empty(lazy)) - return false; + if (llist_empty(raised) && llist_empty(lazy)) + return false; /* All work should have been flushed before going offline */ WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); @@ -132,7 +144,7 @@ static void irq_work_run_list(struct llist_head *list) struct irq_work *work; struct llist_node *llnode; - BUG_ON(!irqs_disabled()); + BUG_ON_NONRT(!irqs_disabled()); if (llist_empty(list)) return; @@ -169,7 +181,16 @@ static void irq_work_run_list(struct llist_head *list) void irq_work_run(void) { irq_work_run_list(this_cpu_ptr(&raised_list)); - irq_work_run_list(this_cpu_ptr(&lazy_list)); + if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { + /* + * NOTE: we raise softirq via IPI for safety, + * and execute in irq_work_tick() to move the + * overhead from hard to soft irq context. 
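
With this rework, irq_work items on PREEMPT_RT_FULL default to the lazy
list and run from TIMER_SOFTIRQ; only items explicitly flagged
IRQ_WORK_HARD_IRQ (an RT-patch flag, not in mainline) stay in hard
interrupt context. Queueing a hard-irq item would look roughly like
this (function and names are illustrative):

	#include <linux/irq_work.h>

	static void my_work_fn(struct irq_work *work)
	{
		/* runs in hard irq context because of the flag below */
	}

	static struct irq_work my_work = {
		.flags = IRQ_WORK_HARD_IRQ,	/* RT: keep on raised_list */
		.func  = my_work_fn,
	};

	/* from wherever the event is detected: */
	irq_work_queue(&my_work);
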
+ */ + if (!llist_empty(this_cpu_ptr(&lazy_list))) + raise_softirq(TIMER_SOFTIRQ); + } else + irq_work_run_list(this_cpu_ptr(&lazy_list)); } EXPORT_SYMBOL_GPL(irq_work_run); @@ -179,8 +200,17 @@ void irq_work_tick(void) if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) irq_work_run_list(raised); + + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) + irq_work_run_list(this_cpu_ptr(&lazy_list)); +} + +#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) +void irq_work_tick_soft(void) +{ irq_work_run_list(this_cpu_ptr(&lazy_list)); } +#endif /* * Synchronize against the irq_work @entry, ensures the entry is not diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c index ee1bc1bb8feb..ddef07958840 100644 --- a/kernel/ksysfs.c +++ b/kernel/ksysfs.c @@ -136,6 +136,15 @@ KERNEL_ATTR_RO(vmcoreinfo); #endif /* CONFIG_KEXEC_CORE */ +#if defined(CONFIG_PREEMPT_RT_FULL) +static ssize_t realtime_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + return sprintf(buf, "%d\n", 1); +} +KERNEL_ATTR_RO(realtime); +#endif + /* whether file capabilities are enabled */ static ssize_t fscaps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -225,6 +234,9 @@ static struct attribute * kernel_attrs[] = { &rcu_expedited_attr.attr, &rcu_normal_attr.attr, #endif +#ifdef CONFIG_PREEMPT_RT_FULL + &realtime_attr.attr, +#endif NULL }; diff --git a/kernel/locking/Makefile b/kernel/locking/Makefile index 6f88e352cd4f..6ff9e8011dd0 100644 --- a/kernel/locking/Makefile +++ b/kernel/locking/Makefile @@ -2,7 +2,7 @@ # and is generally not a function of system call inputs. KCOV_INSTRUMENT := n -obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o +obj-y += semaphore.o percpu-rwsem.o ifdef CONFIG_FUNCTION_TRACER CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) @@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) endif +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) +obj-y += mutex.o obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o +endif +obj-y += rwsem.o obj-$(CONFIG_LOCKDEP) += lockdep.o ifeq ($(CONFIG_PROC_FS),y) obj-$(CONFIG_LOCKDEP) += lockdep_proc.o @@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o +ifneq ($(CONFIG_PREEMPT_RT_FULL),y) obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o +endif +obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 4d7ffc0a0d00..3d157b3128eb 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c @@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) struct lockdep_subclass_key *key; struct hlist_head *hash_head; struct lock_class *class; + bool is_static = false; if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { debug_locks_off(); @@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) /* * Static locks do not have their class-keys yet - for them the key - * is the lock object itself: + * is the lock object itself. If the lock is in the per cpu area, + * the canonical address of the lock (per cpu offset removed) is + * used. 
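
The canonical-address trick matters because every CPU sees a per-cpu
lock at a different address; keying the lockdep class on the raw
pointer would give each CPU's instance its own class and defeat the
checking. Sketch of the situation (lock name is illustrative; each
per-cpu copy still needs spin_lock_init() at boot):

	static DEFINE_PER_CPU(spinlock_t, my_percpu_lock);

	/*
	 * &per_cpu(my_percpu_lock, 0) != &per_cpu(my_percpu_lock, 1),
	 * yet all copies are logically the same lock. Stripping the
	 * per-cpu offset before using the address as a class key folds
	 * them into a single lockdep class.
	 */
	spin_lock(this_cpu_ptr(&my_percpu_lock));
	/* per-cpu critical section */
	spin_unlock(this_cpu_ptr(&my_percpu_lock));
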
*/ - if (unlikely(!lock->key)) - lock->key = (void *)lock; + if (unlikely(!lock->key)) { + unsigned long can_addr, addr = (unsigned long)lock; + + if (__is_kernel_percpu_address(addr, &can_addr)) + lock->key = (void *)can_addr; + else if (__is_module_percpu_address(addr, &can_addr)) + lock->key = (void *)can_addr; + else if (static_obj(lock)) + lock->key = (void *)lock; + else + return ERR_PTR(-EINVAL); + is_static = true; + } /* * NOTE: the class-key must be unique. For dynamic locks, a static @@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) } } - return NULL; + return is_static || static_obj(lock->key) ? NULL : ERR_PTR(-EINVAL); } /* @@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) DEBUG_LOCKS_WARN_ON(!irqs_disabled()); class = look_up_lock_class(lock, subclass); - if (likely(class)) + if (likely(!IS_ERR_OR_NULL(class))) goto out_set_class_cache; /* * Debug-check: all keys must be persistent! - */ - if (!static_obj(lock->key)) { + */ + if (IS_ERR(class)) { debug_locks_off(); printk("INFO: trying to register non-static key.\n"); printk("the code is fine but needs lockdep annotation.\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); - return NULL; } @@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) * Clearly if the lock hasn't been acquired _ever_, we're not * holding it either, so report failure. */ - if (!class) + if (IS_ERR_OR_NULL(class)) return 0; /* @@ -3689,6 +3702,7 @@ static void check_flags(unsigned long flags) } } +#ifndef CONFIG_PREEMPT_RT_FULL /* * We dont accurately track softirq state in e.g. * hardirq contexts (such as on 4KSTACKS), so only @@ -3703,6 +3717,7 @@ static void check_flags(unsigned long flags) DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } } +#endif if (!debug_locks) print_irqtrace_events(current); @@ -4159,7 +4174,7 @@ void lockdep_reset_lock(struct lockdep_map *lock) * If the class exists we look it up and zap it: */ class = look_up_lock_class(lock, j); - if (class) + if (!IS_ERR_OR_NULL(class)) zap_class(class); } /* diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c index d3de04b12f8c..0f49abeae337 100644 --- a/kernel/locking/locktorture.c +++ b/kernel/locking/locktorture.c @@ -26,7 +26,6 @@ #include <linux/kthread.h> #include <linux/sched/rt.h> #include <linux/spinlock.h> -#include <linux/rwlock.h> #include <linux/mutex.h> #include <linux/rwsem.h> #include <linux/smp.h> diff --git a/kernel/locking/percpu-rwsem.c b/kernel/locking/percpu-rwsem.c index ce182599cf2e..2ad3a1e8344c 100644 --- a/kernel/locking/percpu-rwsem.c +++ b/kernel/locking/percpu-rwsem.c @@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *sem, /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */ rcu_sync_init(&sem->rss, RCU_SCHED_SYNC); __init_rwsem(&sem->rw_sem, name, rwsem_key); - init_waitqueue_head(&sem->writer); + init_swait_queue_head(&sem->writer); sem->readers_block = 0; return 0; } @@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_semaphore *sem) __this_cpu_dec(*sem->read_count); /* Prod writer to recheck readers_active */ - wake_up(&sem->writer); + swake_up(&sem->writer); } EXPORT_SYMBOL_GPL(__percpu_up_read); @@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *sem) */ /* Wait for all now active readers to complete. 
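
The percpu-rwsem hunks above swap the writer's waitqueue for a simple
wait queue (swait), whose internals use a raw spinlock and are
therefore usable where PREEMPT_RT forbids the regular waitqueue's
sleeping lock. Basic swait usage, as a sketch (names are illustrative):

	#include <linux/swait.h>

	static DECLARE_SWAIT_QUEUE_HEAD(my_wait);
	static bool my_cond;

	/* sleeper: block until the condition holds */
	swait_event(my_wait, my_cond);

	/* waker: make the condition true, then prod the sleeper */
	my_cond = true;
	swake_up(&my_wait);
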
*/ - wait_event(sem->writer, readers_active_check(sem)); + swait_event(sem->writer, readers_active_check(sem)); } EXPORT_SYMBOL_GPL(percpu_down_write); diff --git a/kernel/locking/rt.c b/kernel/locking/rt.c new file mode 100644 index 000000000000..6284e3b15091 --- /dev/null +++ b/kernel/locking/rt.c @@ -0,0 +1,331 @@ +/* + * kernel/rt.c + * + * Real-Time Preemption Support + * + * started by Ingo Molnar: + * + * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * + * historic credit for proving that Linux spinlocks can be implemented via + * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow + * and others) who prototyped it on 2.4 and did lots of comparative + * research and analysis; TimeSys, for proving that you can implement a + * fully preemptible kernel via the use of IRQ threading and mutexes; + * Bill Huey for persuasively arguing on lkml that the mutex model is the + * right one; and to MontaVista, who ported pmutexes to 2.6. + * + * This code is a from-scratch implementation and is not based on pmutexes, + * but the idea of converting spinlocks to mutexes is used here too. + * + * lock debugging, locking tree, deadlock detection: + * + * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey + * Released under the General Public License (GPL). + * + * Includes portions of the generic R/W semaphore implementation from: + * + * Copyright (c) 2001 David Howells (dhowells@redhat.com). + * - Derived partially from idea by Andrea Arcangeli <andrea@suse.de> + * - Derived also from comments by Linus + * + * Pending ownership of locks and ownership stealing: + * + * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt + * + * (also by Steven Rostedt) + * - Converted single pi_lock to individual task locks. + * + * By Esben Nielsen: + * Doing priority inheritance with help of the scheduler. + * + * Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com> + * - major rework based on Esben Nielsens initial patch + * - replaced thread_info references by task_struct refs + * - removed task->pending_owner dependency + * - BKL drop/reacquire for semaphore style locks to avoid deadlocks + * in the scheduler return path as discussed with Steven Rostedt + * + * Copyright (C) 2006, Kihon Technologies Inc. + * Steven Rostedt <rostedt@goodmis.org> + * - debugged and patched Thomas Gleixner's rework. + * - added back the cmpxchg to the rework. + * - turned atomic require back on for SMP. 
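
The functions that follow are thin shims: on PREEMPT_RT_FULL a struct
mutex is backed by an rt_mutex, and each wrapper simply pairs the
lockdep annotation with the corresponding rt_mutex operation, so caller
code is unchanged:

	static DEFINE_MUTEX(m);	/* on RT: rt_mutex underneath */

	mutex_lock(&m);		/* _mutex_lock(): mutex_acquire() + rt_mutex_lock() */
	/* preemptible, priority-inheriting critical section */
	mutex_unlock(&m);	/* _mutex_unlock(): mutex_release() + rt_mutex_unlock() */
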
+ */ + +#include <linux/spinlock.h> +#include <linux/rtmutex.h> +#include <linux/sched.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/kallsyms.h> +#include <linux/syscalls.h> +#include <linux/interrupt.h> +#include <linux/plist.h> +#include <linux/fs.h> +#include <linux/futex.h> +#include <linux/hrtimer.h> + +#include "rtmutex_common.h" + +/* + * struct mutex functions + */ +void __mutex_do_init(struct mutex *mutex, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); + lockdep_init_map(&mutex->dep_map, name, key, 0); +#endif + mutex->lock.save_state = 0; +} +EXPORT_SYMBOL(__mutex_do_init); + +void __lockfunc _mutex_lock(struct mutex *lock) +{ + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock); + +int __lockfunc _mutex_lock_interruptible(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible); + +int __lockfunc _mutex_lock_killable(struct mutex *lock) +{ + int ret; + + mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) +{ + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nested); + +void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) +{ + mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); + rt_mutex_lock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_lock_nest_lock); + +int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); + ret = rt_mutex_lock_interruptible(&lock->lock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_interruptible_nested); + +int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) +{ + int ret; + + mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + ret = rt_mutex_lock_killable(&lock->lock); + if (ret) + mutex_release(&lock->dep_map, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(_mutex_lock_killable_nested); +#endif + +int __lockfunc _mutex_trylock(struct mutex *lock) +{ + int ret = rt_mutex_trylock(&lock->lock); + + if (ret) + mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); + + return ret; +} +EXPORT_SYMBOL(_mutex_trylock); + +void __lockfunc _mutex_unlock(struct mutex *lock) +{ + mutex_release(&lock->dep_map, 1, _RET_IP_); + rt_mutex_unlock(&lock->lock); +} +EXPORT_SYMBOL(_mutex_unlock); + +/* + * rwlock_t functions + */ +int __lockfunc rt_write_trylock(rwlock_t *rwlock) +{ + int ret; + + migrate_disable(); + ret = rt_mutex_trylock(&rwlock->lock); + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + + return ret; +} +EXPORT_SYMBOL(rt_write_trylock); + +int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) +{ + int ret; + + *flags = 0; + ret = rt_write_trylock(rwlock); + return ret; +} +EXPORT_SYMBOL(rt_write_trylock_irqsave); + +int 
__lockfunc rt_read_trylock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + int ret = 1; + + /* + * recursive read locks succeed when current owns the lock, + * but not when read_depth == 0 which means that the lock is + * write locked. + */ + if (rt_mutex_owner(lock) != current) { + migrate_disable(); + ret = rt_mutex_trylock(lock); + if (ret) + rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + + } else if (!rwlock->read_depth) { + ret = 0; + } + + if (ret) + rwlock->read_depth++; + + return ret; +} +EXPORT_SYMBOL(rt_read_trylock); + +void __lockfunc rt_write_lock(rwlock_t *rwlock) +{ + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(&rwlock->lock); +} +EXPORT_SYMBOL(rt_write_lock); + +void __lockfunc rt_read_lock(rwlock_t *rwlock) +{ + struct rt_mutex *lock = &rwlock->lock; + + + /* + * recursive read locks succeed when current owns the lock + */ + if (rt_mutex_owner(lock) != current) { + rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); + __rt_spin_lock(lock); + } + rwlock->read_depth++; +} + +EXPORT_SYMBOL(rt_read_lock); + +void __lockfunc rt_write_unlock(rwlock_t *rwlock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); + migrate_enable(); +} +EXPORT_SYMBOL(rt_write_unlock); + +void __lockfunc rt_read_unlock(rwlock_t *rwlock) +{ + /* Release the lock only when read_depth is down to 0 */ + if (--rwlock->read_depth == 0) { + rwlock_release(&rwlock->dep_map, 1, _RET_IP_); + __rt_spin_unlock(&rwlock->lock); + migrate_enable(); + } +} +EXPORT_SYMBOL(rt_read_unlock); + +unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) +{ + rt_write_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_write_lock_irqsave); + +unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) +{ + rt_read_lock(rwlock); + + return 0; +} +EXPORT_SYMBOL(rt_read_lock_irqsave); + +void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); + lockdep_init_map(&rwlock->dep_map, name, key, 0); +#endif + rwlock->lock.save_state = 1; + rwlock->read_depth = 0; +} +EXPORT_SYMBOL(__rt_rwlock_init); + +/** + * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 + * @cnt: the atomic which we are to dec + * @lock: the mutex to return holding if we dec to 0 + * + * return true and hold lock if we dec to 0, return false otherwise + */ +int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) +{ + /* dec if we can't possibly hit 0 */ + if (atomic_add_unless(cnt, -1, 1)) + return 0; + /* we might hit 0, so take the lock */ + mutex_lock(lock); + if (!atomic_dec_and_test(cnt)) { + /* when we actually did the dec, we didn't hit 0 */ + mutex_unlock(lock); + return 0; + } + /* we hit 0, and we hold the lock */ + return 1; +} +EXPORT_SYMBOL(atomic_dec_and_mutex_lock); diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c index 62b6cee8ea7f..0613c4b1d059 100644 --- a/kernel/locking/rtmutex-debug.c +++ b/kernel/locking/rtmutex-debug.c @@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) lock->name = name; } -void -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) -{ -} - -void rt_mutex_deadlock_account_unlock(struct task_struct *task) -{ -} - diff --git a/kernel/locking/rtmutex-debug.h 
b/kernel/locking/rtmutex-debug.h index d0519c3432b6..b585af9a1b50 100644 --- a/kernel/locking/rtmutex-debug.h +++ b/kernel/locking/rtmutex-debug.h @@ -9,9 +9,6 @@ * This file contains macros used solely by rtmutex.c. Debug version. */ -extern void -rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task); -extern void rt_mutex_deadlock_account_unlock(struct task_struct *task); extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 2c49d76f96c3..78a6c4a223c1 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -7,6 +7,11 @@ * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt * Copyright (C) 2006 Esben Nielsen + * Adaptive Spinlocks: + * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, + * and Peter Morreale, + * Adaptive Spinlocks simplification: + * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com> * * See Documentation/locking/rt-mutex-design.txt for details. */ @@ -16,6 +21,7 @@ #include <linux/sched/rt.h> #include <linux/sched/deadline.h> #include <linux/timer.h> +#include <linux/ww_mutex.h> #include "rtmutex_common.h" @@ -133,6 +139,12 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock) WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS); } +static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) +{ + return waiter && waiter != PI_WAKEUP_INPROGRESS && + waiter != PI_REQUEUE_INPROGRESS; +} + /* * We can speed up the acquire/release, if there's no debugging state to be * set up. @@ -222,12 +234,25 @@ static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, } #endif +#define STEAL_NORMAL 0 +#define STEAL_LATERAL 1 +/* + * Only use with rt_mutex_waiter_{less,equal}() + */ +#define task_to_waiter(p) \ + &(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline } + static inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) + struct rt_mutex_waiter *right, int mode) { - if (left->prio < right->prio) - return 1; + if (mode == STEAL_NORMAL) { + if (left->prio < right->prio) + return 1; + } else { + if (left->prio <= right->prio) + return 1; + } /* * If both waiters have dl_prio(), we check the deadlines of the @@ -236,12 +261,30 @@ rt_mutex_waiter_less(struct rt_mutex_waiter *left, * then right waiter has a dl_prio() too. */ if (dl_prio(left->prio)) - return dl_time_before(left->task->dl.deadline, - right->task->dl.deadline); + return dl_time_before(left->deadline, right->deadline); return 0; } +static inline int +rt_mutex_waiter_equal(struct rt_mutex_waiter *left, + struct rt_mutex_waiter *right) +{ + if (left->prio != right->prio) + return 0; + + /* + * If both waiters have dl_prio(), we check the deadlines of the + * associated tasks. + * If left waiter has a dl_prio(), and we didn't return 0 above, + * then right waiter has a dl_prio() too. 
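To see what the two steal modes change in practice, here is a minimal user-space model of the comparison. It is a sketch only: waiter_model stands in for struct rt_mutex_waiter, and the SCHED_DEADLINE tie-break on ->deadline is left out.

#include <stdbool.h>

struct waiter_model {
	int prio;			/* lower value = higher priority */
};

/*
 * STEAL_NORMAL: only a strictly higher-priority task wins.
 * STEAL_LATERAL: an equal-priority task wins too, so a spinlock-style
 * acquirer can take the lock from a same-priority queued waiter instead
 * of going through a pointless block/wake cycle.
 */
static bool waiter_less_model(const struct waiter_model *left,
			      const struct waiter_model *right,
			      bool lateral)
{
	return lateral ? left->prio <= right->prio
		       : left->prio <  right->prio;
}

As the __try_to_take_rt_mutex() hunk further down notes, RT tasks are excluded from lateral steals so that the extra latency a steal inflicts on the queued waiter stays bounded.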
+ */ + if (dl_prio(left->prio)) + return left->deadline == right->deadline; + + return 1; +} + static void rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) { @@ -253,7 +296,7 @@ rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter) while (*link) { parent = *link; entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry); - if (rt_mutex_waiter_less(waiter, entry)) { + if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) { link = &parent->rb_left; } else { link = &parent->rb_right; @@ -292,7 +335,7 @@ rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) while (*link) { parent = *link; entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry); - if (rt_mutex_waiter_less(waiter, entry)) { + if (rt_mutex_waiter_less(waiter, entry, STEAL_NORMAL)) { link = &parent->rb_left; } else { link = &parent->rb_right; @@ -320,72 +363,16 @@ rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) RB_CLEAR_NODE(&waiter->pi_tree_entry); } -/* - * Calculate task priority from the waiter tree priority - * - * Return task->normal_prio when the waiter tree is empty or when - * the waiter is not allowed to do priority boosting - */ -int rt_mutex_getprio(struct task_struct *task) -{ - if (likely(!task_has_pi_waiters(task))) - return task->normal_prio; - - return min(task_top_pi_waiter(task)->prio, - task->normal_prio); -} - -struct task_struct *rt_mutex_get_top_task(struct task_struct *task) -{ - if (likely(!task_has_pi_waiters(task))) - return NULL; - - return task_top_pi_waiter(task)->task; -} - -/* - * Called by sched_setscheduler() to get the priority which will be - * effective after the change. - */ -int rt_mutex_get_effective_prio(struct task_struct *task, int newprio) -{ - if (!task_has_pi_waiters(task)) - return newprio; - - if (task_top_pi_waiter(task)->task->prio <= newprio) - return task_top_pi_waiter(task)->task->prio; - return newprio; -} - -/* - * Adjust the priority of a task, after its pi_waiters got modified. - * - * This can be both boosting and unboosting. task->pi_lock must be held. - */ -static void __rt_mutex_adjust_prio(struct task_struct *task) +static void rt_mutex_adjust_prio(struct task_struct *p) { - int prio = rt_mutex_getprio(task); + struct task_struct *pi_task = NULL; - if (task->prio != prio || dl_prio(prio)) - rt_mutex_setprio(task, prio); -} + lockdep_assert_held(&p->pi_lock); -/* - * Adjust task priority (undo boosting). Called from the exit path of - * rt_mutex_slowunlock() and rt_mutex_slowlock(). - * - * (Note: We do this outside of the protection of lock->wait_lock to - * allow the lock to be taken while or before we readjust the priority - * of task. We do not use the spin_xx_mutex() variants here as we are - * outside of the debug path.) 
- */ -void rt_mutex_adjust_prio(struct task_struct *task) -{ - unsigned long flags; + if (task_has_pi_waiters(p)) + pi_task = task_top_pi_waiter(p)->task; - raw_spin_lock_irqsave(&task->pi_lock, flags); - __rt_mutex_adjust_prio(task); - raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_setprio(p, pi_task); } /* @@ -414,6 +401,14 @@ static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter, return debug_rt_mutex_detect_deadlock(waiter, chwalk); } +static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) +{ + if (waiter->savestate) + wake_up_lock_sleeper(waiter->task); + else + wake_up_process(waiter->task); +} + /* * Max number of times we'll walk the boosting chain: */ @@ -421,7 +416,8 @@ int max_lock_depth = 1024; static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) { - return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; + return rt_mutex_real_waiter(p->pi_blocked_on) ? + p->pi_blocked_on->lock : NULL; } /* @@ -557,7 +553,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * reached or the state of the chain has changed while we * dropped the locks. */ - if (!waiter) + if (!rt_mutex_real_waiter(waiter)) goto out_unlock_pi; /* @@ -608,7 +604,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * enabled we continue, but stop the requeueing in the chain * walk. */ - if (waiter->prio == task->prio) { + if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { if (!detect_deadlock) goto out_unlock_pi; else @@ -704,7 +700,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, /* [7] Requeue the waiter in the lock waiter tree. */ rt_mutex_dequeue(lock, waiter); + + /* + * Update the waiter prio fields now that we're dequeued. + * + * These values can have changed through either: + * + * sys_sched_set_scheduler() / sys_sched_setattr() + * + * or + * + * DL CBS enforcement advancing the effective deadline. + * + * Even though pi_waiters also uses these fields, and that tree is only + * updated in [11], we can do this here, since we hold [L], which + * serializes all pi_waiters access and rb_erase() does not care about + * the values of the node being removed. + */ waiter->prio = task->prio; + waiter->deadline = task->dl.deadline; + rt_mutex_enqueue(lock, waiter); /* [8] Release the task */ @@ -719,13 +734,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * follow here. This is the end of the chain we are walking. */ if (!rt_mutex_owner(lock)) { + struct rt_mutex_waiter *lock_top_waiter; + /* * If the requeue [7] above changed the top waiter, * then we need to wake the new top waiter up to try * to get the lock. */ - if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) - wake_up_process(rt_mutex_top_waiter(lock)->task); + lock_top_waiter = rt_mutex_top_waiter(lock); + if (prerequeue_top_waiter != lock_top_waiter) + rt_mutex_wake_waiter(lock_top_waiter); raw_spin_unlock_irq(&lock->wait_lock); return 0; } @@ -745,7 +763,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, */ rt_mutex_dequeue_pi(task, prerequeue_top_waiter); rt_mutex_enqueue_pi(task, waiter); - __rt_mutex_adjust_prio(task); + rt_mutex_adjust_prio(task); } else if (prerequeue_top_waiter == waiter) { /* @@ -761,7 +779,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, rt_mutex_dequeue_pi(task, waiter); waiter = rt_mutex_top_waiter(lock); rt_mutex_enqueue_pi(task, waiter); - __rt_mutex_adjust_prio(task); + rt_mutex_adjust_prio(task); } else { /* * Nothing changed. 
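Since pi_blocked_on can now also carry the PI_WAKEUP_INPROGRESS and PI_REQUEUE_INPROGRESS markers (small integers cast to a waiter pointer, defined in rtmutex_common.h later in this diff), every reader of that field has to filter through rt_mutex_real_waiter() before dereferencing. The pattern, extracted from the hunks above for clarity:

/* Flag values, never valid pointers: */
#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
#define PI_REQUEUE_INPROGRESS	((struct rt_mutex_waiter *) 2)

static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
		waiter != PI_REQUEUE_INPROGRESS;
}

/*
 * Wrong (pre-patch shape): would dereference a sentinel.
 *	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
 *
 * Right (post-patch task_blocked_on_lock()):
 */
static struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return rt_mutex_real_waiter(p->pi_blocked_on) ?
		p->pi_blocked_on->lock : NULL;
}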
No need to do any priority @@ -818,6 +836,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, return ret; } + /* * Try to take an rt-mutex * @@ -828,9 +847,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task, * @waiter: The waiter that is queued to the lock's wait tree if the * callsite called task_blocked_on_lock(), otherwise NULL */ -static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, - struct rt_mutex_waiter *waiter) +static int __try_to_take_rt_mutex(struct rt_mutex *lock, + struct task_struct *task, + struct rt_mutex_waiter *waiter, int mode) { + lockdep_assert_held(&lock->wait_lock); + /* * Before testing whether we can acquire @lock, we set the * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all @@ -866,8 +888,10 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * If waiter is not the highest priority waiter of * @lock, give up. */ - if (waiter != rt_mutex_top_waiter(lock)) + if (waiter != rt_mutex_top_waiter(lock)) { + /* XXX rt_mutex_waiter_less() ? */ return 0; + } /* * We can acquire the lock. Remove the waiter from the @@ -885,14 +909,26 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, * not need to be dequeued. */ if (rt_mutex_has_waiters(lock)) { + struct task_struct *pown = rt_mutex_top_waiter(lock)->task; + + if (task != pown) + return 0; + + /* + * Note that RT tasks are excluded from lateral-steals + * to prevent the introduction of an unbounded latency. + */ + if (rt_task(task)) + mode = STEAL_NORMAL; /* * If @task->prio is greater than or equal to * the top waiter priority (kernel view), * @task lost. */ - if (task->prio >= rt_mutex_top_waiter(lock)->prio) + if (!rt_mutex_waiter_less(task_to_waiter(task), + rt_mutex_top_waiter(lock), + mode)) return 0; - /* * The current top waiter stays enqueued. We * don't have to change anything in the lock @@ -936,10 +972,383 @@ takeit: */ rt_mutex_set_owner(lock, task); - rt_mutex_deadlock_account_lock(lock, task); + return 1; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * preemptible spin_lock functions: + */ +static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock, + bool mg_off), + bool do_mig_dis) +{ + might_sleep_no_state_check(); + + if (do_mig_dis) + migrate_disable(); + + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) + return; + else + slowfn(lock, do_mig_dis); +} +static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, + void (*slowfn)(struct rt_mutex *lock)) +{ + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; + else + slowfn(lock); +} +#ifdef CONFIG_SMP +/* + * Note that owner is a speculative pointer and dereferencing relies + * on rcu_read_lock() and the check against the lock owner. + */ +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *owner) +{ + int res = 0; + + rcu_read_lock(); + for (;;) { + if (owner != rt_mutex_owner(lock)) + break; + /* + * Ensure that owner->on_cpu is dereferenced _after_ + * checking the above to be valid. 
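rt_spin_lock_fastlock() and rt_spin_lock_fastunlock() above follow the standard rtmutex fast-path shape: a single cmpxchg of the owner field against NULL, with all waiter bookkeeping confined to the slow path. The same shape as stand-alone C11, where an atomic owner pointer stands in for the kernel's rt_mutex_cmpxchg_acquire()/_release() helpers:

#include <stdatomic.h>
#include <stddef.h>

struct task;				/* opaque stand-in */
struct fast_lock {
	_Atomic(struct task *) owner;	/* NULL == unlocked */
};

static void fast_lock_acquire(struct fast_lock *l, struct task *self,
			      void (*slowpath)(struct fast_lock *))
{
	struct task *free = NULL;

	/* Uncontended: claim ownership with one acquire cmpxchg. */
	if (atomic_compare_exchange_strong_explicit(&l->owner, &free, self,
						    memory_order_acquire,
						    memory_order_relaxed))
		return;
	slowpath(l);			/* contended: spin/block */
}

static void fast_lock_release(struct fast_lock *l, struct task *self,
			      void (*slowpath)(struct fast_lock *))
{
	struct task *me = self;

	/* No waiters recorded: drop ownership with one release cmpxchg. */
	if (atomic_compare_exchange_strong_explicit(&l->owner, &me, NULL,
						    memory_order_release,
						    memory_order_relaxed))
		return;
	slowpath(l);			/* waiter bit set: wake someone */
}

The acquire/release orderings are what make the lock a synchronization point: the previous owner's critical-section stores become visible to the next acquirer.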
+ */ + barrier(); + if (!owner->on_cpu) { + res = 1; + break; + } + cpu_relax(); + } + rcu_read_unlock(); + return res; +} +#else +static int adaptive_wait(struct rt_mutex *lock, + struct task_struct *orig_owner) +{ return 1; } +#endif + +static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task, + enum rtmutex_chainwalk chwalk); +/* + * Slow path lock function spin_lock style: this variant is very + * careful not to miss any non-lock wakeups. + * + * We store the current state under p->pi_lock in p->saved_state and + * the try_to_wake_up() code handles this accordingly. + */ +static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock, + bool mg_off) +{ + struct task_struct *lock_owner, *self = current; + struct rt_mutex_waiter waiter, *top_waiter; + unsigned long flags; + int ret; + + rt_mutex_init_waiter(&waiter, true); + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + return; + } + + BUG_ON(rt_mutex_owner(lock) == self); + + /* + * We save whatever state the task is in and we'll restore it + * after acquiring the lock taking real wakeups into account + * as well. We are serialized via pi_lock against wakeups. See + * try_to_wake_up(). + */ + raw_spin_lock(&self->pi_lock); + self->saved_state = self->state; + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + + ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK); + BUG_ON(ret); + + for (;;) { + /* Try to acquire the lock again. */ + if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) + break; + + top_waiter = rt_mutex_top_waiter(lock); + lock_owner = rt_mutex_owner(lock); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_print_deadlock(&waiter); + + if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) { + if (mg_off) + migrate_enable(); + schedule(); + if (mg_off) + migrate_disable(); + } + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(TASK_UNINTERRUPTIBLE); + raw_spin_unlock(&self->pi_lock); + } + + /* + * Restore the task state to current->saved_state. We set it + * to the original state above and the try_to_wake_up() code + * has possibly updated it when a real (non-rtmutex) wakeup + * happened while we were blocked. Clear saved_state so + * try_to_wakeup() does not get confused. + */ + raw_spin_lock(&self->pi_lock); + __set_current_state_no_track(self->saved_state); + self->saved_state = TASK_RUNNING; + raw_spin_unlock(&self->pi_lock); + + /* + * try_to_take_rt_mutex() sets the waiter bit + * unconditionally. 
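The saved_state juggling above is the heart of the sleeping-spinlock trick: the task blocks TASK_UNINTERRUPTIBLE for the lock, while any real wakeup that races in is parked in saved_state and replayed afterwards. Stripped of the pi_lock protection and PI details, the protocol looks like this (stand-in types and callbacks, not kernel code):

enum tstate { T_RUNNING, T_INTERRUPTIBLE, T_UNINTERRUPTIBLE };

struct task_model {
	enum tstate state;		/* what the scheduler acts on */
	enum tstate saved_state;	/* the caller's state, parked */
};

static void spin_slowlock_model(struct task_model *self,
				int (*try_take)(void),
				void (*schedule_fn)(void))
{
	self->saved_state = self->state;	/* park e.g. INTERRUPTIBLE  */
	self->state = T_UNINTERRUPTIBLE;	/* block only for the lock  */

	while (!try_take())
		schedule_fn();		/* real wakeups are redirected into
					 * saved_state by try_to_wake_up() */

	self->state = self->saved_state;	/* replay a racing wakeup   */
	self->saved_state = T_RUNNING;		/* keep future wakeups sane */
}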
We might have to fix that up: + */ + fixup_rt_mutex_waiters(lock); + + BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); + BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); + + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + debug_rt_mutex_free_waiter(&waiter); +} + +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper); +/* + * Slow path to release a rt_mutex spin_lock style + */ +static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) +{ + unsigned long flags; + WAKE_Q(wake_q); + WAKE_Q(wake_sleeper_q); + bool postunlock; + + raw_spin_lock_irqsave(&lock->wait_lock, flags); + postunlock = __rt_mutex_unlock_common(lock, &wake_q, &wake_sleeper_q); + raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + if (postunlock) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); +} + +void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock__no_mg); + +void __lockfunc rt_spin_lock(spinlock_t *lock) +{ + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); + spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); +} +EXPORT_SYMBOL(rt_spin_lock); + +void __lockfunc __rt_spin_lock(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true); +} +EXPORT_SYMBOL(__rt_spin_lock); + +void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock) +{ + rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false); +} +EXPORT_SYMBOL(__rt_spin_lock__no_mg); + +#ifdef CONFIG_DEBUG_LOCK_ALLOC +void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) +{ + spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); + rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); +} +EXPORT_SYMBOL(rt_spin_lock_nested); +#endif + +void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(rt_spin_unlock__no_mg); + +void __lockfunc rt_spin_unlock(spinlock_t *lock) +{ + /* NOTE: we always pass in '1' for nested, for simplicity */ + spin_release(&lock->dep_map, 1, _RET_IP_); + rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); + migrate_enable(); +} +EXPORT_SYMBOL(rt_spin_unlock); + +void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) +{ + rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); +} +EXPORT_SYMBOL(__rt_spin_unlock); + +/* + * Wait for the lock to get unlocked: instead of polling for an unlock + * (like raw spinlocks do), we lock and unlock, to force the kernel to + * schedule if there's contention: + */ +void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) +{ + spin_lock(lock); + spin_unlock(lock); +} +EXPORT_SYMBOL(rt_spin_unlock_wait); + +int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock) +{ + int ret; + + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock__no_mg); + +int __lockfunc rt_spin_trylock(spinlock_t *lock) +{ + int ret; + + migrate_disable(); + ret = rt_mutex_trylock(&lock->lock); + if (ret) + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + else + migrate_enable(); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock); + +int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) +{ + int ret; + + 
local_bh_disable(); + ret = rt_mutex_trylock(&lock->lock); + if (ret) { + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } else + local_bh_enable(); + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_bh); + +int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) +{ + int ret; + + *flags = 0; + ret = rt_mutex_trylock(&lock->lock); + if (ret) { + migrate_disable(); + spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); + } + return ret; +} +EXPORT_SYMBOL(rt_spin_trylock_irqsave); + +int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) +{ + /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */ + if (atomic_add_unless(atomic, -1, 1)) + return 0; + rt_spin_lock(lock); + if (atomic_dec_and_test(atomic)) + return 1; + rt_spin_unlock(lock); + return 0; +} +EXPORT_SYMBOL(atomic_dec_and_spin_lock); + + void +__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held lock: + */ + debug_check_no_locks_freed((void *)lock, sizeof(*lock)); + lockdep_init_map(&lock->dep_map, name, key, 0); +#endif +} +EXPORT_SYMBOL(__rt_spin_lock_init); + +#endif /* PREEMPT_RT_FULL */ + +#ifdef CONFIG_PREEMPT_RT_FULL + static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); + + if (!hold_ctx) + return 0; + + if (unlikely(ctx == hold_ctx)) + return -EALREADY; + + if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && + (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(ctx->contending_lock); + ctx->contending_lock = ww; +#endif + return -EDEADLK; + } + + return 0; +} +#else + static inline int __sched +__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) +{ + BUG(); + return 0; +} + +#endif + +static inline int +try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, + struct rt_mutex_waiter *waiter) +{ + return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); +} /* * Task blocks on lock. @@ -958,6 +1367,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, struct rt_mutex *next_lock; int chain_walk = 0, res; + lockdep_assert_held(&lock->wait_lock); + /* * Early deadlock detection. We really don't want the task to * enqueue on itself just to untangle the mess later. It's not @@ -971,10 +1382,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, return -EDEADLK; raw_spin_lock(&task->pi_lock); - __rt_mutex_adjust_prio(task); + + /* + * In the case of futex requeue PI, this will be a proxy + * lock. The task will wake unaware that it is enqueueed on + * this lock. Avoid blocking on two locks and corrupting + * pi_blocked_on via the PI_WAKEUP_INPROGRESS + * flag. futex_wait_requeue_pi() sets this when it wakes up + * before requeue (due to a signal or timeout). Do not enqueue + * the task if PI_WAKEUP_INPROGRESS is set. 
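The window that this PI_WAKEUP_INPROGRESS check closes is easiest to see as a timeline; schematically (reconstructed from the comment above and from the matching note in __rt_mutex_start_proxy_lock() further down):

/*
 *   woken waiter task                   requeueing task (futex code)
 *   -----------------                   ----------------------------
 *   futex_wait_requeue_pi()
 *     wakes early (signal/timeout)
 *     pi_blocked_on = PI_WAKEUP_INPROGRESS
 *                                       rt_mutex_start_proxy_lock()
 *                                         task_blocks_on_rt_mutex()
 *                                           sees PI_WAKEUP_INPROGRESS
 *                                           -> -EAGAIN, the waiter is NOT
 *                                              enqueued on the proxy lock
 *     blocks on hb->lock (an rtmutex on RT)
 *     pi_blocked_on = a real waiter again
 */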
+ */ + if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { + raw_spin_unlock(&task->pi_lock); + return -EAGAIN; + } + + BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); + + rt_mutex_adjust_prio(task); waiter->task = task; waiter->lock = lock; waiter->prio = task->prio; + waiter->deadline = task->dl.deadline; /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) @@ -993,8 +1422,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, rt_mutex_dequeue_pi(owner, top_waiter); rt_mutex_enqueue_pi(owner, waiter); - __rt_mutex_adjust_prio(owner); - if (owner->pi_blocked_on) + rt_mutex_adjust_prio(owner); + if (rt_mutex_real_waiter(owner->pi_blocked_on)) chain_walk = 1; } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { chain_walk = 1; @@ -1036,6 +1465,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock, * Called with lock->wait_lock held and interrupts disabled. */ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, + struct wake_q_head *wake_sleeper_q, struct rt_mutex *lock) { struct rt_mutex_waiter *waiter; @@ -1045,12 +1475,14 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, waiter = rt_mutex_top_waiter(lock); /* - * Remove it from current->pi_waiters. We do not adjust a - * possible priority boost right now. We execute wakeup in the - * boosted mode and go back to normal after releasing - * lock->wait_lock. + * Remove it from current->pi_waiters and deboost. + * + * We must in fact deboost here in order to ensure we call + * rt_mutex_setprio() to update p->pi_top_task before the + * task unblocks. */ rt_mutex_dequeue_pi(current, waiter); + rt_mutex_adjust_prio(current); /* * As we are waking up the top waiter, and the waiter stays @@ -1062,9 +1494,22 @@ static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, */ lock->owner = (void *) RT_MUTEX_HAS_WAITERS; + /* + * We deboosted before waking the top waiter task such that we don't + * run two tasks with the 'same' priority (and ensure the + * p->pi_top_task pointer points to a blocked task). This however can + * lead to priority inversion if we would get preempted after the + * deboost but before waking our donor task, hence the preempt_disable() + * before unlock. 
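Collecting the ordering that this comment and rt_mutex_postunlock() (further down) implement between them, the unlock side runs, schematically:

/*
 * 1. rt_mutex_dequeue_pi(current, waiter);  -- drop the boost source
 * 2. rt_mutex_adjust_prio(current);         -- deboost; pi_top_task now
 *                                              points to a blocked task
 * 3. preempt_disable();                     -- the now-equal waiter must
 *                                              not preempt us yet
 * 4. wake_q_add()/wake_q_add_sleeper();     -- queue the wakeup, drop
 *    pi_lock, then wait_lock
 * 5. rt_mutex_postunlock();                 -- wake_up_q() plus the
 *                                              pairing preempt_enable()
 */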
+ * + * Pairs with preempt_enable() in rt_mutex_postunlock(); + */ + preempt_disable(); + if (waiter->savestate) + wake_q_add_sleeper(wake_sleeper_q, waiter->task); + else + wake_q_add(wake_q, waiter->task); raw_spin_unlock(¤t->pi_lock); - - wake_q_add(wake_q, waiter->task); } /* @@ -1078,7 +1523,9 @@ static void remove_waiter(struct rt_mutex *lock, { bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); struct task_struct *owner = rt_mutex_owner(lock); - struct rt_mutex *next_lock; + struct rt_mutex *next_lock = NULL; + + lockdep_assert_held(&lock->wait_lock); raw_spin_lock(¤t->pi_lock); rt_mutex_dequeue(lock, waiter); @@ -1099,10 +1546,11 @@ static void remove_waiter(struct rt_mutex *lock, if (rt_mutex_has_waiters(lock)) rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); - __rt_mutex_adjust_prio(owner); + rt_mutex_adjust_prio(owner); /* Store the lock on which owner is blocked or NULL */ - next_lock = task_blocked_on_lock(owner); + if (rt_mutex_real_waiter(owner->pi_blocked_on)) + next_lock = task_blocked_on_lock(owner); raw_spin_unlock(&owner->pi_lock); @@ -1138,21 +1586,30 @@ void rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!waiter || (waiter->prio == task->prio && - !dl_prio(task->prio))) { + if (!rt_mutex_real_waiter(waiter) || + rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } next_lock = waiter->lock; - raw_spin_unlock_irqrestore(&task->pi_lock, flags); /* gets dropped in rt_mutex_adjust_prio_chain()! */ get_task_struct(task); + raw_spin_unlock_irqrestore(&task->pi_lock, flags); rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, next_lock, NULL, task); } +void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) +{ + debug_rt_mutex_init_waiter(waiter); + RB_CLEAR_NODE(&waiter->pi_tree_entry); + RB_CLEAR_NODE(&waiter->tree_entry); + waiter->task = NULL; + waiter->savestate = savestate; +} + /** * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop * @lock: the rt_mutex to take @@ -1166,7 +1623,8 @@ void rt_mutex_adjust_pi(struct task_struct *task) static int __sched __rt_mutex_slowlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - struct rt_mutex_waiter *waiter) + struct rt_mutex_waiter *waiter, + struct ww_acquire_ctx *ww_ctx) { int ret = 0; @@ -1175,16 +1633,17 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state, if (try_to_take_rt_mutex(lock, current, waiter)) break; - /* - * TASK_INTERRUPTIBLE checks for signals and - * timeout. Ignored otherwise. - */ - if (unlikely(state == TASK_INTERRUPTIBLE)) { - /* Signal pending? 
*/ - if (signal_pending(current)) - ret = -EINTR; - if (timeout && !timeout->task) - ret = -ETIMEDOUT; + if (timeout && !timeout->task) { + ret = -ETIMEDOUT; + break; + } + if (signal_pending_state(state, current)) { + ret = -EINTR; + break; + } + + if (ww_ctx && ww_ctx->acquired > 0) { + ret = __mutex_lock_check_stamp(lock, ww_ctx); if (ret) break; } @@ -1223,35 +1682,94 @@ static void rt_mutex_handle_deadlock(int res, int detect_deadlock, } } -/* - * Slow path lock function: - */ -static int __sched -rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk) +static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, + struct ww_acquire_ctx *ww_ctx) { - struct rt_mutex_waiter waiter; - unsigned long flags; - int ret = 0; +#ifdef CONFIG_DEBUG_MUTEXES + /* + * If this WARN_ON triggers, you used ww_mutex_lock to acquire, + * but released with a normal mutex_unlock in this call. + * + * This should never happen, always use ww_mutex_unlock. + */ + DEBUG_LOCKS_WARN_ON(ww->ctx); - debug_rt_mutex_init_waiter(&waiter); - RB_CLEAR_NODE(&waiter.pi_tree_entry); - RB_CLEAR_NODE(&waiter.tree_entry); + /* + * Not quite done after calling ww_acquire_done() ? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); + + if (ww_ctx->contending_lock) { + /* + * After -EDEADLK you tried to + * acquire a different ww_mutex? Bad! + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); + + /* + * You called ww_mutex_lock after receiving -EDEADLK, + * but 'forgot' to unlock everything else first? + */ + DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); + ww_ctx->contending_lock = NULL; + } /* - * Technically we could use raw_spin_[un]lock_irq() here, but this can - * be called in early boot if the cmpxchg() fast path is disabled - * (debug, no architecture support). In this case we will acquire the - * rtmutex with lock->wait_lock held. But we cannot unconditionally - * enable interrupts in that early boot case. So we need to use the - * irqsave/restore variants. + * Naughty, using a different class will lead to undefined behavior! */ - raw_spin_lock_irqsave(&lock->wait_lock, flags); + DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); +#endif + ww_ctx->acquired++; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); + struct rt_mutex_waiter *waiter, *n; + + /* + * This branch gets optimized out for the common case, + * and is only important for ww_mutex_lock. + */ + ww_mutex_lock_acquired(ww, ww_ctx); + ww->ctx = ww_ctx; + + /* + * Give any possible sleeping processes the chance to wake up, + * so they can recheck if they have to back off. 
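The test those sleepers rerun is __mutex_lock_check_stamp() from earlier in this file: a younger ww_acquire_ctx (larger stamp) must back off in favour of an older holder. The ordering decision as a condensed, runnable model (the ctx == hold_ctx -EALREADY case is omitted; the address comparison mirrors the kernel's tie-break):

#include <stdbool.h>
#include <stdint.h>
#include <limits.h>

static bool must_back_off(unsigned long my_stamp, uintptr_t my_ctx,
			  unsigned long hold_stamp, uintptr_t hold_ctx)
{
	if (!hold_ctx)
		return false;	/* holder is not using a ww context */

	/* "my stamp is not older": unsigned, wrap-safe comparison, with
	 * the context address breaking exact ties. */
	return my_stamp - hold_stamp <= LONG_MAX &&
	       (my_stamp != hold_stamp || my_ctx > hold_ctx);
}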
+ */ + rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, + tree_entry) { + /* XXX debug rt mutex waiter wakeup */ + + BUG_ON(waiter->lock != lock); + rt_mutex_wake_waiter(waiter); + } +} + +#else + +static void ww_mutex_account_lock(struct rt_mutex *lock, + struct ww_acquire_ctx *ww_ctx) +{ + BUG(); +} +#endif + +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx, + struct rt_mutex_waiter *waiter) +{ + int ret; /* Try to acquire the lock again: */ if (try_to_take_rt_mutex(lock, current, NULL)) { - raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + if (ww_ctx) + ww_mutex_account_lock(lock, ww_ctx); return 0; } @@ -1261,17 +1779,27 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, if (unlikely(timeout)) hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS); - ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk); + ret = task_blocks_on_rt_mutex(lock, waiter, current, chwalk); - if (likely(!ret)) + if (likely(!ret)) { /* sleep on the mutex */ - ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); + ret = __rt_mutex_slowlock(lock, state, timeout, waiter, + ww_ctx); + } else if (ww_ctx) { + /* ww_mutex received EDEADLK, let it become EALREADY */ + ret = __mutex_lock_check_stamp(lock, ww_ctx); + BUG_ON(!ret); + } if (unlikely(ret)) { __set_current_state(TASK_RUNNING); if (rt_mutex_has_waiters(lock)) - remove_waiter(lock, &waiter); - rt_mutex_handle_deadlock(ret, chwalk, &waiter); + remove_waiter(lock, waiter); + /* ww_mutex want to report EDEADLK/EALREADY, let them */ + if (!ww_ctx) + rt_mutex_handle_deadlock(ret, chwalk, waiter); + } else if (ww_ctx) { + ww_mutex_account_lock(lock, ww_ctx); } /* @@ -1279,6 +1807,36 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state, * unconditionally. We might have to fix that up. */ fixup_rt_mutex_waiters(lock); + return ret; +} + +/* + * Slow path lock function: + */ +static int __sched +rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx) +{ + struct rt_mutex_waiter waiter; + unsigned long flags; + int ret = 0; + + rt_mutex_init_waiter(&waiter, false); + + /* + * Technically we could use raw_spin_[un]lock_irq() here, but this can + * be called in early boot if the cmpxchg() fast path is disabled + * (debug, no architecture support). In this case we will acquire the + * rtmutex with lock->wait_lock held. But we cannot unconditionally + * enable interrupts in that early boot case. So we need to use the + * irqsave/restore variants. + */ + raw_spin_lock_irqsave(&lock->wait_lock, flags); + + ret = rt_mutex_slowlock_locked(lock, state, timeout, chwalk, ww_ctx, + &waiter); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); @@ -1328,10 +1886,12 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) /* * Slow path to release a rt-mutex. - * Return whether the current task needs to undo a potential priority boosting. + * + * Return whether the current task needs to call rt_mutex_postunlock(). */ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, - struct wake_q_head *wake_q) + struct wake_q_head *wake_q, + struct wake_q_head *wake_sleeper_q) { unsigned long flags; @@ -1340,8 +1900,6 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, debug_rt_mutex_unlock(lock); - rt_mutex_deadlock_account_unlock(current); - /* * We must be careful here if the fast path is enabled. 
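The point of splitting rt_mutex_slowlock_locked() out of rt_mutex_slowlock() is that other primitives can now enter the rtmutex slow path while already holding wait_lock. The rwsem-rt reader slow path at the end of this diff uses exactly this shape:

	struct rt_mutex_waiter waiter;

	raw_spin_lock_irq(&m->wait_lock);
	/* ... checks that must be atomic with the decision to block ... */
	rt_mutex_init_waiter(&waiter, false);
	rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL,
				 RT_MUTEX_MIN_CHAINWALK, NULL, &waiter);
	/* returns with the rtmutex owned and wait_lock still held */
	raw_spin_unlock_irq(&m->wait_lock);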
If we * have no waiters queued we cannot set owner to NULL here @@ -1387,12 +1945,10 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, * * Queue the next waiter for wakeup once we release the wait_lock. */ - mark_wakeup_next_waiter(wake_q, lock); - + mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - /* check PI boosting */ - return true; + return true; /* call rt_mutex_postunlock() */ } /* @@ -1403,63 +1959,85 @@ static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, */ static inline int rt_mutex_fastlock(struct rt_mutex *lock, int state, + struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx)) { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - } else - return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); + + return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx); } static inline int rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx, int (*slowfn)(struct rt_mutex *lock, int state, struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk)) + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx)) { if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 0; - } else - return slowfn(lock, state, timeout, chwalk); + + return slowfn(lock, state, timeout, chwalk, ww_ctx); } static inline int rt_mutex_fasttrylock(struct rt_mutex *lock, int (*slowfn)(struct rt_mutex *lock)) { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) return 1; - } + return slowfn(lock); } +/* + * Performs the wakeup of the the top-waiter and re-enables preemption. 
+ */ +void rt_mutex_postunlock(struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper) +{ + wake_up_q(wake_q); + wake_up_q_sleeper(wq_sleeper); + + /* Pairs with preempt_disable() in rt_mutex_slowunlock() */ + preempt_enable(); +} + static inline void rt_mutex_fastunlock(struct rt_mutex *lock, bool (*slowfn)(struct rt_mutex *lock, - struct wake_q_head *wqh)) + struct wake_q_head *wqh, + struct wake_q_head *wq_sleeper)) { WAKE_Q(wake_q); + WAKE_Q(wake_sleeper_q); - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) + return; - } else { - bool deboost = slowfn(lock, &wake_q); + if (slowfn(lock, &wake_q, &wake_sleeper_q)) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); +} - wake_up_q(&wake_q); +/** + * rt_mutex_lock_state - lock a rt_mutex with a given state + * + * @lock: The rt_mutex to be locked + * @state: The state to set when blocking on the rt_mutex + */ +int __sched rt_mutex_lock_state(struct rt_mutex *lock, int state) +{ + might_sleep(); - /* Undo pi boosting if necessary: */ - if (deboost) - rt_mutex_adjust_prio(current); - } + return rt_mutex_fastlock(lock, state, NULL, rt_mutex_slowlock); } /** @@ -1469,15 +2047,13 @@ rt_mutex_fastunlock(struct rt_mutex *lock, */ void __sched rt_mutex_lock(struct rt_mutex *lock) { - might_sleep(); - - rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); + rt_mutex_lock_state(lock, TASK_UNINTERRUPTIBLE); } EXPORT_SYMBOL_GPL(rt_mutex_lock); /** * rt_mutex_lock_interruptible - lock a rt_mutex interruptible - * + ** * @lock: the rt_mutex to be locked * * Returns: @@ -1486,23 +2062,32 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock); */ int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock) { - might_sleep(); - - return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); + return rt_mutex_lock_state(lock, TASK_INTERRUPTIBLE); } EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); -/* - * Futex variant with full deadlock detection. +/** + * rt_mutex_lock_killable - lock a rt_mutex killable + * + * @lock: the rt_mutex to be locked + * @detect_deadlock: deadlock detection on/off + * + * Returns: + * 0 on success + * -EINTR when interrupted by a signal */ -int rt_mutex_timed_futex_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout) +int __sched rt_mutex_lock_killable(struct rt_mutex *lock) { - might_sleep(); + return rt_mutex_lock_state(lock, TASK_KILLABLE); +} +EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, - RT_MUTEX_FULL_CHAINWALK, - rt_mutex_slowlock); +/* + * Futex variant, must not use fastpath. 
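rt_mutex_lock_state() collapses the uninterruptible, interruptible and the new killable variants into one helper. A typical caller of the killable flavour looks like this (the surrounding function is illustrative, not from this patch):

static int do_protected_work(struct rt_mutex *m)
{
	int ret;

	ret = rt_mutex_lock_killable(m);  /* only a fatal signal aborts */
	if (ret)
		return ret;		  /* -EINTR, the lock is NOT held */

	/* ... critical section ... */

	rt_mutex_unlock(m);
	return 0;
}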
+ */ +int __sched rt_mutex_futex_trylock(struct rt_mutex *lock) +{ + return rt_mutex_slowtrylock(lock); } /** @@ -1525,6 +2110,7 @@ rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout) return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, RT_MUTEX_MIN_CHAINWALK, + NULL, rt_mutex_slowlock); } EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); @@ -1542,7 +2128,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); */ int __sched rt_mutex_trylock(struct rt_mutex *lock) { +#ifdef CONFIG_PREEMPT_RT_FULL + if (WARN_ON_ONCE(in_irq() || in_nmi())) +#else if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq())) +#endif return 0; return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); @@ -1560,21 +2150,53 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock) } EXPORT_SYMBOL_GPL(rt_mutex_unlock); +static bool __sched __rt_mutex_unlock_common(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper) +{ + lockdep_assert_held(&lock->wait_lock); + + debug_rt_mutex_unlock(lock); + + if (!rt_mutex_has_waiters(lock)) { + lock->owner = NULL; + return false; /* done */ + } + + /* + * We've already deboosted, mark_wakeup_next_waiter() will + * retain preempt_disabled when we drop the wait_lock, to + * avoid inversion prior to the wakeup. preempt_disable() + * therein pairs with rt_mutex_postunlock(). + */ + mark_wakeup_next_waiter(wake_q, wq_sleeper, lock); + + return true; /* call postunlock() */ +} + /** - * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock - * @lock: the rt_mutex to be unlocked - * - * Returns: true/false indicating whether priority adjustment is - * required or not. + * Futex variant, that since futex variants do not use the fast-path, can be + * simple and will not need to retry. */ -bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wqh) +bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper) { - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); - return false; - } - return rt_mutex_slowunlock(lock, wqh); + return __rt_mutex_unlock_common(lock, wake_q, wq_sleeper); +} + +void __sched rt_mutex_futex_unlock(struct rt_mutex *lock) +{ + WAKE_Q(wake_q); + WAKE_Q(wake_sleeper_q); + bool postunlock; + + raw_spin_lock_irq(&lock->wait_lock); + postunlock = __rt_mutex_futex_unlock(lock, &wake_q, &wake_sleeper_q); + raw_spin_unlock_irq(&lock->wait_lock); + + if (postunlock) + rt_mutex_postunlock(&wake_q, &wake_sleeper_q); } /** @@ -1607,13 +2229,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy); void __rt_mutex_init(struct rt_mutex *lock, const char *name) { lock->owner = NULL; - raw_spin_lock_init(&lock->wait_lock); lock->waiters = RB_ROOT; lock->waiters_leftmost = NULL; debug_rt_mutex_init(lock, name); } -EXPORT_SYMBOL_GPL(__rt_mutex_init); +EXPORT_SYMBOL(__rt_mutex_init); /** * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a @@ -1628,10 +2249,9 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init); void rt_mutex_init_proxy_locked(struct rt_mutex *lock, struct task_struct *proxy_owner) { - __rt_mutex_init(lock, NULL); + rt_mutex_init(lock); debug_rt_mutex_proxy_lock(lock, proxy_owner); rt_mutex_set_owner(lock, proxy_owner); - rt_mutex_deadlock_account_lock(lock, proxy_owner); } /** @@ -1647,34 +2267,45 @@ void rt_mutex_proxy_unlock(struct rt_mutex *lock, { debug_rt_mutex_proxy_unlock(lock); rt_mutex_set_owner(lock, NULL); - rt_mutex_deadlock_account_unlock(proxy_owner); } -/** - * 
rt_mutex_start_proxy_lock() - Start lock acquisition for another task - * @lock: the rt_mutex to take - * @waiter: the pre-initialized rt_mutex_waiter - * @task: the task to prepare - * - * Returns: - * 0 - task blocked on lock - * 1 - acquired the lock for task, caller should wake it up - * <0 - error - * - * Special API call for FUTEX_REQUEUE_PI support. - */ -int rt_mutex_start_proxy_lock(struct rt_mutex *lock, +int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task) { int ret; - raw_spin_lock_irq(&lock->wait_lock); + if (try_to_take_rt_mutex(lock, task, NULL)) + return 1; - if (try_to_take_rt_mutex(lock, task, NULL)) { +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * In PREEMPT_RT there's an added race. + * If the task, that we are about to requeue, times out, + * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue + * to skip this task. But right after the task sets + * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then + * block on the spin_lock(&hb->lock), which in RT is an rtmutex. + * This will replace the PI_WAKEUP_INPROGRESS with the actual + * lock that it blocks on. We *must not* place this task + * on this proxy lock in that case. + * + * To prevent this race, we first take the task's pi_lock + * and check if it has updated its pi_blocked_on. If it has, + * we assume that it woke up and we return -EAGAIN. + * Otherwise, we set the task's pi_blocked_on to + * PI_REQUEUE_INPROGRESS, so that if the task is waking up + * it will know that we are in the process of requeuing it. + */ + raw_spin_lock(&task->pi_lock); + if (task->pi_blocked_on) { + raw_spin_unlock(&task->pi_lock); raw_spin_unlock_irq(&lock->wait_lock); - return 1; + return -EAGAIN; } + task->pi_blocked_on = PI_REQUEUE_INPROGRESS; + raw_spin_unlock(&task->pi_lock); +#endif /* We enforce deadlock detection for futexes */ ret = task_blocks_on_rt_mutex(lock, waiter, task, @@ -1690,17 +2321,41 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock, ret = 0; } - if (unlikely(ret)) + if (ret && rt_mutex_has_waiters(lock)) remove_waiter(lock, waiter); - raw_spin_unlock_irq(&lock->wait_lock); - debug_rt_mutex_print_deadlock(waiter); return ret; } /** + * rt_mutex_start_proxy_lock() - Start lock acquisition for another task + * @lock: the rt_mutex to take + * @waiter: the pre-initialized rt_mutex_waiter + * @task: the task to prepare + * + * Returns: + * 0 - task blocked on lock + * 1 - acquired the lock for task, caller should wake it up + * <0 - error + * + * Special API call for FUTEX_REQUEUE_PI support. + */ +int rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task) +{ + int ret; + + raw_spin_lock_irq(&lock->wait_lock); + ret = __rt_mutex_start_proxy_lock(lock, waiter, task); + raw_spin_unlock_irq(&lock->wait_lock); + + return ret; +} + +/** * rt_mutex_next_owner - return the next owner of the lock * * @lock: the rt lock query @@ -1721,36 +2376,106 @@ struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock) } /** - * rt_mutex_finish_proxy_lock() - Complete lock acquisition + * rt_mutex_wait_proxy_lock() - Wait for lock acquisition * @lock: the rt_mutex we were woken on * @to: the timeout, null if none. hrtimer should already have * been started. * @waiter: the pre-initialized rt_mutex_waiter * - * Complete the lock acquisition started our behalf by another thread. + * Wait for the the lock acquisition started on our behalf by + * rt_mutex_start_proxy_lock(). 
Upon failure, the caller must call + * rt_mutex_cleanup_proxy_lock(). * * Returns: * 0 - success * <0 - error, one of -EINTR, -ETIMEDOUT * - * Special API call for PI-futex requeue support + * Special API call for PI-futex support */ -int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, +int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, struct hrtimer_sleeper *to, struct rt_mutex_waiter *waiter) { + struct task_struct *tsk = current; int ret; raw_spin_lock_irq(&lock->wait_lock); - + /* sleep on the mutex */ set_current_state(TASK_INTERRUPTIBLE); + ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); + /* + * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might + * have to fix that up. + */ + fixup_rt_mutex_waiters(lock); - /* sleep on the mutex */ - ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); + /* + * RT has a problem here when the wait got interrupted by a timeout + * or a signal. task->pi_blocked_on is still set. The task must + * acquire the hash bucket lock when returning from this function. + * + * If the hash bucket lock is contended then the + * BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)) in + * task_blocks_on_rt_mutex() will trigger. This can be avoided by + * clearing task->pi_blocked_on which removes the task from the + * boosting chain of the rtmutex. That's correct because the task + * is not longer blocked on it. + */ + if (ret) { + raw_spin_lock(&tsk->pi_lock); + tsk->pi_blocked_on = NULL; + raw_spin_unlock(&tsk->pi_lock); + } + raw_spin_unlock_irq(&lock->wait_lock); - if (unlikely(ret)) - remove_waiter(lock, waiter); + return ret; +} +/** + * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition + * @lock: the rt_mutex we were woken on + * @waiter: the pre-initialized rt_mutex_waiter + * + * Attempt to clean up after a failed rt_mutex_wait_proxy_lock(). + * + * Unless we acquired the lock; we're still enqueued on the wait-list and can + * in fact still be granted ownership until we're removed. Therefore we can + * find we are in fact the owner and must disregard the + * rt_mutex_wait_proxy_lock() failure. + * + * Returns: + * true - did the cleanup, we done. + * false - we acquired the lock after rt_mutex_wait_proxy_lock() returned, + * caller should disregards its return value. + * + * Special API call for PI-futex support + */ +bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter) +{ + bool cleanup = false; + + raw_spin_lock_irq(&lock->wait_lock); + /* + * Do an unconditional try-lock, this deals with the lock stealing + * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter() + * sets a NULL owner. + * + * We're not interested in the return value, because the subsequent + * test on rt_mutex_owner() will infer that. If the trylock succeeded, + * we will own the lock and it will have removed the waiter. If we + * failed the trylock, we're still not owner and we need to remove + * ourselves. + */ + try_to_take_rt_mutex(lock, current, waiter); + /* + * Unless we're the owner; we're still enqueued on the wait_list. + * So check if we became owner, if not, take us off the wait_list. + */ + if (rt_mutex_owner(lock) != current) { + remove_waiter(lock, waiter); + cleanup = true; + } /* * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might * have to fix that up. 
@@ -1759,5 +2484,91 @@ int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, raw_spin_unlock_irq(&lock->wait_lock); + return cleanup; +} + +static inline int +ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) +{ +#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH + unsigned tmp; + + if (ctx->deadlock_inject_countdown-- == 0) { + tmp = ctx->deadlock_inject_interval; + if (tmp > UINT_MAX/4) + tmp = UINT_MAX; + else + tmp = tmp*2 + tmp + tmp/2; + + ctx->deadlock_inject_interval = tmp; + ctx->deadlock_inject_countdown = tmp; + ctx->contending_lock = lock; + + ww_mutex_unlock(lock); + + return -EDEADLK; + } +#endif + + return 0; +} + +#ifdef CONFIG_PREEMPT_RT_FULL +int __sched +__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ww_ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ww_ctx); + return ret; } +EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); + +int __sched +__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) +{ + int ret; + + might_sleep(); + + mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); + ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); + if (ret) + mutex_release(&lock->base.dep_map, 1, _RET_IP_); + else if (!ret && ww_ctx->acquired > 1) + return ww_mutex_deadlock_injection(lock, ww_ctx); + + return ret; +} +EXPORT_SYMBOL_GPL(__ww_mutex_lock); + +void __sched ww_mutex_unlock(struct ww_mutex *lock) +{ + int nest = !!lock->ctx; + + /* + * The unlocking fastpath is the 0->1 transition from 'locked' + * into 'unlocked' state: + */ + if (nest) { +#ifdef CONFIG_DEBUG_MUTEXES + DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); +#endif + if (lock->ctx->acquired > 0) + lock->ctx->acquired--; + lock->ctx = NULL; + } + + mutex_release(&lock->base.dep_map, nest, _RET_IP_); + rt_mutex_unlock(&lock->base.lock); +} +EXPORT_SYMBOL(ww_mutex_unlock); +#endif diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h index c4060584c407..6607802efa8b 100644 --- a/kernel/locking/rtmutex.h +++ b/kernel/locking/rtmutex.h @@ -11,8 +11,6 @@ */ #define rt_mutex_deadlock_check(l) (0) -#define rt_mutex_deadlock_account_lock(m, t) do { } while (0) -#define rt_mutex_deadlock_account_unlock(l) do { } while (0) #define debug_rt_mutex_init_waiter(w) do { } while (0) #define debug_rt_mutex_free_waiter(w) do { } while (0) #define debug_rt_mutex_lock(l) do { } while (0) diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index e317e1cbb3eb..64d89d780059 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -27,12 +27,14 @@ struct rt_mutex_waiter { struct rb_node pi_tree_entry; struct task_struct *task; struct rt_mutex *lock; + bool savestate; #ifdef CONFIG_DEBUG_RT_MUTEXES unsigned long ip; struct pid *deadlock_task_pid; struct rt_mutex *deadlock_lock; #endif int prio; + u64 deadline; }; /* @@ -98,21 +100,45 @@ enum rtmutex_chainwalk { /* * PI-futex support (proxy locking functions, etc.): */ +#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) +#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, 
struct task_struct *proxy_owner); extern void rt_mutex_proxy_unlock(struct rt_mutex *lock, struct task_struct *proxy_owner); +extern void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate); +extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, + struct task_struct *task); extern int rt_mutex_start_proxy_lock(struct rt_mutex *lock, struct rt_mutex_waiter *waiter, struct task_struct *task); -extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *to, - struct rt_mutex_waiter *waiter); -extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); -extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, - struct wake_q_head *wqh); -extern void rt_mutex_adjust_prio(struct task_struct *task); +extern int rt_mutex_wait_proxy_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *to, + struct rt_mutex_waiter *waiter); +extern bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter); + +extern int rt_mutex_futex_trylock(struct rt_mutex *l); + +extern void rt_mutex_futex_unlock(struct rt_mutex *lock); +extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, + struct wake_q_head *wqh, + struct wake_q_head *wq_sleeper); + +extern void rt_mutex_postunlock(struct wake_q_head *wake_q, + struct wake_q_head *wq_sleeper); + +/* RW semaphore special interface */ +struct ww_acquire_ctx; + +int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, + struct ww_acquire_ctx *ww_ctx, + struct rt_mutex_waiter *waiter); #ifdef CONFIG_DEBUG_RT_MUTEXES # include "rtmutex-debug.h" diff --git a/kernel/locking/rwsem-rt.c b/kernel/locking/rwsem-rt.c new file mode 100644 index 000000000000..4a708ffcded6 --- /dev/null +++ b/kernel/locking/rwsem-rt.c @@ -0,0 +1,268 @@ +/* + */ +#include <linux/rwsem.h> +#include <linux/sched.h> +#include <linux/export.h> + +#include "rtmutex_common.h" + +/* + * RT-specific reader/writer semaphores + * + * down_write() + * 1) Lock sem->rtmutex + * 2) Remove the reader BIAS to force readers into the slow path + * 3) Wait until all readers have left the critical region + * 4) Mark it write locked + * + * up_write() + * 1) Remove the write locked marker + * 2) Set the reader BIAS so readers can use the fast path again + * 3) Unlock sem->rtmutex to release blocked readers + * + * down_read() + * 1) Try fast path acquisition (reader BIAS is set) + * 2) Take sem->rtmutex.wait_lock which protects the writelocked flag + * 3) If !writelocked, acquire it for read + * 4) If writelocked, block on sem->rtmutex + * 5) unlock sem->rtmutex, goto 1) + * + * up_read() + * 1) Try fast path release (reader count != 1) + * 2) Wake the writer waiting in down_write()#3 + * + * down_read()#3 has the consequence that rw semaphores on RT are not writer + * fair; writers, however, which should be avoided in RT tasks (think + * mmap_sem), are subject to the rtmutex priority/DL inheritance mechanism. + * + * It's possible to make the rw semaphores writer fair by keeping a list of + * active readers. A blocked writer would force all newly incoming readers to + * block on the rtmutex, but the rtmutex would have to be proxy locked for one + * reader after the other. We can't use multi-reader inheritance because there + * is no way to support that with SCHED_DEADLINE. Implementing the one by one + * reader boosting/handover mechanism would be major surgery for very dubious + * value.
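Since the rest of this file is the real implementation, here is a stand-alone model of the reader-BIAS scheme the comment above describes (the comment's remaining note on writer starvation continues below). It uses C11 atomics with a plain pthread mutex standing in for the rtmutex; the bias value is an arbitrary pick for the model, and the kernel's write-locked marking and sleeping are simplified to comments:

#include <stdatomic.h>
#include <pthread.h>

#define MODEL_READER_BIAS (-100000)	/* arbitrary large negative bias */

struct model_rwsem {
	atomic_int readers;		/* MODEL_READER_BIAS + nr_readers */
	pthread_mutex_t rtmutex;	/* stands in for sem->rtmutex */
};

static void model_init(struct model_rwsem *s)
{
	atomic_init(&s->readers, MODEL_READER_BIAS);
	pthread_mutex_init(&s->rtmutex, NULL);
}

/* down_read() step 1: the fast path is open while the counter is negative */
static int model_down_read_trylock(struct model_rwsem *s)
{
	int r = atomic_load(&s->readers);

	while (r < 0) {
		if (atomic_compare_exchange_weak(&s->readers, &r, r + 1))
			return 1;
	}
	return 0;
}

static void model_up_read(struct model_rwsem *s)
{
	/* the kernel additionally wakes a draining writer when this hits 0 */
	atomic_fetch_sub(&s->readers, 1);
}

static void model_down_write(struct model_rwsem *s)
{
	pthread_mutex_lock(&s->rtmutex);		  /* step 1 */
	atomic_fetch_sub(&s->readers, MODEL_READER_BIAS); /* step 2: close fast path */
	while (atomic_load(&s->readers) != 0)		  /* step 3: drain readers */
		;	/* the kernel sleeps here instead of spinning */
	/* step 4: the kernel now marks the semaphore write locked */
}

static void model_up_write(struct model_rwsem *s)
{
	atomic_fetch_add(&s->readers, MODEL_READER_BIAS); /* reopen fast path */
	pthread_mutex_unlock(&s->rtmutex);
}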
+ * + * The risk of writer starvation is there, but the pathological use cases + * which trigger it are not necessarily the typical RT workloads. + */ + +void __rwsem_init(struct rw_semaphore *sem, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_LOCK_ALLOC + /* + * Make sure we are not reinitializing a held semaphore: + */ + debug_check_no_locks_freed((void *)sem, sizeof(*sem)); + lockdep_init_map(&sem->dep_map, name, key, 0); +#endif + atomic_set(&sem->readers, READER_BIAS); +} +EXPORT_SYMBOL(__rwsem_init); + +int __down_read_trylock(struct rw_semaphore *sem) +{ + int r, old; + + /* + * Increment reader count, if sem->readers < 0, i.e. READER_BIAS is + * set. + */ + for (r = atomic_read(&sem->readers); r < 0;) { + old = atomic_cmpxchg(&sem->readers, r, r + 1); + if (likely(old == r)) + return 1; + r = old; + } + return 0; +} + +void __sched __down_read(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + struct rt_mutex_waiter waiter; + + if (__down_read_trylock(sem)) + return; + + might_sleep(); + raw_spin_lock_irq(&m->wait_lock); + /* + * Allow readers as long as the writer has not completely + * acquired the semaphore for write. + */ + if (atomic_read(&sem->readers) != WRITER_BIAS) { + atomic_inc(&sem->readers); + raw_spin_unlock_irq(&m->wait_lock); + return; + } + + /* + * Call into the slow lock path with the rtmutex->wait_lock + * held, so this can't result in the following race: + * + * Reader1 Reader2 Writer + * down_read() + * down_write() + * rtmutex_lock(m) + * swait() + * down_read() + * unlock(m->wait_lock) + * up_read() + * swake() + * lock(m->wait_lock) + * sem->writelocked=true + * unlock(m->wait_lock) + * + * up_write() + * sem->writelocked=false + * rtmutex_unlock(m) + * down_read() + * down_write() + * rtmutex_lock(m) + * swait() + * rtmutex_lock(m) + * + * That would put Reader1 behind the writer waiting on + * Reader2 to call up_read() which might be unbounded. + */ + rt_mutex_init_waiter(&waiter, false); + rt_mutex_slowlock_locked(m, TASK_UNINTERRUPTIBLE, NULL, + RT_MUTEX_MIN_CHAINWALK, NULL, + &waiter); + /* + * The slowlock() above is guaranteed to return with the rtmutex + * now held, so there can't be a writer active. Increment the reader + * count and immediately drop the rtmutex again. + */ + atomic_inc(&sem->readers); + raw_spin_unlock_irq(&m->wait_lock); + rt_mutex_unlock(m); + + debug_rt_mutex_free_waiter(&waiter); +} + +void __up_read(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + struct task_struct *tsk; + + /* + * sem->readers can only hit 0 when a writer is waiting for the + * active readers to leave the critical region. + */ + if (!atomic_dec_and_test(&sem->readers)) + return; + + might_sleep(); + raw_spin_lock_irq(&m->wait_lock); + /* + * Wake the writer, i.e. the rtmutex owner. It might release the + * rtmutex concurrently in the fast path (due to a signal), but to + * clean up the rwsem it needs to acquire m->wait_lock. The worst + * case which can happen is a spurious wakeup.
+ */ + tsk = rt_mutex_owner(m); + if (tsk) + wake_up_process(tsk); + + raw_spin_unlock_irq(&m->wait_lock); +} + +static void __up_write_unlock(struct rw_semaphore *sem, int bias, + unsigned long flags) +{ + struct rt_mutex *m = &sem->rtmutex; + + atomic_add(READER_BIAS - bias, &sem->readers); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + rt_mutex_unlock(m); +} + +static int __sched __down_write_common(struct rw_semaphore *sem, int state) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + /* Take the rtmutex as a first step */ + if (rt_mutex_lock_state(m, state)) + return -EINTR; + + /* Force readers into slow path */ + atomic_sub(READER_BIAS, &sem->readers); + might_sleep(); + + set_current_state(state); + for (;;) { + raw_spin_lock_irqsave(&m->wait_lock, flags); + /* Have all readers left the critical region? */ + if (!atomic_read(&sem->readers)) { + atomic_set(&sem->readers, WRITER_BIAS); + __set_current_state(TASK_RUNNING); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return 0; + } + + if (signal_pending_state(state, current)) { + __set_current_state(TASK_RUNNING); + __up_write_unlock(sem, 0, flags); + return -EINTR; + } + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + + if (atomic_read(&sem->readers) != 0) { + schedule(); + set_current_state(state); + } + } +} + +void __sched __down_write(struct rw_semaphore *sem) +{ + __down_write_common(sem, TASK_UNINTERRUPTIBLE); +} + +int __sched __down_write_killable(struct rw_semaphore *sem) +{ + return __down_write_common(sem, TASK_KILLABLE); +} + +int __down_write_trylock(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + if (!rt_mutex_trylock(m)) + return 0; + + atomic_sub(READER_BIAS, &sem->readers); + + raw_spin_lock_irqsave(&m->wait_lock, flags); + if (!atomic_read(&sem->readers)) { + atomic_set(&sem->readers, WRITER_BIAS); + raw_spin_unlock_irqrestore(&m->wait_lock, flags); + return 1; + } + __up_write_unlock(sem, 0, flags); + return 0; +} + +void __up_write(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + __up_write_unlock(sem, WRITER_BIAS, flags); +} + +void __downgrade_write(struct rw_semaphore *sem) +{ + struct rt_mutex *m = &sem->rtmutex; + unsigned long flags; + + raw_spin_lock_irqsave(&m->wait_lock, flags); + /* Release it and account current as reader */ + __up_write_unlock(sem, WRITER_BIAS - 1, flags); +} diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index db3ccb1dd614..909779647bd1 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(locktype##_t *lock) \ * __[spin|read|write]_lock_bh() */ BUILD_LOCK_OPS(spin, raw_spinlock); + +#ifndef CONFIG_PREEMPT_RT_FULL BUILD_LOCK_OPS(read, rwlock); BUILD_LOCK_OPS(write, rwlock); +#endif #endif @@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_spinlock_t *lock) EXPORT_SYMBOL(_raw_spin_unlock_bh); #endif +#ifndef CONFIG_PREEMPT_RT_FULL + #ifndef CONFIG_INLINE_READ_TRYLOCK int __lockfunc _raw_read_trylock(rwlock_t *lock) { @@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwlock_t *lock) EXPORT_SYMBOL(_raw_write_unlock_bh); #endif +#endif /* !PREEMPT_RT_FULL */ + #ifdef CONFIG_DEBUG_LOCK_ALLOC void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) diff --git a/kernel/locking/spinlock_debug.c b/kernel/locking/spinlock_debug.c index 9aa0fccd5d43..76d0b40d9193 100644 --- 
a/kernel/locking/spinlock_debug.c +++ b/kernel/locking/spinlock_debug.c @@ -31,6 +31,7 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name, EXPORT_SYMBOL(__raw_spin_lock_init); +#ifndef CONFIG_PREEMPT_RT_FULL void __rwlock_init(rwlock_t *lock, const char *name, struct lock_class_key *key) { @@ -48,6 +49,7 @@ void __rwlock_init(rwlock_t *lock, const char *name, } EXPORT_SYMBOL(__rwlock_init); +#endif static void spin_dump(raw_spinlock_t *lock, const char *msg) { @@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *lock) arch_spin_unlock(&lock->raw_lock); } +#ifndef CONFIG_PREEMPT_RT_FULL static void rwlock_bug(rwlock_t *lock, const char *msg) { if (!debug_locks_off()) @@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock) debug_write_unlock(lock); arch_write_unlock(&lock->raw_lock); } + +#endif diff --git a/kernel/module.c b/kernel/module.c index 0e54d5bf0097..f27764fbfa24 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -660,16 +660,7 @@ static void percpu_modcopy(struct module *mod, memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); } -/** - * is_module_percpu_address - test whether address is from module static percpu - * @addr: address to test - * - * Test whether @addr belongs to module static percpu area. - * - * RETURNS: - * %true if @addr is from module static percpu area - */ -bool is_module_percpu_address(unsigned long addr) +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) { struct module *mod; unsigned int cpu; @@ -683,9 +674,15 @@ bool is_module_percpu_address(unsigned long addr) continue; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(mod->percpu, cpu); - - if ((void *)addr >= start && - (void *)addr < start + mod->percpu_size) { + void *va = (void *)addr; + + if (va >= start && va < start + mod->percpu_size) { + if (can_addr) { + *can_addr = (unsigned long) (va - start); + *can_addr += (unsigned long) + per_cpu_ptr(mod->percpu, + get_boot_cpu_id()); + } preempt_enable(); return true; } @@ -696,6 +693,20 @@ bool is_module_percpu_address(unsigned long addr) return false; } +/** + * is_module_percpu_address - test whether address is from module static percpu + * @addr: address to test + * + * Test whether @addr belongs to module static percpu area. + * + * RETURNS: + * %true if @addr is from module static percpu area + */ +bool is_module_percpu_address(unsigned long addr) +{ + return __is_module_percpu_address(addr, NULL); +} + #else /* ... 
!CONFIG_SMP */ static inline void __percpu *mod_percpu(struct module *mod) @@ -727,6 +738,11 @@ bool is_module_percpu_address(unsigned long addr) return false; } +bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr) +{ + return false; +} + #endif /* CONFIG_SMP */ #define MODINFO_ATTR(field) \ diff --git a/kernel/panic.c b/kernel/panic.c index dbec387099b1..b67a4803ff2b 100644 --- a/kernel/panic.c +++ b/kernel/panic.c @@ -482,9 +482,11 @@ static u64 oops_id; static int init_oops_id(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (!oops_id) get_random_bytes(&oops_id, sizeof(oops_id)); else +#endif oops_id++; return 0; diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index b26dbc48c75b..968255f27a33 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c @@ -286,6 +286,8 @@ static int create_image(int platform_mode) local_irq_disable(); + system_state = SYSTEM_SUSPEND; + error = syscore_suspend(); if (error) { printk(KERN_ERR "PM: Some system devices failed to power down, " @@ -317,6 +319,7 @@ static int create_image(int platform_mode) syscore_resume(); Enable_irqs: + system_state = SYSTEM_RUNNING; local_irq_enable(); Enable_cpus: @@ -446,6 +449,7 @@ static int resume_target_kernel(bool platform_mode) goto Enable_cpus; local_irq_disable(); + system_state = SYSTEM_SUSPEND; error = syscore_suspend(); if (error) @@ -479,6 +483,7 @@ static int resume_target_kernel(bool platform_mode) syscore_resume(); Enable_irqs: + system_state = SYSTEM_RUNNING; local_irq_enable(); Enable_cpus: @@ -564,6 +569,7 @@ int hibernation_platform_enter(void) goto Enable_cpus; local_irq_disable(); + system_state = SYSTEM_SUSPEND; syscore_suspend(); if (pm_wakeup_pending()) { error = -EAGAIN; @@ -576,6 +582,7 @@ int hibernation_platform_enter(void) Power_up: syscore_resume(); + system_state = SYSTEM_RUNNING; local_irq_enable(); Enable_cpus: @@ -676,6 +683,10 @@ static int load_image_and_restore(void) return error; } +#ifndef CONFIG_SUSPEND +bool pm_in_action; +#endif + /** * hibernate - Carry out system hibernation, including saving the image. */ @@ -689,6 +700,8 @@ int hibernate(void) return -EPERM; } + pm_in_action = true; + lock_system_sleep(); /* The snapshot device should not be opened while we're running */ if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { @@ -766,6 +779,7 @@ int hibernate(void) atomic_inc(&snapshot_device_available); Unlock: unlock_system_sleep(); + pm_in_action = false; return error; } diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index 6ccb08f57fcb..c8cbb5ed2fe3 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c @@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) arch_suspend_disable_irqs(); BUG_ON(!irqs_disabled()); + system_state = SYSTEM_SUSPEND; + error = syscore_suspend(); if (!error) { *wakeup = pm_wakeup_pending(); @@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t state, bool *wakeup) syscore_resume(); } + system_state = SYSTEM_RUNNING; + arch_suspend_enable_irqs(); BUG_ON(irqs_disabled()); @@ -527,6 +531,8 @@ static int enter_state(suspend_state_t state) return error; } +bool pm_in_action; + /** * pm_suspend - Externally visible function for suspending the system. * @state: System sleep state to enter. 
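The suspend/hibernate hunks above consistently bracket syscore_suspend()/syscore_resume() with system_state transitions, presumably so that code running in that IRQs-off window can recognize it. A minimal, purely illustrative consumer of the new state (the helper name is made up):

static bool example_safe_to_warn(void)
{
	/*
	 * Between the assignments added above, IRQs are off and most
	 * subsystems are quiesced; checks that are valid during normal
	 * operation can misfire in this window.
	 */
	return system_state != SYSTEM_SUSPEND;
}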
@@ -541,6 +547,8 @@ int pm_suspend(suspend_state_t state) if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) return -EINVAL; + pm_in_action = true; + error = enter_state(state); if (error) { suspend_stats.fail++; @@ -548,6 +556,7 @@ int pm_suspend(suspend_state_t state) } else { suspend_stats.success++; } + pm_in_action = false; return error; } EXPORT_SYMBOL(pm_suspend); diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c index 9c5b231684d0..cf15bdb6855b 100644 --- a/kernel/printk/printk.c +++ b/kernel/printk/printk.c @@ -351,6 +351,65 @@ __packed __aligned(4) */ DEFINE_RAW_SPINLOCK(logbuf_lock); +#ifdef CONFIG_EARLY_PRINTK +struct console *early_console; + +static void early_vprintk(const char *fmt, va_list ap) +{ + if (early_console) { + char buf[512]; + int n = vscnprintf(buf, sizeof(buf), fmt, ap); + + early_console->write(early_console, buf, n); + } +} + +asmlinkage void early_printk(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + early_vprintk(fmt, ap); + va_end(ap); +} + +/* + * This is independent of any log levels - a global + * kill switch that turns off all of printk. + * + * Used by the NMI watchdog if early-printk is enabled. + */ +static bool __read_mostly printk_killswitch; + +static int __init force_early_printk_setup(char *str) +{ + printk_killswitch = true; + return 0; +} +early_param("force_early_printk", force_early_printk_setup); + +void printk_kill(void) +{ + printk_killswitch = true; +} + +#ifdef CONFIG_PRINTK +static int forced_early_printk(const char *fmt, va_list ap) +{ + if (!printk_killswitch) + return 0; + early_vprintk(fmt, ap); + return 1; +} +#endif + +#else +static inline int forced_early_printk(const char *fmt, va_list ap) +{ + return 0; +} +#endif + #ifdef CONFIG_PRINTK DECLARE_WAIT_QUEUE_HEAD(log_wait); /* the next printk record to read by syslog(READ) or /proc/kmsg */ @@ -1337,6 +1396,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) { char *text; int len = 0; + int attempts = 0; text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); if (!text) @@ -1348,6 +1408,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) u64 seq; u32 idx; enum log_flags prev; + int num_msg; +try_again: + attempts++; + if (attempts > 10) { + len = -EBUSY; + goto out; + } + num_msg = 0; /* * Find first record that fits, including all following records, @@ -1363,6 +1431,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) prev = msg->flags; idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + raw_spin_unlock_irq(&logbuf_lock); + raw_spin_lock_irq(&logbuf_lock); + if (clear_seq < log_first_seq) + goto try_again; + } } /* move first record forward until length fits into the buffer */ @@ -1376,6 +1452,14 @@ static int syslog_print_all(char __user *buf, int size, bool clear) prev = msg->flags; idx = log_next(idx); seq++; + num_msg++; + if (num_msg > 5) { + num_msg = 0; + raw_spin_unlock_irq(&logbuf_lock); + raw_spin_lock_irq(&logbuf_lock); + if (clear_seq < log_first_seq) + goto try_again; + } } /* last message fitting into this dump */ @@ -1416,6 +1500,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear) clear_seq = log_next_seq; clear_idx = log_next_idx; } +out: raw_spin_unlock_irq(&logbuf_lock); kfree(text); @@ -1569,6 +1654,12 @@ static void call_console_drivers(int level, if (!console_drivers) return; + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { + if (in_irq() || in_nmi()) + return; + } + + migrate_disable(); for_each_console(con) { if 
(exclusive_console && con != exclusive_console) continue; @@ -1584,6 +1675,7 @@ static void call_console_drivers(int level, else con->write(con, text, len); } + migrate_enable(); } /* @@ -1781,6 +1873,13 @@ asmlinkage int vprintk_emit(int facility, int level, /* cpu currently holding logbuf_lock in this function */ static unsigned int logbuf_cpu = UINT_MAX; + /* + * Fall back to early_printk if a debugging subsystem has + * killed printk output + */ + if (unlikely(forced_early_printk(fmt, args))) + return 1; + if (level == LOGLEVEL_SCHED) { level = LOGLEVEL_DEFAULT; in_sched = true; @@ -1885,13 +1984,23 @@ asmlinkage int vprintk_emit(int facility, int level, /* If called from the scheduler, we can not call up(). */ if (!in_sched) { + int may_trylock = 1; + lockdep_off(); +#ifdef CONFIG_PREEMPT_RT_FULL + /* + * We can't take a sleeping lock with IRQs or preemption disabled, + * so we can't print in these contexts + */ + if (!(preempt_count() == 0 && !irqs_disabled())) + may_trylock = 0; +#endif /* * Try to acquire and then immediately release the console * semaphore. The release will print out buffers and wake up * /dev/kmsg and syslog() users. */ - if (console_trylock()) + if (may_trylock && console_trylock()) console_unlock(); lockdep_on(); } @@ -2014,26 +2123,6 @@ DEFINE_PER_CPU(printk_func_t, printk_func); #endif /* CONFIG_PRINTK */ -#ifdef CONFIG_EARLY_PRINTK -struct console *early_console; - -asmlinkage __visible void early_printk(const char *fmt, ...) -{ - va_list ap; - char buf[512]; - int n; - - if (!early_console) - return; - - va_start(ap, fmt); - n = vscnprintf(buf, sizeof(buf), fmt, ap); - va_end(ap); - - early_console->write(early_console, buf, n); -} -#endif - static int __add_preferred_console(char *name, int idx, char *options, char *brl_options) { @@ -2303,11 +2392,16 @@ static void console_cont_flush(char *text, size_t size) goto out; len = cont_print_text(text, size); +#ifdef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock_irqrestore(&logbuf_lock, flags); + call_console_drivers(cont.level, NULL, 0, text, len); +#else raw_spin_unlock(&logbuf_lock); stop_critical_timings(); call_console_drivers(cont.level, NULL, 0, text, len); start_critical_timings(); local_irq_restore(flags); +#endif return; out: raw_spin_unlock_irqrestore(&logbuf_lock, flags); @@ -2431,13 +2525,17 @@ skip: console_idx = log_next(console_idx); console_seq++; console_prev = msg->flags; +#ifdef CONFIG_PREEMPT_RT_FULL + raw_spin_unlock_irqrestore(&logbuf_lock, flags); + call_console_drivers(level, ext_text, ext_len, text, len); +#else raw_spin_unlock(&logbuf_lock); stop_critical_timings(); /* don't trace print latency */ call_console_drivers(level, ext_text, ext_len, text, len); start_critical_timings(); local_irq_restore(flags); - +#endif if (do_cond_resched) cond_resched(); } @@ -2489,6 +2587,11 @@ void console_unblank(void) { struct console *c; + if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { + if (in_irq() || in_nmi()) + return; + } + /* * console_unblank can no longer be called in interrupt context unless * oops_in_progress is set to 1..
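To make the killswitch above concrete: a debugging facility flips it and then relies on the early console exclusively. The handler below is a made-up example of the pattern the comment describes for the NMI watchdog; only printk_kill() and early_printk() come from this patch:

static void example_watchdog_report(void)
{
	/*
	 * Route every subsequent printk() to the early console; the
	 * regular logbuf/console-semaphore path is no longer safe here.
	 */
	printk_kill();
	early_printk("watchdog: stall detected on CPU%d\n",
		     raw_smp_processor_id());
}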
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index f39a7be98fc1..583ce3aad891 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c @@ -172,7 +172,14 @@ static bool ptrace_freeze_traced(struct task_struct *task) spin_lock_irq(&task->sighand->siglock); if (task_is_traced(task) && !__fatal_signal_pending(task)) { - task->state = __TASK_TRACED; + unsigned long flags; + + raw_spin_lock_irqsave(&task->pi_lock, flags); + if (task->state & __TASK_TRACED) + task->state = __TASK_TRACED; + else + task->saved_state = __TASK_TRACED; + raw_spin_unlock_irqrestore(&task->pi_lock, flags); ret = true; } spin_unlock_irq(&task->sighand->siglock); diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c index bf08fee53dc7..eeb8ce4ad7b6 100644 --- a/kernel/rcu/rcutorture.c +++ b/kernel/rcu/rcutorture.c @@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops = { .name = "rcu" }; +#ifndef CONFIG_PREEMPT_RT_FULL /* * Definitions for rcu_bh torture testing. */ @@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops = { .name = "rcu_bh" }; +#else +static struct rcu_torture_ops rcu_bh_ops = { + .ttype = INVALID_RCU_FLAVOR, +}; +#endif + /* * Don't even think about trying any of these in real life!!! * The names include "busted", and they really mean it! diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 10f62c6f48e7..dbee19478f09 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -55,6 +55,11 @@ #include <linux/random.h> #include <linux/trace_events.h> #include <linux/suspend.h> +#include <linux/delay.h> +#include <linux/gfp.h> +#include <linux/oom.h> +#include <linux/smpboot.h> +#include "../time/tick-internal.h" #include "tree.h" #include "rcu.h" @@ -260,6 +265,19 @@ void rcu_sched_qs(void) this_cpu_ptr(&rcu_sched_data), true); } +#ifdef CONFIG_PREEMPT_RT_FULL +static void rcu_preempt_qs(void); + +void rcu_bh_qs(void) +{ + unsigned long flags; + + /* Callers of this function and of rcu_preempt_qs() must disable irqs. */ + local_irq_save(flags); + rcu_preempt_qs(); + local_irq_restore(flags); +} +#else void rcu_bh_qs(void) { if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { @@ -269,6 +287,7 @@ void rcu_bh_qs(void) __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); } } +#endif static DEFINE_PER_CPU(int, rcu_sched_qs_mask); @@ -449,11 +468,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sched); /* * Return the number of RCU BH batches started thus far for debug & stats. */ +#ifndef CONFIG_PREEMPT_RT_FULL unsigned long rcu_batches_started_bh(void) { return rcu_bh_state.gpnum; } EXPORT_SYMBOL_GPL(rcu_batches_started_bh); +#endif /* * Return the number of RCU batches completed thus far for debug & stats. @@ -473,6 +494,7 @@ unsigned long rcu_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Return the number of RCU BH batches completed thus far for debug & stats. */ @@ -481,6 +503,7 @@ unsigned long rcu_batches_completed_bh(void) return rcu_bh_state.completed; } EXPORT_SYMBOL_GPL(rcu_batches_completed_bh); +#endif /* * Return the number of RCU expedited batches completed thus far for @@ -504,6 +527,7 @@ unsigned long rcu_exp_batches_completed_sched(void) } EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Force a quiescent state. */ @@ -522,6 +546,13 @@ void rcu_bh_force_quiescent_state(void) } EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); +#else +void rcu_force_quiescent_state(void) +{ +} +EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); +#endif + /* * Force a quiescent state for RCU-sched.
*/ @@ -572,9 +603,11 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags, case RCU_FLAVOR: rsp = rcu_state_p; break; +#ifndef CONFIG_PREEMPT_RT_FULL case RCU_BH_FLAVOR: rsp = &rcu_bh_state; break; +#endif case RCU_SCHED_FLAVOR: rsp = &rcu_sched_state; break; @@ -3016,18 +3049,17 @@ __rcu_process_callbacks(struct rcu_state *rsp) /* * Do RCU core processing for the current CPU. */ -static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused) +static __latent_entropy void rcu_process_callbacks(void) { struct rcu_state *rsp; if (cpu_is_offline(smp_processor_id())) return; - trace_rcu_utilization(TPS("Start RCU core")); for_each_rcu_flavor(rsp) __rcu_process_callbacks(rsp); - trace_rcu_utilization(TPS("End RCU core")); } +static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); /* * Schedule RCU callback invocation. If the specified type of RCU * does not support RCU priority boosting, just do a direct call, @@ -3039,20 +3071,107 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) { if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) return; - if (likely(!rsp->boost)) { - rcu_do_batch(rsp, rdp); - return; - } - invoke_rcu_callbacks_kthread(); + rcu_do_batch(rsp, rdp); +} + +static void rcu_wake_cond(struct task_struct *t, int status) +{ + /* + * If the thread is yielding, only wake it when this + * is invoked from idle + */ + if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) + wake_up_process(t); } +/* + * Wake up this CPU's rcuc kthread to do RCU core processing. + */ static void invoke_rcu_core(void) { - if (cpu_online(smp_processor_id())) - raise_softirq(RCU_SOFTIRQ); + unsigned long flags; + struct task_struct *t; + + if (!cpu_online(smp_processor_id())) + return; + local_irq_save(flags); + __this_cpu_write(rcu_cpu_has_work, 1); + t = __this_cpu_read(rcu_cpu_kthread_task); + if (t != NULL && current != t) + rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); + local_irq_restore(flags); +} + +static void rcu_cpu_kthread_park(unsigned int cpu) +{ + per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +} + +static int rcu_cpu_kthread_should_run(unsigned int cpu) +{ + return __this_cpu_read(rcu_cpu_has_work); } /* + * Per-CPU kernel thread that invokes RCU callbacks. This replaces the + * RCU softirq used in flavors and configurations of RCU that do not + * support RCU priority boosting. 
+ */ +static void rcu_cpu_kthread(unsigned int cpu) +{ + unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); + char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); + int spincnt; + + for (spincnt = 0; spincnt < 10; spincnt++) { + trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); + local_bh_disable(); + *statusp = RCU_KTHREAD_RUNNING; + this_cpu_inc(rcu_cpu_kthread_loops); + local_irq_disable(); + work = *workp; + *workp = 0; + local_irq_enable(); + if (work) + rcu_process_callbacks(); + local_bh_enable(); + if (*workp == 0) { + trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); + *statusp = RCU_KTHREAD_WAITING; + return; + } + } + *statusp = RCU_KTHREAD_YIELDING; + trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); + schedule_timeout_interruptible(2); + trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); + *statusp = RCU_KTHREAD_WAITING; +} + +static struct smp_hotplug_thread rcu_cpu_thread_spec = { + .store = &rcu_cpu_kthread_task, + .thread_should_run = rcu_cpu_kthread_should_run, + .thread_fn = rcu_cpu_kthread, + .thread_comm = "rcuc/%u", + .setup = rcu_cpu_kthread_setup, + .park = rcu_cpu_kthread_park, +}; + +/* + * Spawn per-CPU RCU core processing kthreads. + */ +static int __init rcu_spawn_core_kthreads(void) +{ + int cpu; + + for_each_possible_cpu(cpu) + per_cpu(rcu_cpu_has_work, cpu) = 0; + BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + return 0; +} +early_initcall(rcu_spawn_core_kthreads); + +/* * Handle any core-RCU processing required by a call_rcu() invocation. */ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp, @@ -3195,6 +3314,7 @@ void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) } EXPORT_SYMBOL_GPL(call_rcu_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /* * Queue an RCU callback for invocation after a quicker grace period. */ @@ -3203,6 +3323,7 @@ void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) __call_rcu(head, func, &rcu_bh_state, -1, 0); } EXPORT_SYMBOL_GPL(call_rcu_bh); +#endif /* * Queue an RCU callback for lazy invocation after a grace period. @@ -3294,6 +3415,7 @@ void synchronize_sched(void) } EXPORT_SYMBOL_GPL(synchronize_sched); +#ifndef CONFIG_PREEMPT_RT_FULL /** * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. * @@ -3320,6 +3442,7 @@ void synchronize_rcu_bh(void) wait_rcu_gp(call_rcu_bh); } EXPORT_SYMBOL_GPL(synchronize_rcu_bh); +#endif /** * get_state_synchronize_rcu - Snapshot current RCU state @@ -3698,6 +3821,7 @@ static void _rcu_barrier(struct rcu_state *rsp) mutex_unlock(&rsp->barrier_mutex); } +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. */ @@ -3706,6 +3830,7 @@ void rcu_barrier_bh(void) _rcu_barrier(&rcu_bh_state); } EXPORT_SYMBOL_GPL(rcu_barrier_bh); +#endif /** * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. 
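The rcuc threads above follow the standard smpboot pattern: thread_should_run() is consulted repeatedly and thread_fn() runs until its work flag is clear. A minimal sketch of that pattern with made-up names, assuming <linux/smpboot.h> (the real registration for rcuc is rcu_spawn_core_kthreads() above):

static DEFINE_PER_CPU(char, example_has_work);
static DEFINE_PER_CPU(struct task_struct *, example_task);

static int example_should_run(unsigned int cpu)
{
	return __this_cpu_read(example_has_work);
}

static void example_fn(unsigned int cpu)
{
	__this_cpu_write(example_has_work, 0);
	/* do the per-CPU work here */
}

static struct smp_hotplug_thread example_thread_spec = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
};

static int __init example_spawn(void)
{
	/* Creates one "example/N" kthread per possible CPU. */
	BUG_ON(smpboot_register_percpu_thread(&example_thread_spec));
	return 0;
}
early_initcall(example_spawn);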
@@ -4227,12 +4352,13 @@ void __init rcu_init(void) rcu_bootup_announce(); rcu_init_geometry(); +#ifndef CONFIG_PREEMPT_RT_FULL rcu_init_one(&rcu_bh_state); +#endif rcu_init_one(&rcu_sched_state); if (dump_tree) rcu_dump_rcu_node_tree(&rcu_sched_state); __rcu_init_preempt(); - open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); /* * We don't need protection against CPU-hotplug here because diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h index e99a5234d9ed..958ac107062c 100644 --- a/kernel/rcu/tree.h +++ b/kernel/rcu/tree.h @@ -588,18 +588,18 @@ extern struct list_head rcu_struct_flavors; */ extern struct rcu_state rcu_sched_state; +#ifndef CONFIG_PREEMPT_RT_FULL extern struct rcu_state rcu_bh_state; +#endif #ifdef CONFIG_PREEMPT_RCU extern struct rcu_state rcu_preempt_state; #endif /* #ifdef CONFIG_PREEMPT_RCU */ -#ifdef CONFIG_RCU_BOOST DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); DECLARE_PER_CPU(char, rcu_cpu_has_work); -#endif /* #ifdef CONFIG_RCU_BOOST */ #ifndef RCU_TREE_NONCORE @@ -619,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu_callback_t func); static void __init __rcu_init_preempt(void); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); -static void invoke_rcu_callbacks_kthread(void); static bool rcu_is_callbacks_kthread(void); +static void rcu_cpu_kthread_setup(unsigned int cpu); #ifdef CONFIG_RCU_BOOST -static void rcu_preempt_do_callbacks(void); static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, struct rcu_node *rnp); #endif /* #ifdef CONFIG_RCU_BOOST */ diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h index e3944c4b072d..be12d1aac840 100644 --- a/kernel/rcu/tree_plugin.h +++ b/kernel/rcu/tree_plugin.h @@ -24,25 +24,10 @@ * Paul E. McKenney <paulmck@linux.vnet.ibm.com> */ -#include <linux/delay.h> -#include <linux/gfp.h> -#include <linux/oom.h> -#include <linux/smpboot.h> -#include "../time/tick-internal.h" - #ifdef CONFIG_RCU_BOOST #include "../locking/rtmutex_common.h" -/* - * Control variables for per-CPU and per-rcu_node kthreads. These - * handle all flavors of RCU. - */ -static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); -DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); -DEFINE_PER_CPU(char, rcu_cpu_has_work); - #else /* #ifdef CONFIG_RCU_BOOST */ /* @@ -55,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work); #endif /* #else #ifdef CONFIG_RCU_BOOST */ +/* + * Control variables for per-CPU and per-rcu_node kthreads. These + * handle all flavors of RCU. + */ +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); +DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); +DEFINE_PER_CPU(char, rcu_cpu_has_work); + #ifdef CONFIG_RCU_NOCB_CPU static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */ static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ @@ -426,7 +419,7 @@ void rcu_read_unlock_special(struct task_struct *t) } /* Hardware IRQ handlers cannot block, complain if they get here. 
*/ - if (in_irq() || in_serving_softirq()) { + if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { lockdep_rcu_suspicious(__FILE__, __LINE__, "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", @@ -632,15 +625,6 @@ static void rcu_preempt_check_callbacks(void) t->rcu_read_unlock_special.b.need_qs = true; } -#ifdef CONFIG_RCU_BOOST - -static void rcu_preempt_do_callbacks(void) -{ - rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); -} - -#endif /* #ifdef CONFIG_RCU_BOOST */ - /* * Queue a preemptible-RCU callback for invocation after a grace period. */ @@ -829,6 +813,19 @@ void exit_rcu(void) #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ +/* + * If boosting, set rcuc kthreads to realtime priority. + */ +static void rcu_cpu_kthread_setup(unsigned int cpu) +{ +#ifdef CONFIG_RCU_BOOST + struct sched_param sp; + + sp.sched_priority = kthread_prio; + sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +#endif /* #ifdef CONFIG_RCU_BOOST */ +} + #ifdef CONFIG_RCU_BOOST #include "../locking/rtmutex_common.h" @@ -860,16 +857,6 @@ static void rcu_initiate_boost_trace(struct rcu_node *rnp) #endif /* #else #ifdef CONFIG_RCU_TRACE */ -static void rcu_wake_cond(struct task_struct *t, int status) -{ - /* - * If the thread is yielding, only wake it when this - * is invoked from idle - */ - if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) - wake_up_process(t); -} - /* * Carry out RCU priority boosting on the task indicated by ->exp_tasks * or ->boost_tasks, advancing the pointer to the next task in the @@ -1013,23 +1000,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) } /* - * Wake up the per-CPU kthread to invoke RCU callbacks. - */ -static void invoke_rcu_callbacks_kthread(void) -{ - unsigned long flags; - - local_irq_save(flags); - __this_cpu_write(rcu_cpu_has_work, 1); - if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && - current != __this_cpu_read(rcu_cpu_kthread_task)) { - rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), - __this_cpu_read(rcu_cpu_kthread_status)); - } - local_irq_restore(flags); -} - -/* * Is the current CPU running the RCU-callbacks kthread? * Caller must have preemption disabled. */ @@ -1083,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, return 0; } -static void rcu_kthread_do_work(void) -{ - rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); - rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); - rcu_preempt_do_callbacks(); -} - -static void rcu_cpu_kthread_setup(unsigned int cpu) -{ - struct sched_param sp; - - sp.sched_priority = kthread_prio; - sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -} - -static void rcu_cpu_kthread_park(unsigned int cpu) -{ - per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -} - -static int rcu_cpu_kthread_should_run(unsigned int cpu) -{ - return __this_cpu_read(rcu_cpu_has_work); -} - -/* - * Per-CPU kernel thread that invokes RCU callbacks. This replaces the - * RCU softirq used in flavors and configurations of RCU that do not - * support RCU priority boosting. 
- */ -static void rcu_cpu_kthread(unsigned int cpu) -{ - unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); - char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); - int spincnt; - - for (spincnt = 0; spincnt < 10; spincnt++) { - trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); - local_bh_disable(); - *statusp = RCU_KTHREAD_RUNNING; - this_cpu_inc(rcu_cpu_kthread_loops); - local_irq_disable(); - work = *workp; - *workp = 0; - local_irq_enable(); - if (work) - rcu_kthread_do_work(); - local_bh_enable(); - if (*workp == 0) { - trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); - *statusp = RCU_KTHREAD_WAITING; - return; - } - } - *statusp = RCU_KTHREAD_YIELDING; - trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); - schedule_timeout_interruptible(2); - trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); - *statusp = RCU_KTHREAD_WAITING; -} - /* * Set the per-rcu_node kthread's affinity to cover all CPUs that are * served by the rcu_node in question. The CPU hotplug lock is still @@ -1174,26 +1083,12 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu) free_cpumask_var(cm); } -static struct smp_hotplug_thread rcu_cpu_thread_spec = { - .store = &rcu_cpu_kthread_task, - .thread_should_run = rcu_cpu_kthread_should_run, - .thread_fn = rcu_cpu_kthread, - .thread_comm = "rcuc/%u", - .setup = rcu_cpu_kthread_setup, - .park = rcu_cpu_kthread_park, -}; - /* * Spawn boost kthreads -- called as soon as the scheduler is running. */ static void __init rcu_spawn_boost_kthreads(void) { struct rcu_node *rnp; - int cpu; - - for_each_possible_cpu(cpu) - per_cpu(rcu_cpu_has_work, cpu) = 0; - BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); rcu_for_each_leaf_node(rcu_state_p, rnp) (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); } @@ -1216,11 +1111,6 @@ static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -static void invoke_rcu_callbacks_kthread(void) -{ - WARN_ON_ONCE(1); -} - static bool rcu_is_callbacks_kthread(void) { return false; @@ -1244,7 +1134,7 @@ static void rcu_prepare_kthreads(int cpu) #endif /* #else #ifdef CONFIG_RCU_BOOST */ -#if !defined(CONFIG_RCU_FAST_NO_HZ) +#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) /* * Check to see if any future RCU-related work will need to be done @@ -1261,7 +1151,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ? 0 : rcu_cpu_has_callbacks(NULL); } +#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ +#if !defined(CONFIG_RCU_FAST_NO_HZ) /* * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up * after it. @@ -1357,6 +1249,8 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void) return cbs_ready; } +#ifndef CONFIG_PREEMPT_RT_FULL + /* * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready * to invoke. If the CPU has callbacks, try to advance them. Tell the @@ -1402,6 +1296,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nextevt) *nextevt = basemono + dj * TICK_NSEC; return 0; } +#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ /* * Prepare a CPU for idle from an RCU perspective. 
The first major task diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c index 4f6db7e6a117..ee02e1e1b3e5 100644 --- a/kernel/rcu/update.c +++ b/kernel/rcu/update.c @@ -62,7 +62,7 @@ #ifndef CONFIG_TINY_RCU module_param(rcu_expedited, int, 0); module_param(rcu_normal, int, 0); -static int rcu_normal_after_boot; +static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); module_param(rcu_normal_after_boot, int, 0); #endif /* #ifndef CONFIG_TINY_RCU */ @@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void) } EXPORT_SYMBOL_GPL(rcu_gp_is_normal); -static atomic_t rcu_expedited_nesting = - ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0); +static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1); /* * Should normal grace-period primitives be expedited? Intended for @@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp); */ void rcu_end_inkernel_boot(void) { - if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT)) - rcu_unexpedite_gp(); + rcu_unexpedite_gp(); if (rcu_normal_after_boot) WRITE_ONCE(rcu_normal, 1); } @@ -298,6 +296,7 @@ int rcu_read_lock_held(void) } EXPORT_SYMBOL_GPL(rcu_read_lock_held); +#ifndef CONFIG_PREEMPT_RT_FULL /** * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? * @@ -324,6 +323,7 @@ int rcu_read_lock_bh_held(void) return in_softirq() || irqs_disabled(); } EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); +#endif #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile index 5e59b832ae2b..7337a7f60e3f 100644 --- a/kernel/sched/Makefile +++ b/kernel/sched/Makefile @@ -17,7 +17,7 @@ endif obj-y += core.o loadavg.o clock.o cputime.o obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o -obj-y += wait.o swait.o completion.o idle.o +obj-y += wait.o swait.o swork.o completion.o idle.o obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o obj-$(CONFIG_SCHEDSTATS) += stats.o diff --git a/kernel/sched/completion.c b/kernel/sched/completion.c index 8d0f35debf35..b62cf6400fe0 100644 --- a/kernel/sched/completion.c +++ b/kernel/sched/completion.c @@ -30,10 +30,10 @@ void complete(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); x->done++; - __wake_up_locked(&x->wait, TASK_NORMAL, 1); - spin_unlock_irqrestore(&x->wait.lock, flags); + swake_up_locked(&x->wait); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete); @@ -50,10 +50,10 @@ void complete_all(struct completion *x) { unsigned long flags; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); x->done += UINT_MAX/2; - __wake_up_locked(&x->wait, TASK_NORMAL, 0); - spin_unlock_irqrestore(&x->wait.lock, flags); + swake_up_all_locked(&x->wait); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); } EXPORT_SYMBOL(complete_all); @@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x, long (*action)(long), long timeout, int state) { if (!x->done) { - DECLARE_WAITQUEUE(wait, current); + DECLARE_SWAITQUEUE(wait); - __add_wait_queue_tail_exclusive(&x->wait, &wait); + __prepare_to_swait(&x->wait, &wait); do { if (signal_pending_state(state, current)) { timeout = -ERESTARTSYS; break; } __set_current_state(state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); timeout = action(timeout); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); } while (!x->done && timeout); - __remove_wait_queue(&x->wait, &wait); + __finish_swait(&x->wait, &wait); if 
(!x->done) return timeout; } @@ -89,9 +89,9 @@ __wait_for_common(struct completion *x, { might_sleep(); - spin_lock_irq(&x->wait.lock); + raw_spin_lock_irq(&x->wait.lock); timeout = do_wait_for_common(x, action, timeout, state); - spin_unlock_irq(&x->wait.lock); + raw_spin_unlock_irq(&x->wait.lock); return timeout; } @@ -277,12 +277,12 @@ bool try_wait_for_completion(struct completion *x) if (!READ_ONCE(x->done)) return 0; - spin_lock_irqsave(&x->wait.lock, flags); + raw_spin_lock_irqsave(&x->wait.lock, flags); if (!x->done) ret = 0; else x->done--; - spin_unlock_irqrestore(&x->wait.lock, flags); + raw_spin_unlock_irqrestore(&x->wait.lock, flags); return ret; } EXPORT_SYMBOL(try_wait_for_completion); @@ -311,7 +311,7 @@ bool completion_done(struct completion *x) * after it's acquired the lock. */ smp_rmb(); - spin_unlock_wait(&x->wait.lock); + raw_spin_unlock_wait(&x->wait.lock); return true; } EXPORT_SYMBOL(completion_done); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index d7dda36fbc7b..b6bc4fd2e3d8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_features = * Number of tasks to iterate in a single balance run. * Limited because this is done with IRQs disabled. */ +#ifndef CONFIG_PREEMPT_RT_FULL const_debug unsigned int sysctl_sched_nr_migrate = 32; +#else +const_debug unsigned int sysctl_sched_nr_migrate = 8; +#endif /* * period over which we average the RT time consumption, measured @@ -345,6 +349,7 @@ static void init_rq_hrtick(struct rq *rq) hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); rq->hrtick_timer.function = hrtick; + rq->hrtick_timer.irqsafe = 1; } #else /* CONFIG_SCHED_HRTICK */ static inline void hrtick_clear(struct rq *rq) @@ -425,9 +430,15 @@ static bool set_nr_if_polling(struct task_struct *p) #endif #endif -void wake_q_add(struct wake_q_head *head, struct task_struct *task) +void __wake_q_add(struct wake_q_head *head, struct task_struct *task, + bool sleeper) { - struct wake_q_node *node = &task->wake_q; + struct wake_q_node *node; + + if (sleeper) + node = &task->wake_q_sleeper; + else + node = &task->wake_q; /* * Atomically grab the task, if ->wake_q is !nil already it means @@ -449,24 +460,33 @@ void wake_q_add(struct wake_q_head *head, struct task_struct *task) head->lastp = &node->next; } -void wake_up_q(struct wake_q_head *head) +void __wake_up_q(struct wake_q_head *head, bool sleeper) { struct wake_q_node *node = head->first; while (node != WAKE_Q_TAIL) { struct task_struct *task; - task = container_of(node, struct task_struct, wake_q); + if (sleeper) + task = container_of(node, struct task_struct, wake_q_sleeper); + else + task = container_of(node, struct task_struct, wake_q); BUG_ON(!task); /* task can safely be re-inserted now */ node = node->next; - task->wake_q.next = NULL; + if (sleeper) + task->wake_q_sleeper.next = NULL; + else + task->wake_q.next = NULL; /* * wake_up_process() implies a wmb() to pair with the queueing * in wake_q_add() so as not to miss wakeups. 
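For context, the sleeper variant above slots into the usual wake_q pattern: wakeups are queued while a lock is held and issued only after it is dropped, so wake_up_process()/wake_up_lock_sleeper() never runs under the lock. A sketch with an illustrative waiter type (only the wake_q calls are real API):

struct example_waiter {
	struct list_head	list;
	struct task_struct	*task;
};

static void example_wake_all(spinlock_t *lock, struct list_head *waiters)
{
	WAKE_Q(wake_q);
	struct example_waiter *w;

	spin_lock(lock);
	list_for_each_entry(w, waiters, list)
		wake_q_add(&wake_q, w->task);
	spin_unlock(lock);

	/* Safe to sleep/preempt here, the lock is no longer held. */
	wake_up_q(&wake_q);
}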
*/ - wake_up_process(task); + if (sleeper) + wake_up_lock_sleeper(task); + else + wake_up_process(task); put_task_struct(task); } } @@ -502,6 +522,38 @@ void resched_curr(struct rq *rq) trace_sched_wake_idle_without_ipi(cpu); } +#ifdef CONFIG_PREEMPT_LAZY +void resched_curr_lazy(struct rq *rq) +{ + struct task_struct *curr = rq->curr; + int cpu; + + if (!sched_feat(PREEMPT_LAZY)) { + resched_curr(rq); + return; + } + + lockdep_assert_held(&rq->lock); + + if (test_tsk_need_resched(curr)) + return; + + if (test_tsk_need_resched_lazy(curr)) + return; + + set_tsk_need_resched_lazy(curr); + + cpu = cpu_of(rq); + if (cpu == smp_processor_id()) + return; + + /* NEED_RESCHED_LAZY must be visible before we test polling */ + smp_mb(); + if (!tsk_is_polling(curr)) + smp_send_reschedule(cpu); +} +#endif + void resched_cpu(int cpu) { struct rq *rq = cpu_rq(cpu); @@ -525,11 +577,14 @@ void resched_cpu(int cpu) */ int get_nohz_timer_target(void) { - int i, cpu = smp_processor_id(); + int i, cpu; struct sched_domain *sd; + preempt_disable_rt(); + cpu = smp_processor_id(); + if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) - return cpu; + goto preempt_en_rt; rcu_read_lock(); for_each_domain(cpu, sd) { @@ -548,6 +603,8 @@ int get_nohz_timer_target(void) cpu = housekeeping_any_cpu(); unlock: rcu_read_unlock(); +preempt_en_rt: + preempt_enable_rt(); return cpu; } /* @@ -1093,7 +1150,8 @@ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_ma p->nr_cpus_allowed = cpumask_weight(new_mask); } -void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +static void __do_set_cpus_allowed_tail(struct task_struct *p, + const struct cpumask *new_mask) { struct rq *rq = task_rq(p); bool queued, running; @@ -1122,6 +1180,98 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) set_curr_task(rq, p); } +void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) +{ + if (__migrate_disabled(p)) { + lockdep_assert_held(&p->pi_lock); + + cpumask_copy(&p->cpus_allowed, new_mask); +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) + p->migrate_disable_update = 1; +#endif + return; + } + __do_set_cpus_allowed_tail(p, new_mask); +} + +static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); +static DEFINE_MUTEX(sched_down_mutex); +static cpumask_t sched_down_cpumask; + +void tell_sched_cpu_down_begin(int cpu) +{ + mutex_lock(&sched_down_mutex); + cpumask_set_cpu(cpu, &sched_down_cpumask); + mutex_unlock(&sched_down_mutex); +} + +void tell_sched_cpu_down_done(int cpu) +{ + mutex_lock(&sched_down_mutex); + cpumask_clear_cpu(cpu, &sched_down_cpumask); + mutex_unlock(&sched_down_mutex); +} + +/** + * migrate_me - try to move the current task off this cpu + * + * Used by the pin_current_cpu() code to try to get tasks + * to move off the current CPU as it is going down. + * It will only move the task if the task isn't pinned to + * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) + * and the task is in a RUNNING state; otherwise the + * movement of the task would wake it up (change its state + * to running) when the task did not expect it. + * + * Returns 1 if it succeeded in moving the current task, + * 0 otherwise. + */ +int migrate_me(void) +{ + struct task_struct *p = current; + struct migration_arg arg; + struct cpumask *cpumask; + struct cpumask *mask; + unsigned int dest_cpu; + struct rq_flags rf; + struct rq *rq; + + /* + * We cannot migrate tasks bound to a CPU or tasks not + * running.
The movement of the task will wake it up. + */ + if (p->flags & PF_NO_SETAFFINITY || p->state) + return 0; + + mutex_lock(&sched_down_mutex); + rq = task_rq_lock(p, &rf); + + cpumask = this_cpu_ptr(&sched_cpumasks); + mask = &p->cpus_allowed; + + cpumask_andnot(cpumask, mask, &sched_down_cpumask); + + if (!cpumask_weight(cpumask)) { + /* It's only on this CPU? */ + task_rq_unlock(rq, p, &rf); + mutex_unlock(&sched_down_mutex); + return 0; + } + + dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); + + arg.task = p; + arg.dest_cpu = dest_cpu; + + task_rq_unlock(rq, p, &rf); + + stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); + tlb_migrate_finish(p->mm); + mutex_unlock(&sched_down_mutex); + + return 1; +} + /* * Change a given task's CPU affinity. Migrate the thread to a * proper CPU and schedule it away if the CPU it's executing on @@ -1179,7 +1329,7 @@ static int __set_cpus_allowed_ptr(struct task_struct *p, } /* Can the task run on the task's current CPU? If so, we're done */ - if (cpumask_test_cpu(task_cpu(p), new_mask)) + if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) goto out; dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask); @@ -1366,6 +1516,18 @@ out: return ret; } +static bool check_task_state(struct task_struct *p, long match_state) +{ + bool match = false; + + raw_spin_lock_irq(&p->pi_lock); + if (p->state == match_state || p->saved_state == match_state) + match = true; + raw_spin_unlock_irq(&p->pi_lock); + + return match; +} + /* * wait_task_inactive - wait for a thread to unschedule. * @@ -1410,7 +1572,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * is actually now running somewhere else! */ while (task_running(rq, p)) { - if (match_state && unlikely(p->state != match_state)) + if (match_state && !check_task_state(p, match_state)) return 0; cpu_relax(); } @@ -1425,7 +1587,8 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) running = task_running(rq, p); queued = task_on_rq_queued(p); ncsw = 0; - if (!match_state || p->state == match_state) + if (!match_state || p->state == match_state || + p->saved_state == match_state) ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ task_rq_unlock(rq, p, &rf); @@ -1680,10 +1843,6 @@ static inline void ttwu_activate(struct rq *rq, struct task_struct *p, int en_fl { activate_task(rq, p, en_flags); p->on_rq = TASK_ON_RQ_QUEUED; - - /* if a worker is waking up, notify workqueue */ - if (p->flags & PF_WQ_WORKER) - wq_worker_waking_up(p, cpu_of(rq)); } /* @@ -2018,8 +2177,27 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) */ smp_mb__before_spinlock(); raw_spin_lock_irqsave(&p->pi_lock, flags); - if (!(p->state & state)) + if (!(p->state & state)) { + /* + * The task might be running due to a spinlock sleeper + * wakeup. Check the saved state and set it to running + * if the wakeup condition is true. + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) { + if (p->saved_state & state) { + p->saved_state = TASK_RUNNING; + success = 1; + } + } goto out; + } + + /* + * If this is a regular wakeup, then we can unconditionally + * clear the saved state of a "lock sleeper". + */ + if (!(wake_flags & WF_LOCK_SLEEPER)) + p->saved_state = TASK_RUNNING; trace_sched_waking(p); @@ -2102,53 +2280,6 @@ out: } /** - * try_to_wake_up_local - try to wake up a local task with rq lock held - * @p: the thread to be awakened - * @cookie: context's cookie for pinning - * - * Put @p on the run-queue if it's not already there. 
The caller must - * ensure that this_rq() is locked, @p is bound to this_rq() and not - * the current task. - */ -static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie) -{ - struct rq *rq = task_rq(p); - - if (WARN_ON_ONCE(rq != this_rq()) || - WARN_ON_ONCE(p == current)) - return; - - lockdep_assert_held(&rq->lock); - - if (!raw_spin_trylock(&p->pi_lock)) { - /* - * This is OK, because current is on_cpu, which avoids it being - * picked for load-balance and preemption/IRQs are still - * disabled avoiding further scheduler activity on it and we've - * not yet picked a replacement task. - */ - lockdep_unpin_lock(&rq->lock, cookie); - raw_spin_unlock(&rq->lock); - raw_spin_lock(&p->pi_lock); - raw_spin_lock(&rq->lock); - lockdep_repin_lock(&rq->lock, cookie); - } - - if (!(p->state & TASK_NORMAL)) - goto out; - - trace_sched_waking(p); - - if (!task_on_rq_queued(p)) - ttwu_activate(rq, p, ENQUEUE_WAKEUP); - - ttwu_do_wakeup(rq, p, 0, cookie); - ttwu_stat(p, smp_processor_id(), 0); -out: - raw_spin_unlock(&p->pi_lock); -} - -/** * wake_up_process - Wake up a specific process * @p: The process to be woken up. * @@ -2166,6 +2297,18 @@ int wake_up_process(struct task_struct *p) } EXPORT_SYMBOL(wake_up_process); +/** + * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" + * @p: The process to be woken up. + * + * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate + * the nature of the wakeup. + */ +int wake_up_lock_sleeper(struct task_struct *p) +{ + return try_to_wake_up(p, TASK_UNINTERRUPTIBLE, WF_LOCK_SLEEPER); +} + int wake_up_state(struct task_struct *p, unsigned int state) { return try_to_wake_up(p, state, 0); @@ -2442,6 +2585,9 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->on_cpu = 0; #endif init_task_preempt_count(p); +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(p)->preempt_lazy_count = 0; +#endif #ifdef CONFIG_SMP plist_node_init(&p->pushable_tasks, MAX_PRIO); RB_CLEAR_NODE(&p->pushable_dl_tasks); @@ -2770,21 +2916,16 @@ static struct rq *finish_task_switch(struct task_struct *prev) finish_arch_post_lock_switch(); fire_sched_in_preempt_notifiers(current); + /* + * We use mmdrop_delayed() here so we don't have to do the + * full __mmdrop() when we are the last user. + */ if (mm) - mmdrop(mm); + mmdrop_delayed(mm); if (unlikely(prev_state == TASK_DEAD)) { if (prev->sched_class->task_dead) prev->sched_class->task_dead(prev); - /* - * Remove function-return probe instances associated with this - * task and put them back on the free list. - */ - kprobe_flush_task(prev); - - /* Task is done with its stack. 
*/ - put_task_stack(prev); - put_task_struct(prev); } @@ -3252,6 +3393,114 @@ static inline void schedule_debug(struct task_struct *prev) schedstat_inc(this_rq()->sched_count); } +#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) + +void migrate_disable(void) +{ + struct task_struct *p = current; + + if (in_atomic() || irqs_disabled()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic++; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + if (unlikely(p->migrate_disable_atomic)) { + tracing_off(); + WARN_ON_ONCE(1); + } +#endif + + if (p->migrate_disable) { + p->migrate_disable++; + return; + } + + preempt_disable(); + preempt_lazy_disable(); + pin_current_cpu(); + p->migrate_disable = 1; + preempt_enable(); +} +EXPORT_SYMBOL(migrate_disable); + +void migrate_enable(void) +{ + struct task_struct *p = current; + + if (in_atomic() || irqs_disabled()) { +#ifdef CONFIG_SCHED_DEBUG + p->migrate_disable_atomic--; +#endif + return; + } + +#ifdef CONFIG_SCHED_DEBUG + if (unlikely(p->migrate_disable_atomic)) { + tracing_off(); + WARN_ON_ONCE(1); + } +#endif + WARN_ON_ONCE(p->migrate_disable <= 0); + + if (p->migrate_disable > 1) { + p->migrate_disable--; + return; + } + + preempt_disable(); + /* + * Clearing migrate_disable causes tsk_cpus_allowed to + * show the tasks original cpu affinity. + */ + p->migrate_disable = 0; + + if (p->migrate_disable_update) { + struct rq *rq; + struct rq_flags rf; + + rq = task_rq_lock(p, &rf); + update_rq_clock(rq); + + __do_set_cpus_allowed_tail(p, &p->cpus_allowed); + task_rq_unlock(rq, p, &rf); + + p->migrate_disable_update = 0; + + WARN_ON(smp_processor_id() != task_cpu(p)); + if (!cpumask_test_cpu(task_cpu(p), &p->cpus_allowed)) { + const struct cpumask *cpu_valid_mask = cpu_active_mask; + struct migration_arg arg; + unsigned int dest_cpu; + + if (p->flags & PF_KTHREAD) { + /* + * Kernel threads are allowed on online && !active CPUs + */ + cpu_valid_mask = cpu_online_mask; + } + dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_allowed); + arg.task = p; + arg.dest_cpu = dest_cpu; + + unpin_current_cpu(); + preempt_lazy_enable(); + preempt_enable(); + stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg); + tlb_migrate_finish(p->mm); + return; + } + } + + unpin_current_cpu(); + preempt_enable(); + preempt_lazy_enable(); +} +EXPORT_SYMBOL(migrate_enable); +#endif + /* * Pick up the highest-prio task: */ @@ -3368,19 +3617,6 @@ static void __sched notrace __schedule(bool preempt) } else { deactivate_task(rq, prev, DEQUEUE_SLEEP); prev->on_rq = 0; - - /* - * If a worker went to sleep, notify and ask workqueue - * whether it wants to wake up a task to maintain - * concurrency. - */ - if (prev->flags & PF_WQ_WORKER) { - struct task_struct *to_wakeup; - - to_wakeup = wq_worker_sleeping(prev); - if (to_wakeup) - try_to_wake_up_local(to_wakeup, cookie); - } } switch_count = &prev->nvcsw; } @@ -3390,6 +3626,7 @@ static void __sched notrace __schedule(bool preempt) next = pick_next_task(rq, prev, cookie); clear_tsk_need_resched(prev); + clear_tsk_need_resched_lazy(prev); clear_preempt_need_resched(); rq->clock_skip_update = 0; @@ -3437,9 +3674,20 @@ void __noreturn do_task_dead(void) static inline void sched_submit_work(struct task_struct *tsk) { - if (!tsk->state || tsk_is_pi_blocked(tsk)) + if (!tsk->state) return; /* + * If a worker went to sleep, notify and ask workqueue whether + * it wants to wake up a task to maintain concurrency. 
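For context, the migrate_disable()/migrate_enable() pair (implemented further down) lets RT code stay on one CPU while remaining preemptible, where mainline would reach for preempt_disable(). A usage sketch with a hypothetical per-CPU variable; note that serialization against other tasks on the same CPU still needs a local lock, omitted here:

/* hypothetical per-CPU statistic, for illustration only */
static DEFINE_PER_CPU(unsigned long, example_stat);

static void touch_local_stat(void)
{
	unsigned long *stat;

	migrate_disable();	/* pinned to this CPU, preemption stays enabled */
	stat = this_cpu_ptr(&example_stat);
	(*stat)++;		/* the pointer stays valid even if we are preempted */
	migrate_enable();	/* a deferred affinity update may move us now */
}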
+ */ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_sleeping(tsk); + + + if (tsk_is_pi_blocked(tsk)) + return; + + /* * If we are going to sleep and we have plugged IO queued, * make sure to submit it to avoid deadlocks. */ @@ -3447,6 +3695,12 @@ static inline void sched_submit_work(struct task_struct *tsk) blk_schedule_flush_plug(tsk); } +static void sched_update_worker(struct task_struct *tsk) +{ + if (tsk->flags & PF_WQ_WORKER) + wq_worker_running(tsk); +} + asmlinkage __visible void __sched schedule(void) { struct task_struct *tsk = current; @@ -3457,6 +3711,7 @@ asmlinkage __visible void __sched schedule(void) __schedule(false); sched_preempt_enable_no_resched(); } while (need_resched()); + sched_update_worker(tsk); } EXPORT_SYMBOL(schedule); @@ -3520,6 +3775,30 @@ static void __sched notrace preempt_schedule_common(void) } while (need_resched()); } +#ifdef CONFIG_PREEMPT_LAZY +/* + * If TIF_NEED_RESCHED is set then we allow being scheduled away, since it is + * set by an RT task. Otherwise we try to avoid being scheduled out as long as + * the preempt_lazy_count counter is >0. + */ +static __always_inline int preemptible_lazy(void) +{ + if (test_thread_flag(TIF_NEED_RESCHED)) + return 1; + if (current_thread_info()->preempt_lazy_count) + return 0; + return 1; +} + +#else + +static inline int preemptible_lazy(void) +{ + return 1; +} + +#endif + #ifdef CONFIG_PREEMPT /* * this is the entry point to schedule() from in-kernel preemption @@ -3534,7 +3813,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void) */ if (likely(!preemptible())) return; - + if (!preemptible_lazy()) + return; preempt_schedule_common(); } NOKPROBE_SYMBOL(preempt_schedule); @@ -3561,6 +3841,9 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) if (likely(!preemptible())) return; + if (!preemptible_lazy()) + return; + do { /* * Because the function tracer can trace preempt_count_sub() @@ -3583,7 +3866,16 @@ asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) * an infinite recursion. */ prev_ctx = exception_enter(); + /* + * The add/subtract must not be traced by the function + * tracer. But we still want to account for the + * preempt off latency tracer. Since the _notrace versions + * of add/subtract skip the accounting for latency tracer + * we must force it manually. + */ + start_critical_timings(); __schedule(true); + stop_critical_timings(); exception_exit(prev_ctx); preempt_latency_stop(1); @@ -3629,10 +3921,25 @@ EXPORT_SYMBOL(default_wake_function); #ifdef CONFIG_RT_MUTEXES +static inline int __rt_effective_prio(struct task_struct *pi_task, int prio) +{ + if (pi_task) + prio = min(prio, pi_task->prio); + + return prio; +} + +static inline int rt_effective_prio(struct task_struct *p, int prio) +{ + struct task_struct *pi_task = rt_mutex_get_top_task(p); + + return __rt_effective_prio(pi_task, prio); +} + /* * rt_mutex_setprio - set the current priority of a task - * @p: task - * @prio: prio value (kernel-internal form) + * @p: task to boost + * @pi_task: donor task * * This function changes the 'effective' priority of a task. It does * not touch ->normal_prio like __setscheduler(). * * Used by the rt_mutex code to implement priority inheritance * logic. Call site only calls if the priority of the task changed.
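A worked example of the boosting arithmetic introduced above: kernel-internal priorities are lower-is-more-urgent, so __rt_effective_prio() simply takes the minimum of the task's own priority and the donor's. For a SCHED_OTHER lock owner (normal_prio 120) and a SCHED_FIFO donor at internal prio 29:

	prio = __rt_effective_prio(pi_task, 120);	/* pi_task->prio == 29 -> boosted to 29 */
	prio = __rt_effective_prio(NULL, 120);		/* no donor -> stays at 120 */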
*/ -void rt_mutex_setprio(struct task_struct *p, int prio) +void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task) { - int oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE; + int prio, oldprio, queued, running, queue_flag = DEQUEUE_SAVE | DEQUEUE_MOVE; const struct sched_class *prev_class; struct rq_flags rf; struct rq *rq; - BUG_ON(prio > MAX_PRIO); + /* XXX used to be waiter->prio, not waiter->task->prio */ + prio = __rt_effective_prio(pi_task, p->normal_prio); + + /* + * If nothing changed, bail out early. + */ + if (p->pi_top_task == pi_task && prio == p->prio && !dl_prio(prio)) + return; rq = __task_rq_lock(p, &rf); + /* + * Set under pi_lock && rq->lock, such that the value can be used under + * either lock. + * + * Note that there is a lot of trickery needed to make this pointer cache + * work right. rt_mutex_slowunlock()+rt_mutex_postunlock() work together to + * ensure a task is de-boosted (pi_task is set to NULL) before the + * task is allowed to run again (and can exit). This ensures the pointer + * points to a blocked task -- which guarantees the task is present. + */ + p->pi_top_task = pi_task; + + /* + * For FIFO/RR we only need to set prio, if that matches we're done. + */ + if (prio == p->prio && !dl_prio(prio)) + goto out_unlock; /* * Idle task boosting is a no-no in general. There is one @@ -3669,7 +4000,7 @@ void rt_mutex_setprio(struct task_struct *p, int prio) goto out_unlock; } - trace_sched_pi_setprio(p, prio); + trace_sched_pi_setprio(p, pi_task); oldprio = p->prio; if (oldprio == prio) @@ -3693,7 +4024,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio) * running task */ if (dl_prio(prio)) { - struct task_struct *pi_task = rt_mutex_get_top_task(p); if (!dl_prio(p->normal_prio) || (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) { p->dl.dl_boosted = 1; @@ -3730,6 +4060,11 @@ out_unlock: balance_callback(rq); preempt_enable(); } +#else +static inline int rt_effective_prio(struct task_struct *p, int prio) +{ + return prio; +} #endif void set_user_nice(struct task_struct *p, long nice) @@ -3974,10 +4309,9 @@ static void __setscheduler(struct rq *rq, struct task_struct *p, * Keep a potential priority boosting if called from * sched_setscheduler(). */ + p->prio = normal_prio(p); if (keep_boost) - p->prio = rt_mutex_get_effective_prio(p, normal_prio(p)); - else - p->prio = normal_prio(p); + p->prio = rt_effective_prio(p, p->prio); if (dl_prio(p->prio)) p->sched_class = &dl_sched_class; @@ -4264,7 +4598,7 @@ change: * the runqueue. This will be done when the task deboosts * itself. */ - new_effective_prio = rt_mutex_get_effective_prio(p, newprio); + new_effective_prio = rt_effective_prio(p, newprio); if (new_effective_prio == oldprio) queue_flags &= ~DEQUEUE_MOVE; } @@ -4939,6 +5273,7 @@ int __cond_resched_lock(spinlock_t *lock) } EXPORT_SYMBOL(__cond_resched_lock); +#ifndef CONFIG_PREEMPT_RT_FULL int __sched __cond_resched_softirq(void) { BUG_ON(!in_softirq()); @@ -4952,6 +5287,7 @@ int __sched __cond_resched_softirq(void) return 0; } EXPORT_SYMBOL(__cond_resched_softirq); +#endif /** * yield - yield the current processor to other threads. @@ -5315,7 +5651,9 @@ void init_idle(struct task_struct *idle, int cpu) /* Set the preempt count _outside_ the spinlocks!
*/ init_idle_preempt_count(idle, cpu); - +#ifdef CONFIG_HAVE_PREEMPT_LAZY + task_thread_info(idle)->preempt_lazy_count = 0; +#endif /* * The idle tasks have their own, simple scheduling class: */ @@ -5458,6 +5796,8 @@ void sched_setnuma(struct task_struct *p, int nid) #endif /* CONFIG_NUMA_BALANCING */ #ifdef CONFIG_HOTPLUG_CPU +static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); + /* * Ensures that the idle task is using init_mm right before its cpu goes * offline. @@ -5472,7 +5812,12 @@ void idle_task_exit(void) switch_mm(mm, &init_mm, current); finish_arch_post_lock_switch(); } - mmdrop(mm); + /* + * Defer the cleanup to an alive cpu. On RT we can neither + * call mmdrop() nor mmdrop_delayed() from here. + */ + per_cpu(idle_last_mm, smp_processor_id()) = mm; + } /* @@ -7433,6 +7778,10 @@ int sched_cpu_dying(unsigned int cpu) update_max_interval(); nohz_balance_exit_idle(cpu); hrtick_clear(rq); + if (per_cpu(idle_last_mm, cpu)) { + mmdrop_delayed(per_cpu(idle_last_mm, cpu)); + per_cpu(idle_last_mm, cpu) = NULL; + } return 0; } #endif @@ -7694,7 +8043,7 @@ void __init sched_init(void) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP static inline int preempt_count_equals(int preempt_offset) { - int nested = preempt_count() + rcu_preempt_depth(); + int nested = preempt_count() + sched_rcu_preempt_depth(); return (nested == preempt_offset); } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index c95c5122b105..e00accf92a4b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); timer->function = dl_task_timer; + timer->irqsafe = 1; } static diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index fa178b62ea79..935224123441 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -558,6 +558,9 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) P(rt_throttled); PN(rt_time); PN(rt_runtime); +#ifdef CONFIG_SMP + P(rt_nr_migratory); +#endif #undef PN #undef P @@ -953,6 +956,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m) #endif P(policy); P(prio); +#ifdef CONFIG_PREEMPT_RT_FULL + P(migrate_disable); +#endif + P(nr_cpus_allowed); #undef PN_SCHEDSTAT #undef PN #undef __PN diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7a68c631d5b5..f9189909640b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; if (delta_exec > ideal_runtime) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); /* * The current task ran long enough, ensure it doesn't get * re-elected due to buddy favours. @@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) return; if (delta > ideal_runtime) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static void @@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) * validating it and just reschedule. 
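The resched_curr_lazy() calls that replace resched_curr() in the fair-class paths here and below defer preemption of a SCHED_OTHER task while a lazy-preempt section is active. The helper's body is not visible in this hunk; a simplified sketch of its expected shape, using the set_tsk_need_resched_lazy() counterpart of the clear_tsk_need_resched_lazy() call seen in __schedule() above:

/* Sketch: request a "lazy" reschedule for fair tasks. */
void resched_curr_lazy_sketch(struct rq *rq)
{
	if (!sched_feat(PREEMPT_LAZY)) {
		resched_curr(rq);		/* behave exactly like mainline */
		return;
	}
	if (test_tsk_need_resched(rq->curr))
		return;				/* a hard resched is already pending */
	/* honoured at the next preemption point, unless lazy-disabled: */
	set_tsk_need_resched_lazy(rq->curr);
}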
*/ if (queued) { - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); return; } /* @@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) * hierarchy can be throttled */ if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) - resched_curr(rq_of(cfs_rq)); + resched_curr_lazy(rq_of(cfs_rq)); } static __always_inline @@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq *rq, struct task_struct *p) if (delta < 0) { if (rq->curr == p) - resched_curr(rq); + resched_curr_lazy(rq); return; } hrtick_start(rq, delta); @@ -5862,7 +5862,7 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ return; preempt: - resched_curr(rq); + resched_curr_lazy(rq); /* * Only set the backward buddy when the current task is still * on the rq. This can happen when a wakeup gets interleaved @@ -8588,7 +8588,7 @@ static void task_fork_fair(struct task_struct *p) * 'current' within the tree based on its new key value. */ swap(curr->vruntime, se->vruntime); - resched_curr(rq); + resched_curr_lazy(rq); } se->vruntime -= cfs_rq->min_vruntime; @@ -8612,7 +8612,7 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio) */ if (rq->curr == p) { if (p->prio > oldprio) - resched_curr(rq); + resched_curr_lazy(rq); } else check_preempt_curr(rq, p, 0); } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 69631fa46c2f..6d28fcd08872 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -45,11 +45,19 @@ SCHED_FEAT(LB_BIAS, true) */ SCHED_FEAT(NONTASK_CAPACITY, true) +#ifdef CONFIG_PREEMPT_RT_FULL +SCHED_FEAT(TTWU_QUEUE, false) +# ifdef CONFIG_PREEMPT_LAZY +SCHED_FEAT(PREEMPT_LAZY, true) +# endif +#else + /* * Queue remote wakeups on the target CPU and process them * using the scheduler IPI. Reduces rq->lock contention/bounces. 
*/ SCHED_FEAT(TTWU_QUEUE, true) +#endif #ifdef HAVE_RT_PUSH_IPI /* diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index f139f22ce30d..b0691f4e7d49 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime) hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rt_b->rt_period_timer.irqsafe = 1; rt_b->rt_period_timer.function = sched_rt_period_timer; } @@ -101,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq) rt_rq->push_cpu = nr_cpu_ids; raw_spin_lock_init(&rt_rq->push_lock); init_irq_work(&rt_rq->push_work, push_irq_work_func); + rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ; #endif #endif /* CONFIG_SMP */ /* We start in dequeued state, because no RT tasks are queued */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ad77d666583c..16df04073b57 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1152,6 +1152,7 @@ static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ #define WF_FORK 0x02 /* child wakeup after fork */ #define WF_MIGRATED 0x4 /* internal use, task got migrated */ +#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ /* * To aid in avoiding the subversion of "niceness" due to uneven distribution @@ -1335,6 +1336,15 @@ extern void init_sched_fair_class(void); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); +#ifdef CONFIG_PREEMPT_LAZY +extern void resched_curr_lazy(struct rq *rq); +#else +static inline void resched_curr_lazy(struct rq *rq) +{ + resched_curr(rq); +} +#endif + extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); diff --git a/kernel/sched/swait.c b/kernel/sched/swait.c index 82f0dff90030..ef027ff3250a 100644 --- a/kernel/sched/swait.c +++ b/kernel/sched/swait.c @@ -1,5 +1,6 @@ #include <linux/sched.h> #include <linux/swait.h> +#include <linux/suspend.h> void __init_swait_queue_head(struct swait_queue_head *q, const char *name, struct lock_class_key *key) @@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_head *q) } EXPORT_SYMBOL(swake_up_locked); +void swake_up_all_locked(struct swait_queue_head *q) +{ + struct swait_queue *curr; + int wakes = 0; + + while (!list_empty(&q->task_list)) { + + curr = list_first_entry(&q->task_list, typeof(*curr), + task_list); + wake_up_process(curr->task); + list_del_init(&curr->task_list); + wakes++; + } + if (pm_in_action) + return; + WARN(wakes > 2, "complete_all() with %d waiters\n", wakes); +} +EXPORT_SYMBOL(swake_up_all_locked); + void swake_up(struct swait_queue_head *q) { unsigned long flags; @@ -54,6 +74,7 @@ void swake_up_all(struct swait_queue_head *q) if (!swait_active(q)) return; + WARN_ON(irqs_disabled()); raw_spin_lock_irq(&q->lock); list_splice_init(&q->task_list, &tmp); while (!list_empty(&tmp)) { diff --git a/kernel/sched/swork.c b/kernel/sched/swork.c new file mode 100644 index 000000000000..1950f40ca725 --- /dev/null +++ b/kernel/sched/swork.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de + * + * Provides a framework for enqueuing callbacks from irq context in a + * PREEMPT_RT_FULL safe way. The callbacks are executed in kthread context.
*/ + +#include <linux/swait.h> +#include <linux/swork.h> +#include <linux/kthread.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/export.h> + +#define SWORK_EVENT_PENDING (1 << 0) + +static DEFINE_MUTEX(worker_mutex); +static struct sworker *glob_worker; + +struct sworker { + struct list_head events; + struct swait_queue_head wq; + + raw_spinlock_t lock; + + struct task_struct *task; + int refs; +}; + +static bool swork_readable(struct sworker *worker) +{ + bool r; + + if (kthread_should_stop()) + return true; + + raw_spin_lock_irq(&worker->lock); + r = !list_empty(&worker->events); + raw_spin_unlock_irq(&worker->lock); + + return r; +} + +static int swork_kthread(void *arg) +{ + struct sworker *worker = arg; + + for (;;) { + swait_event_interruptible(worker->wq, + swork_readable(worker)); + if (kthread_should_stop()) + break; + + raw_spin_lock_irq(&worker->lock); + while (!list_empty(&worker->events)) { + struct swork_event *sev; + + sev = list_first_entry(&worker->events, + struct swork_event, item); + list_del(&sev->item); + raw_spin_unlock_irq(&worker->lock); + + WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, + &sev->flags)); + sev->func(sev); + raw_spin_lock_irq(&worker->lock); + } + raw_spin_unlock_irq(&worker->lock); + } + return 0; +} + +static struct sworker *swork_create(void) +{ + struct sworker *worker; + + worker = kzalloc(sizeof(*worker), GFP_KERNEL); + if (!worker) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&worker->events); + raw_spin_lock_init(&worker->lock); + init_swait_queue_head(&worker->wq); + + worker->task = kthread_run(swork_kthread, worker, "kswork"); + if (IS_ERR(worker->task)) { + kfree(worker); + return ERR_PTR(-ENOMEM); + } + + return worker; +} + +static void swork_destroy(struct sworker *worker) +{ + kthread_stop(worker->task); + + WARN_ON(!list_empty(&worker->events)); + kfree(worker); +} + +/** + * swork_queue - queue swork + * + * Returns %false if @sev was already on a queue, %true otherwise. + * + * The work is queued and processed on a random CPU. + */ +bool swork_queue(struct swork_event *sev) +{ + unsigned long flags; + + if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) + return false; + + raw_spin_lock_irqsave(&glob_worker->lock, flags); + list_add_tail(&sev->item, &glob_worker->events); + raw_spin_unlock_irqrestore(&glob_worker->lock, flags); + + swake_up(&glob_worker->wq); + return true; +} +EXPORT_SYMBOL_GPL(swork_queue); + +/** + * swork_get - get an instance of the sworker + * + * Returns a negative error code if the initialization of the worker failed, + * %0 otherwise. + * + */ +int swork_get(void) +{ + struct sworker *worker; + + mutex_lock(&worker_mutex); + if (!glob_worker) { + worker = swork_create(); + if (IS_ERR(worker)) { + mutex_unlock(&worker_mutex); + return -ENOMEM; + } + + glob_worker = worker; + } + + glob_worker->refs++; + mutex_unlock(&worker_mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(swork_get); + +/** + * swork_put - puts an instance of the sworker + * + * Will destroy the sworker thread. This function must not be called until all + * queued events have been completed.
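Putting the swork API together, a minimal user looks roughly like this; INIT_SWORK is taken from the hrtimer hunk later in this patch, which uses the interface the same way, and all names prefixed example_ are hypothetical:

static struct swork_event example_event;

static void example_cb(struct swork_event *sev)
{
	/* runs in the kswork kthread; sleeping locks are fine here */
}

static int __init example_init(void)
{
	int err = swork_get();	/* create or reference the global worker */

	if (err)
		return err;
	INIT_SWORK(&example_event, example_cb);
	return 0;
}

static void example_hard_irq_part(void)
{
	swork_queue(&example_event);	/* returns false if still pending */
}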
+ */ +void swork_put(void) +{ + mutex_lock(&worker_mutex); + + glob_worker->refs--; + if (glob_worker->refs > 0) + goto out; + + swork_destroy(glob_worker); + glob_worker = NULL; +out: + mutex_unlock(&worker_mutex); +} +EXPORT_SYMBOL_GPL(swork_put); diff --git a/kernel/signal.c b/kernel/signal.c index e48668c3c972..99918dcd836f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -14,6 +14,7 @@ #include <linux/export.h> #include <linux/init.h> #include <linux/sched.h> +#include <linux/sched/rt.h> #include <linux/fs.h> #include <linux/tty.h> #include <linux/binfmts.h> @@ -352,13 +353,30 @@ static bool task_participate_group_stop(struct task_struct *task) return false; } +static inline struct sigqueue *get_task_cache(struct task_struct *t) +{ + struct sigqueue *q = t->sigqueue_cache; + + if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) + return NULL; + return q; +} + +static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) +{ + if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) + return 0; + return 1; +} + /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * -__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) +__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit, int fromslab) { struct sigqueue *q = NULL; struct user_struct *user; @@ -375,7 +393,10 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi if (override_rlimit || atomic_read(&user->sigpending) <= task_rlimit(t, RLIMIT_SIGPENDING)) { - q = kmem_cache_alloc(sigqueue_cachep, flags); + if (!fromslab) + q = get_task_cache(t); + if (!q) + q = kmem_cache_alloc(sigqueue_cachep, flags); } else { print_dropped_signal(sig); } @@ -392,6 +413,13 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi return q; } +static struct sigqueue * +__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, + int override_rlimit) +{ + return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); +} + static void __sigqueue_free(struct sigqueue *q) { if (q->flags & SIGQUEUE_PREALLOC) @@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqueue *q) kmem_cache_free(sigqueue_cachep, q); } +static void sigqueue_free_current(struct sigqueue *q) +{ + struct user_struct *up; + + if (q->flags & SIGQUEUE_PREALLOC) + return; + + up = q->user; + if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { + atomic_dec(&up->sigpending); + free_uid(up); + } else + __sigqueue_free(q); +} + void flush_sigqueue(struct sigpending *queue) { struct sigqueue *q; @@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *queue) } /* + * Called from __exit_signal. Flush tsk->pending and + * tsk->sigqueue_cache + */ +void flush_task_sigqueue(struct task_struct *tsk) +{ + struct sigqueue *q; + + flush_sigqueue(&tsk->pending); + + q = get_task_cache(tsk); + if (q) + kmem_cache_free(sigqueue_cachep, q); +} + +/* * Flush all pending signals for this kthread. */ void flush_signals(struct task_struct *t) @@ -532,7 +590,7 @@ still_pending: (info->si_code == SI_TIMER) && (info->si_sys_private); - __sigqueue_free(first); + sigqueue_free_current(first); } else { /* * Ok, it wasn't in the queue. 
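The two cmpxchg() helpers above form a lock-free single-slot cache: get_task_cache() atomically empties the slot, put_task_cache() fills it only if it is empty. In use, again as a sketch (the real sigqueue_free_current() additionally fixes up the RLIMIT_SIGPENDING accounting):

	struct sigqueue *q;

	q = get_task_cache(current);		/* NULL if the slot was empty */
	if (!q)
		q = kmem_cache_alloc(sigqueue_cachep, GFP_ATOMIC);
	/* ... use q ... */
	if (put_task_cache(current, q))		/* non-zero: slot already full */
		__sigqueue_free(q);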
This must be @@ -568,6 +626,8 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) bool resched_timer = false; int signr; + WARN_ON_ONCE(tsk != current); + /* We only dequeue private signals from ourselves, we don't let * signalfd steal them */ @@ -1164,8 +1224,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p, * We don't want to have recursive SIGSEGV's etc, for example, * that is why we also clear SIGNAL_UNKILLABLE. */ -int -force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +static int +do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) { unsigned long int flags; int ret, blocked, ignored; @@ -1190,6 +1250,39 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t) return ret; } +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) +{ +/* + * On some archs, PREEMPT_RT has to delay sending a signal from a trap + * since it can not enable preemption, and the signal code's spin_locks + * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will + * send the signal on exit of the trap. + */ +#ifdef ARCH_RT_DELAYS_SIGNAL_SEND + if (in_atomic()) { + if (WARN_ON_ONCE(t != current)) + return 0; + if (WARN_ON_ONCE(t->forced_info.si_signo)) + return 0; + + if (is_si_special(info)) { + WARN_ON_ONCE(info != SEND_SIG_PRIV); + t->forced_info.si_signo = sig; + t->forced_info.si_errno = 0; + t->forced_info.si_code = SI_KERNEL; + t->forced_info.si_pid = 0; + t->forced_info.si_uid = 0; + } else { + t->forced_info = *info; + } + + set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); + return 0; + } +#endif + return do_force_sig_info(sig, info, t); +} + /* * Nuke all other threads in the group. */ @@ -1224,12 +1317,12 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, * Disable interrupts early to avoid deadlocks. * See rcu_read_unlock() comment header for details. */ - local_irq_save(*flags); + local_irq_save_nort(*flags); rcu_read_lock(); sighand = rcu_dereference(tsk->sighand); if (unlikely(sighand == NULL)) { rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore_nort(*flags); break; } /* @@ -1250,7 +1343,7 @@ struct sighand_struct *__lock_task_sighand(struct task_struct *tsk, } spin_unlock(&sighand->siglock); rcu_read_unlock(); - local_irq_restore(*flags); + local_irq_restore_nort(*flags); } return sighand; @@ -1493,7 +1586,8 @@ EXPORT_SYMBOL(kill_pid); */ struct sigqueue *sigqueue_alloc(void) { - struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); + /* Preallocated sigqueue objects always from the slabcache ! */ + struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); if (q) q->flags |= SIGQUEUE_PREALLOC; @@ -1854,15 +1948,7 @@ static void ptrace_stop(int exit_code, int why, int clear_code, siginfo_t *info) if (gstop_done && ptrace_reparented(current)) do_notify_parent_cldstop(current, false, why); - /* - * Don't want to allow preemption here, because - * sys_ptrace() needs this task to be inactive. - * - * XXX: implement read_unlock_no_resched(). 
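The delayed path above relies on the architecture checking current->forced_info on its way back to user space. That arch-side hunk is not shown here; its expected shape, sketched:

/* Sketch: in the arch's exit-to-user (TIF_NOTIFY_RESUME) path on RT. */
if (unlikely(current->forced_info.si_signo)) {
	struct siginfo info = current->forced_info;

	current->forced_info.si_signo = 0;	/* consume the stashed signal */
	force_sig_info(info.si_signo, &info, current);
}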
- */ - preempt_disable(); read_unlock(&tasklist_lock); - preempt_enable_no_resched(); freezable_schedule(); } else { /* diff --git a/kernel/softirq.c b/kernel/softirq.c index 744fa611cae0..819bd7cf5ad0 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -21,10 +21,12 @@ #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/rcupdate.h> +#include <linux/delay.h> #include <linux/ftrace.h> #include <linux/smp.h> #include <linux/smpboot.h> #include <linux/tick.h> +#include <linux/locallock.h> #include <linux/irq.h> #define CREATE_TRACE_POINTS @@ -56,12 +58,108 @@ EXPORT_SYMBOL(irq_stat); static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; DEFINE_PER_CPU(struct task_struct *, ksoftirqd); +#ifdef CONFIG_PREEMPT_RT_FULL +#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) +DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); +#endif const char * const softirq_to_name[NR_SOFTIRQS] = { "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU" }; +#ifdef CONFIG_NO_HZ_COMMON +# ifdef CONFIG_PREEMPT_RT_FULL + +struct softirq_runner { + struct task_struct *runner[NR_SOFTIRQS]; +}; + +static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); + +static inline void softirq_set_runner(unsigned int sirq) +{ + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + + sr->runner[sirq] = current; +} + +static inline void softirq_clr_runner(unsigned int sirq) +{ + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + + sr->runner[sirq] = NULL; +} + +/* + * On preempt-rt a softirq running context might be blocked on a + * lock. There might be no other runnable task on this CPU because the + * lock owner runs on some other CPU. So we have to go into idle with + * the pending bit set. Therefore we need to check this, otherwise we + * warn about false positives, which confuse users and defeat the + * whole purpose of this test. + * + * This code is called with interrupts disabled. + */ +void softirq_check_pending_idle(void) +{ + static int rate_limit; + struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); + u32 warnpending; + int i; + + if (rate_limit >= 10) + return; + + warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; + for (i = 0; i < NR_SOFTIRQS; i++) { + struct task_struct *tsk = sr->runner[i]; + + /* + * The wakeup code in rtmutex.c wakes up the task + * _before_ it sets pi_blocked_on to NULL under + * tsk->pi_lock. So we need to check for both: state + * and pi_blocked_on.
+ */ + if (tsk) { + raw_spin_lock(&tsk->pi_lock); + if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { + /* Clear all bits pending in that task */ + warnpending &= ~(tsk->softirqs_raised); + warnpending &= ~(1 << i); + } + raw_spin_unlock(&tsk->pi_lock); + } + } + + if (warnpending) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + warnpending); + rate_limit++; + } +} +# else +/* + * On !PREEMPT_RT we just printk rate limited: + */ +void softirq_check_pending_idle(void) +{ + static int rate_limit; + + if (rate_limit < 10 && + (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { + printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", + local_softirq_pending()); + rate_limit++; + } +} +# endif + +#else /* !CONFIG_NO_HZ_COMMON */ +static inline void softirq_set_runner(unsigned int sirq) { } +static inline void softirq_clr_runner(unsigned int sirq) { } +#endif + /* * we cannot loop indefinitely here to avoid userspace starvation, * but we also don't want to introduce a worst case 1/HZ latency @@ -77,6 +175,38 @@ static void wakeup_softirqd(void) wake_up_process(tsk); } +#ifdef CONFIG_PREEMPT_RT_FULL +static void wakeup_timer_softirqd(void) +{ + /* Interrupts are disabled: no need to stop preemption */ + struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); + + if (tsk && tsk->state != TASK_RUNNING) + wake_up_process(tsk); +} +#endif + +static void handle_softirq(unsigned int vec_nr) +{ + struct softirq_action *h = softirq_vec + vec_nr; + int prev_count; + + prev_count = preempt_count(); + + kstat_incr_softirqs_this_cpu(vec_nr); + + trace_softirq_entry(vec_nr); + h->action(h); + trace_softirq_exit(vec_nr); + if (unlikely(prev_count != preempt_count())) { + pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", + vec_nr, softirq_to_name[vec_nr], h->action, + prev_count, preempt_count()); + preempt_count_set(prev_count); + } +} + +#ifndef CONFIG_PREEMPT_RT_FULL /* * If ksoftirqd is scheduled, we do not want to process pending softirqs * right now. Let ksoftirqd handle this at its own rate, to get fairness. 
@@ -88,6 +218,47 @@ static bool ksoftirqd_running(void) return tsk && (tsk->state == TASK_RUNNING); } +static inline int ksoftirqd_softirq_pending(void) +{ + return local_softirq_pending(); +} + +static void handle_pending_softirqs(u32 pending) +{ + struct softirq_action *h = softirq_vec; + int softirq_bit; + + local_irq_enable(); + + h = softirq_vec; + + while ((softirq_bit = ffs(pending))) { + unsigned int vec_nr; + + h += softirq_bit - 1; + vec_nr = h - softirq_vec; + handle_softirq(vec_nr); + + h++; + pending >>= softirq_bit; + } + + rcu_bh_qs(); + local_irq_disable(); +} + +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + if (ksoftirqd_softirq_pending()) { + __do_softirq(); + local_irq_enable(); + cond_resched_rcu_qs(); + return; + } + local_irq_enable(); +} + /* * preempt_count and SOFTIRQ_OFFSET usage: * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving @@ -243,10 +414,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void) unsigned long end = jiffies + MAX_SOFTIRQ_TIME; unsigned long old_flags = current->flags; int max_restart = MAX_SOFTIRQ_RESTART; - struct softirq_action *h; bool in_hardirq; __u32 pending; - int softirq_bit; /* * Mask out PF_MEMALLOC as current task context is borrowed for the @@ -265,36 +434,7 @@ restart: /* Reset the pending bitmask before enabling irqs */ set_softirq_pending(0); - local_irq_enable(); - - h = softirq_vec; - - while ((softirq_bit = ffs(pending))) { - unsigned int vec_nr; - int prev_count; - - h += softirq_bit - 1; - - vec_nr = h - softirq_vec; - prev_count = preempt_count(); - - kstat_incr_softirqs_this_cpu(vec_nr); - - trace_softirq_entry(vec_nr); - h->action(h); - trace_softirq_exit(vec_nr); - if (unlikely(prev_count != preempt_count())) { - pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", - vec_nr, softirq_to_name[vec_nr], h->action, - prev_count, preempt_count()); - preempt_count_set(prev_count); - } - h++; - pending >>= softirq_bit; - } - - rcu_bh_qs(); - local_irq_disable(); + handle_pending_softirqs(pending); pending = local_softirq_pending(); if (pending) { @@ -331,6 +471,309 @@ asmlinkage __visible void do_softirq(void) } /* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + __raise_softirq_irqoff(nr); + + /* + * If we're in an interrupt or softirq, we're done + * (this also catches softirq-disabled code). We will + * actually run the softirq once we return from + * the irq or softirq. + * + * Otherwise we wake up ksoftirqd to make sure we + * schedule the softirq soon.
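To make the ffs()-based walk in handle_pending_softirqs() concrete, here is one iteration trace (vector numbers as in softirq_to_name[] above):

/*
 * pending = 0b1010: TIMER_SOFTIRQ (1) and NET_RX_SOFTIRQ (3) raised.
 *   ffs(0b1010) == 2 -> h += 1 -> handle vec 1 (TIMER)
 *   h++; pending >>= 2 -> 0b10
 *   ffs(0b10)   == 2 -> h += 1 -> handle vec 3 (NET_RX)
 *   h++; pending >>= 2 -> 0 -> loop terminates
 */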
+ */ + if (!in_interrupt()) + wakeup_softirqd(); +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + trace_softirq_raise(nr); + or_softirq_pending(1UL << nr); +} + +static inline void local_bh_disable_nort(void) { local_bh_disable(); } +static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } +static void ksoftirqd_set_sched_params(unsigned int cpu) { } + +#else /* !PREEMPT_RT_FULL */ + +/* + * On RT we serialize softirq execution with a cpu local lock per softirq + */ +static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); + +void __init softirq_early_init(void) +{ + int i; + + for (i = 0; i < NR_SOFTIRQS; i++) + local_irq_lock_init(local_softirq_locks[i]); +} + +static void lock_softirq(int which) +{ + local_lock(local_softirq_locks[which]); +} + +static void unlock_softirq(int which) +{ + local_unlock(local_softirq_locks[which]); +} + +static void do_single_softirq(int which) +{ + unsigned long old_flags = current->flags; + + current->flags &= ~PF_MEMALLOC; + vtime_account_irq_enter(current); + current->flags |= PF_IN_SOFTIRQ; + lockdep_softirq_enter(); + local_irq_enable(); + handle_softirq(which); + local_irq_disable(); + lockdep_softirq_exit(); + current->flags &= ~PF_IN_SOFTIRQ; + vtime_account_irq_enter(current); + tsk_restore_flags(current, old_flags, PF_MEMALLOC); +} + +/* + * Called with interrupts disabled. Process softirqs which were raised + * in current context (or on behalf of ksoftirqd). + */ +static void do_current_softirqs(void) +{ + while (current->softirqs_raised) { + int i = __ffs(current->softirqs_raised); + unsigned int pending, mask = (1U << i); + + current->softirqs_raised &= ~mask; + local_irq_enable(); + + /* + * If the lock is contended, we boost the owner to + * process the softirq or leave the critical section + * now. + */ + lock_softirq(i); + local_irq_disable(); + softirq_set_runner(i); + /* + * Check with the local_softirq_pending() bits, + * whether we need to process this still or if someone + * else took care of it. + */ + pending = local_softirq_pending(); + if (pending & mask) { + set_softirq_pending(pending & ~mask); + do_single_softirq(i); + } + softirq_clr_runner(i); + WARN_ON(current->softirq_nestcnt != 1); + local_irq_enable(); + unlock_softirq(i); + local_irq_disable(); + } +} + +void __local_bh_disable(void) +{ + if (++current->softirq_nestcnt == 1) + migrate_disable(); +} +EXPORT_SYMBOL(__local_bh_disable); + +void __local_bh_enable(void) +{ + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + + local_irq_disable(); + if (current->softirq_nestcnt == 1 && current->softirqs_raised) + do_current_softirqs(); + local_irq_enable(); + + if (--current->softirq_nestcnt == 0) + migrate_enable(); +} +EXPORT_SYMBOL(__local_bh_enable); + +void _local_bh_enable(void) +{ + if (WARN_ON(current->softirq_nestcnt == 0)) + return; + if (--current->softirq_nestcnt == 0) + migrate_enable(); +} +EXPORT_SYMBOL(_local_bh_enable); + +int in_serving_softirq(void) +{ + return current->flags & PF_IN_SOFTIRQ; +} +EXPORT_SYMBOL(in_serving_softirq); + +/* Called with preemption disabled */ +static void run_ksoftirqd(unsigned int cpu) +{ + local_irq_disable(); + current->softirq_nestcnt++; + + do_current_softirqs(); + current->softirq_nestcnt--; + local_irq_enable(); + cond_resched_rcu_qs(); +} + +/* + * Called from netif_rx_ni(). Preemption enabled, but migration + * disabled. So the cpu can't go away under us. 
+ */ +void thread_do_softirq(void) +{ + if (!in_serving_softirq() && current->softirqs_raised) { + current->softirq_nestcnt++; + do_current_softirqs(); + current->softirq_nestcnt--; + } +} + +static void do_raise_softirq_irqoff(unsigned int nr) +{ + unsigned int mask; + + mask = 1UL << nr; + + trace_softirq_raise(nr); + or_softirq_pending(mask); + + /* + * If we are not in a hard interrupt and inside a bh disabled + * region, we simply raise the flag on current. local_bh_enable() + * will make sure that the softirq is executed. Otherwise we + * delegate it to ksoftirqd. + */ + if (!in_irq() && current->softirq_nestcnt) + current->softirqs_raised |= mask; + else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) + return; + + if (mask & TIMER_SOFTIRQS) + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; + else + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; +} + +static void wakeup_proper_softirq(unsigned int nr) +{ + if ((1UL << nr) & TIMER_SOFTIRQS) + wakeup_timer_softirqd(); + else + wakeup_softirqd(); +} + +void __raise_softirq_irqoff(unsigned int nr) +{ + do_raise_softirq_irqoff(nr); + if (!in_irq() && !current->softirq_nestcnt) + wakeup_proper_softirq(nr); +} + +/* + * Same as __raise_softirq_irqoff() but the softirq will be processed in ksoftirqd + */ +void __raise_softirq_irqoff_ksoft(unsigned int nr) +{ + unsigned int mask; + + if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || + !__this_cpu_read(ktimer_softirqd))) + return; + mask = 1UL << nr; + + trace_softirq_raise(nr); + or_softirq_pending(mask); + if (mask & TIMER_SOFTIRQS) + __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; + else + __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; + wakeup_proper_softirq(nr); +} + +/* + * This function must run with irqs disabled! + */ +void raise_softirq_irqoff(unsigned int nr) +{ + do_raise_softirq_irqoff(nr); + + /* + * If we're in a hard interrupt we let the irq return code deal + * with the wakeup of ksoftirqd. + */ + if (in_irq()) + return; + /* + * If we are in thread context but outside of a bh disabled + * region, we need to wake ksoftirqd as well. + * + * CHECKME: Some of the places which do that could be wrapped + * into local_bh_disable/enable pairs. Though it's unclear + * whether this is worth the effort. To find those places just + * raise a WARN() if the condition is met.
+ */ + if (!current->softirq_nestcnt) + wakeup_proper_softirq(nr); +} + +static inline int ksoftirqd_softirq_pending(void) +{ + return current->softirqs_raised; +} + +static inline void local_bh_disable_nort(void) { } +static inline void _local_bh_enable_nort(void) { } + +static inline void ksoftirqd_set_sched_params(unsigned int cpu) +{ + /* Take over all but timer pending softirqs when starting */ + local_irq_disable(); + current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; + local_irq_enable(); +} + +static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) +{ + struct sched_param param = { .sched_priority = 1 }; + + sched_setscheduler(current, SCHED_FIFO, ¶m); + + /* Take over timer pending softirqs when starting */ + local_irq_disable(); + current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; + local_irq_enable(); +} + +static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, + bool online) +{ + struct sched_param param = { .sched_priority = 0 }; + + sched_setscheduler(current, SCHED_NORMAL, ¶m); +} + +static int ktimer_softirqd_should_run(unsigned int cpu) +{ + return current->softirqs_raised; +} + +#endif /* PREEMPT_RT_FULL */ +/* * Enter an interrupt context. */ void irq_enter(void) @@ -341,9 +784,9 @@ void irq_enter(void) * Prevent raise_softirq from needlessly waking up ksoftirqd * here, as softirq will be serviced on return from interrupt. */ - local_bh_disable(); + local_bh_disable_nort(); tick_irq_enter(); - _local_bh_enable(); + _local_bh_enable_nort(); } __irq_enter(); @@ -351,6 +794,7 @@ void irq_enter(void) static inline void invoke_softirq(void) { +#ifndef CONFIG_PREEMPT_RT_FULL if (ksoftirqd_running()) return; @@ -373,6 +817,18 @@ static inline void invoke_softirq(void) } else { wakeup_softirqd(); } +#else /* PREEMPT_RT_FULL */ + unsigned long flags; + + local_irq_save(flags); + if (__this_cpu_read(ksoftirqd) && + __this_cpu_read(ksoftirqd)->softirqs_raised) + wakeup_softirqd(); + if (__this_cpu_read(ktimer_softirqd) && + __this_cpu_read(ktimer_softirqd)->softirqs_raised) + wakeup_timer_softirqd(); + local_irq_restore(flags); +#endif } static inline void tick_irq_exit(void) @@ -409,26 +865,6 @@ void irq_exit(void) trace_hardirq_exit(); /* must be last! */ } -/* - * This function must run with irqs disabled! - */ -inline void raise_softirq_irqoff(unsigned int nr) -{ - __raise_softirq_irqoff(nr); - - /* - * If we're in an interrupt or softirq, we're done - * (this also catches softirq-disabled code). We will - * actually run the softirq once we return from - * the irq or softirq. - * - * Otherwise we wake up ksoftirqd to make sure we - * schedule the softirq soon. - */ - if (!in_interrupt()) - wakeup_softirqd(); -} - void raise_softirq(unsigned int nr) { unsigned long flags; @@ -438,12 +874,6 @@ void raise_softirq(unsigned int nr) local_irq_restore(flags); } -void __raise_softirq_irqoff(unsigned int nr) -{ - trace_softirq_raise(nr); - or_softirq_pending(1UL << nr); -} - void open_softirq(int nr, void (*action)(struct softirq_action *)) { softirq_vec[nr].action = action; @@ -460,15 +890,45 @@ struct tasklet_head { static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); +static void inline +__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) +{ + if (tasklet_trylock(t)) { +again: + /* We may have been preempted before tasklet_trylock + * and __tasklet_action may have already run. 
* So double check the sched bit while the tasklet + * is locked before adding it to the list. + */ + if (test_bit(TASKLET_STATE_SCHED, &t->state)) { + t->next = NULL; + *head->tail = t; + head->tail = &(t->next); + raise_softirq_irqoff(nr); + tasklet_unlock(t); + } else { + /* This is subtle. If we hit the corner case above, + * it is possible that we get preempted right here, + * and another task has successfully called + * tasklet_schedule(), then this function, and + * failed on the trylock. Thus we must be sure + * before releasing the tasklet lock, that the + * SCHED_BIT is clear. Otherwise the tasklet + * may get its SCHED_BIT set, but not be added to the + * list. + */ + if (!tasklet_tryunlock(t)) + goto again; + } + } +} + void __tasklet_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__this_cpu_read(tasklet_vec.tail) = t; - __this_cpu_write(tasklet_vec.tail, &(t->next)); - raise_softirq_irqoff(TASKLET_SOFTIRQ); + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__tasklet_schedule); @@ -478,10 +938,7 @@ void __tasklet_hi_schedule(struct tasklet_struct *t) { unsigned long flags; local_irq_save(flags); - t->next = NULL; - *__this_cpu_read(tasklet_hi_vec.tail) = t; - __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); - raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); local_irq_restore(flags); } EXPORT_SYMBOL(__tasklet_hi_schedule); @@ -490,82 +947,122 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t) { BUG_ON(!irqs_disabled()); - t->next = __this_cpu_read(tasklet_hi_vec.head); - __this_cpu_write(tasklet_hi_vec.head, t); - __raise_softirq_irqoff(HI_SOFTIRQ); + __tasklet_hi_schedule(t); } EXPORT_SYMBOL(__tasklet_hi_schedule_first); -static __latent_entropy void tasklet_action(struct softirq_action *a) +void tasklet_enable(struct tasklet_struct *t) { - struct tasklet_struct *list; + if (!atomic_dec_and_test(&t->count)) + return; + if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) + tasklet_schedule(t); +} +EXPORT_SYMBOL(tasklet_enable); - local_irq_disable(); - list = __this_cpu_read(tasklet_vec.head); - __this_cpu_write(tasklet_vec.head, NULL); - __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); - local_irq_enable(); +static void __tasklet_action(struct softirq_action *a, + struct tasklet_struct *list) +{ + int loops = 1000000; while (list) { struct tasklet_struct *t = list; list = list->next; - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, - &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); + /* + * Should always succeed - after a tasklet got on the + * list (after getting the SCHED bit set from 0 to 1), + * nothing but the tasklet softirq it got queued to can + * lock it: + */ + if (!tasklet_trylock(t)) { + WARN_ON(1); + continue; } - local_irq_disable(); t->next = NULL; - *__this_cpu_read(tasklet_vec.tail) = t; - __this_cpu_write(tasklet_vec.tail, &(t->next)); - __raise_softirq_irqoff(TASKLET_SOFTIRQ); - local_irq_enable(); + + /* + * If we cannot handle the tasklet because it's disabled, + * mark it as pending. tasklet_enable() will later + * re-schedule the tasklet.
+ */ + if (unlikely(atomic_read(&t->count))) { +out_disabled: + /* implicit unlock: */ + wmb(); + t->state = TASKLET_STATEF_PENDING; + continue; + } + + /* + * After this point on the tasklet might be rescheduled + * on another CPU, but it can only be added to another + * CPU's tasklet list if we unlock the tasklet (which we + * dont do yet). + */ + if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + WARN_ON(1); + +again: + t->func(t->data); + + /* + * Try to unlock the tasklet. We must use cmpxchg, because + * another CPU might have scheduled or disabled the tasklet. + * We only allow the STATE_RUN -> 0 transition here. + */ + while (!tasklet_tryunlock(t)) { + /* + * If it got disabled meanwhile, bail out: + */ + if (atomic_read(&t->count)) + goto out_disabled; + /* + * If it got scheduled meanwhile, re-execute + * the tasklet function: + */ + if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) + goto again; + if (!--loops) { + printk("hm, tasklet state: %08lx\n", t->state); + WARN_ON(1); + tasklet_unlock(t); + break; + } + } } } +static void tasklet_action(struct softirq_action *a) +{ + struct tasklet_struct *list; + + local_irq_disable(); + + list = __this_cpu_read(tasklet_vec.head); + __this_cpu_write(tasklet_vec.head, NULL); + __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); + + local_irq_enable(); + + __tasklet_action(a, list); +} + static __latent_entropy void tasklet_hi_action(struct softirq_action *a) { struct tasklet_struct *list; local_irq_disable(); + list = __this_cpu_read(tasklet_hi_vec.head); __this_cpu_write(tasklet_hi_vec.head, NULL); __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); - local_irq_enable(); - - while (list) { - struct tasklet_struct *t = list; - list = list->next; - - if (tasklet_trylock(t)) { - if (!atomic_read(&t->count)) { - if (!test_and_clear_bit(TASKLET_STATE_SCHED, - &t->state)) - BUG(); - t->func(t->data); - tasklet_unlock(t); - continue; - } - tasklet_unlock(t); - } + local_irq_enable(); - local_irq_disable(); - t->next = NULL; - *__this_cpu_read(tasklet_hi_vec.tail) = t; - __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); - __raise_softirq_irqoff(HI_SOFTIRQ); - local_irq_enable(); - } + __tasklet_action(a, list); } void tasklet_init(struct tasklet_struct *t, @@ -586,7 +1083,7 @@ void tasklet_kill(struct tasklet_struct *t) while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { do { - yield(); + msleep(1); } while (test_bit(TASKLET_STATE_SCHED, &t->state)); } tasklet_unlock_wait(t); @@ -660,25 +1157,26 @@ void __init softirq_init(void) open_softirq(HI_SOFTIRQ, tasklet_hi_action); } -static int ksoftirqd_should_run(unsigned int cpu) +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) +void tasklet_unlock_wait(struct tasklet_struct *t) { - return local_softirq_pending(); -} - -static void run_ksoftirqd(unsigned int cpu) -{ - local_irq_disable(); - if (local_softirq_pending()) { + while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { /* - * We can safely run softirq on inline stack, as we are not deep - * in the task stack here. 
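The trylock/tryunlock pair that the reworked tasklet code leans on is defined in the interrupt header part of this series, not in this file. Its expected shape, sketched (TASKLET_STATEF_RUN taken to be the mask form of TASKLET_STATE_RUN; treat this as an assumption, not the authoritative definition):

static inline int tasklet_trylock_sketch(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &t->state);
}

static inline int tasklet_tryunlock_sketch(struct tasklet_struct *t)
{
	/* only the pure RUN -> 0 transition may drop the lock */
	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
}

Because every state change goes through an atomic RMW on t->state, __tasklet_action() can safely re-run, park, or hand off a tasklet without losing a schedule request.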
+ * Hack for now to avoid this busy-loop: */ - __do_softirq(); - local_irq_enable(); - cond_resched_rcu_qs(); - return; +#ifdef CONFIG_PREEMPT_RT_FULL + msleep(1); +#else + barrier(); +#endif } - local_irq_enable(); +} +EXPORT_SYMBOL(tasklet_unlock_wait); +#endif + +static int ksoftirqd_should_run(unsigned int cpu) +{ + return ksoftirqd_softirq_pending(); } #ifdef CONFIG_HOTPLUG_CPU @@ -745,17 +1243,31 @@ static int takeover_tasklets(unsigned int cpu) static struct smp_hotplug_thread softirq_threads = { .store = &ksoftirqd, + .setup = ksoftirqd_set_sched_params, .thread_should_run = ksoftirqd_should_run, .thread_fn = run_ksoftirqd, .thread_comm = "ksoftirqd/%u", }; +#ifdef CONFIG_PREEMPT_RT_FULL +static struct smp_hotplug_thread softirq_timer_threads = { + .store = &ktimer_softirqd, + .setup = ktimer_softirqd_set_sched_params, + .cleanup = ktimer_softirqd_clr_sched_params, + .thread_should_run = ktimer_softirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ktimersoftd/%u", +}; +#endif + static __init int spawn_ksoftirqd(void) { cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL, takeover_tasklets); BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); - +#ifdef CONFIG_PREEMPT_RT_FULL + BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); +#endif return 0; } early_initcall(spawn_ksoftirqd); diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c index ec9ab2f01489..8b89dbedeaff 100644 --- a/kernel/stop_machine.c +++ b/kernel/stop_machine.c @@ -36,7 +36,7 @@ struct cpu_stop_done { struct cpu_stopper { struct task_struct *thread; - spinlock_t lock; + raw_spinlock_t lock; bool enabled; /* is this stopper enabled? */ struct list_head works; /* list of pending works */ @@ -78,14 +78,14 @@ static bool cpu_stop_queue_work(unsigned int cpu, struct cpu_stop_work *work) unsigned long flags; bool enabled; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); enabled = stopper->enabled; if (enabled) __cpu_stop_queue_work(stopper, work); else if (work->done) cpu_stop_signal_done(work->done); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return enabled; } @@ -231,8 +231,8 @@ static int cpu_stop_queue_two_works(int cpu1, struct cpu_stop_work *work1, struct cpu_stopper *stopper2 = per_cpu_ptr(&cpu_stopper, cpu2); int err; retry: - spin_lock_irq(&stopper1->lock); - spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + raw_spin_lock_irq(&stopper1->lock); + raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); err = -ENOENT; if (!stopper1->enabled || !stopper2->enabled) @@ -255,8 +255,8 @@ retry: __cpu_stop_queue_work(stopper1, work1); __cpu_stop_queue_work(stopper2, work2); unlock: - spin_unlock(&stopper2->lock); - spin_unlock_irq(&stopper1->lock); + raw_spin_unlock(&stopper2->lock); + raw_spin_unlock_irq(&stopper1->lock); if (unlikely(err == -EDEADLK)) { while (stop_cpus_in_progress) @@ -448,9 +448,9 @@ static int cpu_stop_should_run(unsigned int cpu) unsigned long flags; int run; - spin_lock_irqsave(&stopper->lock, flags); + raw_spin_lock_irqsave(&stopper->lock, flags); run = !list_empty(&stopper->works); - spin_unlock_irqrestore(&stopper->lock, flags); + raw_spin_unlock_irqrestore(&stopper->lock, flags); return run; } @@ -461,13 +461,13 @@ static void cpu_stopper_thread(unsigned int cpu) repeat: work = NULL; - spin_lock_irq(&stopper->lock); + raw_spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, 
struct cpu_stop_work, list); list_del_init(&work->list); } - spin_unlock_irq(&stopper->lock); + raw_spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; @@ -475,6 +475,8 @@ repeat: struct cpu_stop_done *done = work->done; int ret; + /* XXX */ + /* cpu stop callbacks must not sleep, make in_atomic() == T */ preempt_count_inc(); ret = fn(arg); @@ -541,7 +543,7 @@ static int __init cpu_stop_init(void) for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - spin_lock_init(&stopper->lock); + raw_spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index bb5ec425dfe0..8338b14ed3a3 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -53,6 +53,7 @@ #include <asm/uaccess.h> #include <trace/events/timer.h> +#include <trace/events/hist.h> #include "tick-internal.h" @@ -695,6 +696,29 @@ static void hrtimer_switch_to_hres(void) retrigger_next_event(NULL); } +#ifdef CONFIG_PREEMPT_RT_FULL + +static struct swork_event clock_set_delay_work; + +static void run_clock_set_delay(struct swork_event *event) +{ + clock_was_set(); +} + +void clock_was_set_delayed(void) +{ + swork_queue(&clock_set_delay_work); +} + +static __init int create_clock_set_delay_thread(void) +{ + WARN_ON(swork_get()); + INIT_SWORK(&clock_set_delay_work, run_clock_set_delay); + return 0; +} +early_initcall(create_clock_set_delay_thread); +#else /* PREEMPT_RT_FULL */ + static void clock_was_set_work(struct work_struct *work) { clock_was_set(); @@ -710,6 +734,7 @@ void clock_was_set_delayed(void) { schedule_work(&hrtimer_work); } +#endif #else @@ -719,11 +744,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; } static inline void hrtimer_switch_to_hres(void) { } static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } -static inline int hrtimer_reprogram(struct hrtimer *timer, - struct hrtimer_clock_base *base) -{ - return 0; -} +static inline void hrtimer_reprogram(struct hrtimer *timer, + struct hrtimer_clock_base *base) { } static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } static inline void retrigger_next_event(void *arg) { } @@ -855,6 +877,32 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) } EXPORT_SYMBOL_GPL(hrtimer_forward); +#ifdef CONFIG_PREEMPT_RT_BASE +# define wake_up_timer_waiters(b) wake_up(&(b)->wait) + +/** + * hrtimer_wait_for_timer - Wait for a running timer + * + * @timer: timer to wait for + * + * The function waits in case the timers callback function is + * currently executed on the waitqueue of the timer base. The + * waitqueue is woken up after the timer callback function has + * finished execution. 
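The spinlock_t to raw_spinlock_t conversion in stop_machine.c above is the standard RT pattern: on PREEMPT_RT_FULL a plain spinlock_t becomes a sleeping rtmutex, which must never be taken in the stopper's IRQ-disabled sections. The general rule, as a generic sketch with a hypothetical lock:

static DEFINE_RAW_SPINLOCK(example_raw_lock);

static void example_hardirq_safe_path(void)
{
	unsigned long flags;

	/* stays a true spinning lock on RT; keep it short and non-sleeping */
	raw_spin_lock_irqsave(&example_raw_lock, flags);
	/* ... */
	raw_spin_unlock_irqrestore(&example_raw_lock, flags);
}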
+ */ +void hrtimer_wait_for_timer(const struct hrtimer *timer) +{ + struct hrtimer_clock_base *base = timer->base; + + if (base && base->cpu_base && !timer->irqsafe) + wait_event(base->cpu_base->wait, + !(hrtimer_callback_running(timer))); +} + +#else +# define wake_up_timer_waiters(b) do { } while (0) +#endif + /* * enqueue_hrtimer - internal function to (re)start a timer * @@ -896,6 +944,11 @@ static void __remove_hrtimer(struct hrtimer *timer, if (!(state & HRTIMER_STATE_ENQUEUED)) return; + if (unlikely(!list_empty(&timer->cb_entry))) { + list_del_init(&timer->cb_entry); + return; + } + if (!timerqueue_del(&base->active, &timer->node)) cpu_base->active_bases &= ~(1 << base->index); @@ -991,7 +1044,16 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); timer_stats_hrtimer_set_start_info(timer); +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + { + ktime_t now = new_base->get_time(); + if (ktime_to_ns(tim) < ktime_to_ns(now)) + timer->praecox = now; + else + timer->praecox = ktime_set(0, 0); + } +#endif leftmost = enqueue_hrtimer(timer, new_base); if (!leftmost) goto unlock; @@ -1063,7 +1125,7 @@ int hrtimer_cancel(struct hrtimer *timer) if (ret >= 0) return ret; - cpu_relax(); + hrtimer_wait_for_timer(timer); } } EXPORT_SYMBOL_GPL(hrtimer_cancel); @@ -1127,6 +1189,7 @@ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id, base = hrtimer_clockid_to_base(clock_id); timer->base = &cpu_base->clock_base[base]; + INIT_LIST_HEAD(&timer->cb_entry); timerqueue_init(&timer->node); #ifdef CONFIG_TIMER_STATS @@ -1167,6 +1230,7 @@ bool hrtimer_active(const struct hrtimer *timer) seq = raw_read_seqcount_begin(&cpu_base->seq); if (timer->state != HRTIMER_STATE_INACTIVE || + cpu_base->running_soft == timer || cpu_base->running == timer) return true; @@ -1265,10 +1329,112 @@ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base, cpu_base->running = NULL; } +#ifdef CONFIG_PREEMPT_RT_BASE +static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, + struct hrtimer_clock_base *base) +{ + int leftmost; + + if (restart != HRTIMER_NORESTART && + !(timer->state & HRTIMER_STATE_ENQUEUED)) { + + leftmost = enqueue_hrtimer(timer, base); + if (!leftmost) + return; +#ifdef CONFIG_HIGH_RES_TIMERS + if (!hrtimer_is_hres_active(timer)) { + /* + * Kick to reschedule the next tick to handle the new timer + * on dynticks target. + */ + if (base->cpu_base->nohz_active) + wake_up_nohz_cpu(base->cpu_base->cpu); + } else { + + hrtimer_reprogram(timer, base); + } +#endif + } +} + +/* + * The changes in mainline which removed the callback modes from + * hrtimer are not yet working with -rt. The non wakeup_process() + * based callbacks which involve sleeping locks need to be treated + * seperately. + */ +static void hrtimer_rt_run_pending(void) +{ + enum hrtimer_restart (*fn)(struct hrtimer *); + struct hrtimer_cpu_base *cpu_base; + struct hrtimer_clock_base *base; + struct hrtimer *timer; + int index, restart; + + local_irq_disable(); + cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); + + raw_spin_lock(&cpu_base->lock); + + for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { + base = &cpu_base->clock_base[index]; + + while (!list_empty(&base->expired)) { + timer = list_first_entry(&base->expired, + struct hrtimer, cb_entry); + + /* + * Same as the above __run_hrtimer function + * just we run with interrupts enabled. 
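+ *
+ * Per expired timer, the loop below effectively does, in sketch form:
+ *
+ *	raw_spin_lock_irq(&cpu_base->lock);
+ *	timer = list_first_entry(&base->expired, struct hrtimer, cb_entry);
+ *	cpu_base->running_soft = timer;
+ *	raw_spin_unlock_irq(&cpu_base->lock);
+ *	restart = timer->function(timer);
+ *	raw_spin_lock_irq(&cpu_base->lock);
+ *	hrtimer_rt_reprogram(restart, timer, base);
+ *
+ * so the callback itself may take sleeping locks, which is exactly why
+ * these timers cannot be run from hard interrupt context on RT.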
+ */ + debug_deactivate(timer); + cpu_base->running_soft = timer; + raw_write_seqcount_barrier(&cpu_base->seq); + + __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); + timer_stats_account_hrtimer(timer); + fn = timer->function; + + raw_spin_unlock_irq(&cpu_base->lock); + restart = fn(timer); + raw_spin_lock_irq(&cpu_base->lock); + + hrtimer_rt_reprogram(restart, timer, base); + raw_write_seqcount_barrier(&cpu_base->seq); + + WARN_ON_ONCE(cpu_base->running_soft != timer); + cpu_base->running_soft = NULL; + } + } + + raw_spin_unlock_irq(&cpu_base->lock); + + wake_up_timer_waiters(cpu_base); +} + +static int hrtimer_rt_defer(struct hrtimer *timer) +{ + if (timer->irqsafe) + return 0; + + __remove_hrtimer(timer, timer->base, timer->state, 0); + list_add_tail(&timer->cb_entry, &timer->base->expired); + return 1; +} + +#else + +static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } + +#endif + +static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); + static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) { struct hrtimer_clock_base *base = cpu_base->clock_base; unsigned int active = cpu_base->active_bases; + int raise = 0; for (; active; base++, active >>= 1) { struct timerqueue_node *node; @@ -1284,6 +1450,15 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) timer = container_of(node, struct hrtimer, node); + trace_hrtimer_interrupt(raw_smp_processor_id(), + ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? + timer->praecox : hrtimer_get_expires(timer), + basenow)), + current, + timer->function == hrtimer_wakeup ? + container_of(timer, struct hrtimer_sleeper, + timer)->task : NULL); + /* * The immediate goal for using the softexpires is * minimizing wakeups, not running timers at the @@ -1299,9 +1474,14 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) break; - __run_hrtimer(cpu_base, base, timer, &basenow); + if (!hrtimer_rt_defer(timer)) + __run_hrtimer(cpu_base, base, timer, &basenow); + else + raise = 1; } } + if (raise) + raise_softirq_irqoff(HRTIMER_SOFTIRQ); } #ifdef CONFIG_HIGH_RES_TIMERS @@ -1464,16 +1644,18 @@ static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer) void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) { sl->timer.function = hrtimer_wakeup; + sl->timer.irqsafe = 1; sl->task = task; } EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); -static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) +static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, + unsigned long state) { hrtimer_init_sleeper(t, current); do { - set_current_state(TASK_INTERRUPTIBLE); + set_current_state(state); hrtimer_start_expires(&t->timer, mode); if (likely(t->task)) @@ -1515,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart) HRTIMER_MODE_ABS); hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); - if (do_nanosleep(&t, HRTIMER_MODE_ABS)) + /* cpu_chill() does not care about restart state. 
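+	 * cpu_chill() sleeps in TASK_UNINTERRUPTIBLE, so it can never be
+	 * interrupted by a signal and never ends up in this restart path;
+	 * hardcoding TASK_INTERRUPTIBLE here is therefore safe. Callers of
+	 * cpu_chill() just retry their operation, e.g. (illustrative only):
+	 *	while (!do_trylock(...))
+	 *		cpu_chill();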
*/ + if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) goto out; rmtp = restart->nanosleep.rmtp; @@ -1532,8 +1715,10 @@ out: return ret; } -long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, - const enum hrtimer_mode mode, const clockid_t clockid) +static long +__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid, + unsigned long state) { struct restart_block *restart; struct hrtimer_sleeper t; @@ -1546,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, hrtimer_init_on_stack(&t.timer, clockid, mode); hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); - if (do_nanosleep(&t, mode)) + if (do_nanosleep(&t, mode, state)) goto out; /* Absolute timers do not update the rmtp value and restart: */ @@ -1573,6 +1758,12 @@ out: return ret; } +long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, + const enum hrtimer_mode mode, const clockid_t clockid) +{ + return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); +} + SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, struct timespec __user *, rmtp) { @@ -1587,6 +1778,26 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); } +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Sleep for 1 ms in hope whoever holds what we want will let it go. + */ +void cpu_chill(void) +{ + struct timespec tu = { + .tv_nsec = NSEC_PER_MSEC, + }; + unsigned int freeze_flag = current->flags & PF_NOFREEZE; + + current->flags |= PF_NOFREEZE; + __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, + TASK_UNINTERRUPTIBLE); + if (!freeze_flag) + current->flags &= ~PF_NOFREEZE; +} +EXPORT_SYMBOL(cpu_chill); +#endif + /* * Functions related to boot-time initialization: */ @@ -1598,10 +1809,14 @@ int hrtimers_prepare_cpu(unsigned int cpu) for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); + INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); } cpu_base->cpu = cpu; hrtimer_init_hres(cpu_base); +#ifdef CONFIG_PREEMPT_RT_BASE + init_waitqueue_head(&cpu_base->wait); +#endif return 0; } @@ -1671,9 +1886,26 @@ int hrtimers_dead_cpu(unsigned int scpu) #endif /* CONFIG_HOTPLUG_CPU */ +#ifdef CONFIG_PREEMPT_RT_BASE + +static void run_hrtimer_softirq(struct softirq_action *h) +{ + hrtimer_rt_run_pending(); +} + +static void hrtimers_open_softirq(void) +{ + open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); +} + +#else +static void hrtimers_open_softirq(void) { } +#endif + void __init hrtimers_init(void) { hrtimers_prepare_cpu(smp_processor_id()); + hrtimers_open_softirq(); } /** diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c index 1d5c7204ddc9..184de6751180 100644 --- a/kernel/time/itimer.c +++ b/kernel/time/itimer.c @@ -213,6 +213,7 @@ again: /* We are sharing ->siglock with it_real_fn() */ if (hrtimer_try_to_cancel(timer) < 0) { spin_unlock_irq(&tsk->sighand->siglock); + hrtimer_wait_for_timer(&tsk->signal->real_timer); goto again; } expires = timeval_to_ktime(value->it_value); diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 555e21f7b966..a5d6435fabbb 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -74,7 +74,8 @@ static struct clocksource clocksource_jiffies = { .max_cycles = 10, }; -__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); 
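+/*
+ * Note on the conversion below (a sketch of the rationale, not part of
+ * the original changelog): seqlock_t embeds a spinlock_t, which becomes
+ * a sleeping lock on RT, while the tick code takes jiffies_lock from
+ * hard interrupt context. The seqlock is therefore split into a
+ * raw_spinlock_t for writer exclusion plus a bare seqcount_t for
+ * lockless readers. The writer pattern used in tick-common.c,
+ * tick-sched.c and timekeeping.c further down becomes:
+ *
+ *	raw_spin_lock(&jiffies_lock);
+ *	write_seqcount_begin(&jiffies_seq);
+ *	do_timer(ticks);
+ *	write_seqcount_end(&jiffies_seq);
+ *	raw_spin_unlock(&jiffies_lock);
+ *
+ * while readers switch to read_seqcount_begin()/read_seqcount_retry().
+ */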
+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); +__cacheline_aligned_in_smp seqcount_t jiffies_seq; #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) @@ -83,9 +84,9 @@ u64 get_jiffies_64(void) u64 ret; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); ret = jiffies_64; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); return ret; } EXPORT_SYMBOL(get_jiffies_64); diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 6df8927c58a5..05b7391bf9bd 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/rtc.h> #include <linux/math64.h> +#include <linux/swork.h> #include "ntp_internal.h" #include "timekeeping_internal.h" @@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_struct *work) &sync_cmos_work, timespec64_to_jiffies(&next)); } +#ifdef CONFIG_PREEMPT_RT_FULL + +static void run_clock_set_delay(struct swork_event *event) +{ + queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); +} + +static struct swork_event ntp_cmos_swork; + +void ntp_notify_cmos_timer(void) +{ + swork_queue(&ntp_cmos_swork); +} + +static __init int create_cmos_delay_thread(void) +{ + WARN_ON(swork_get()); + INIT_SWORK(&ntp_cmos_swork, run_clock_set_delay); + return 0; +} +early_initcall(create_cmos_delay_thread); + +#else + void ntp_notify_cmos_timer(void) { queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); } +#endif /* CONFIG_PREEMPT_RT_FULL */ #else void ntp_notify_cmos_timer(void) { } diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 39008d78927a..633f4eaca9e7 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -3,6 +3,7 @@ */ #include <linux/sched.h> +#include <linux/sched/rt.h> #include <linux/posix-timers.h> #include <linux/errno.h> #include <linux/math64.h> @@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags, /* * Disarm any old timer after extracting its expiry time. */ - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE_NONRT(!irqs_disabled()); ret = 0; old_incr = timer->it.cpu.incr; @@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer) /* * Now re-arm for the new expiry time. */ - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE_NONRT(!irqs_disabled()); arm_timer(timer); unlock_task_sighand(p, &flags); @@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(struct task_struct *tsk) * already updated our counts. We need to check if any timers fire now. * Interrupts are disabled. 
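 *
 * On PREEMPT_RT_BASE the irq-context entry point below only queues the
 * task on a per-CPU list and wakes the matching posixcputmr/N thread;
 * the expiry work itself then runs from that thread via
 * __run_posix_cpu_timers() in a context that may take sleeping locks.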
 */
-void run_posix_cpu_timers(struct task_struct *tsk)
+static void __run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE_NONRT(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	}
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+#include <linux/kthread.h>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
+
+static int posix_cpu_timers_thread(void *data)
+{
+	int cpu = (long)data;
+
+	BUG_ON(per_cpu(posix_timer_task, cpu) != current);
+
+	while (!kthread_should_stop()) {
+		struct task_struct *tsk = NULL;
+		struct task_struct *next = NULL;
+
+		if (cpu_is_offline(cpu))
+			goto wait_to_die;
+
+		/* grab task list */
+		raw_local_irq_disable();
+		tsk = per_cpu(posix_timer_tasklist, cpu);
+		per_cpu(posix_timer_tasklist, cpu) = NULL;
+		raw_local_irq_enable();
+
+		/* it's possible the list is empty, just return */
+		if (!tsk) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule();
+			__set_current_state(TASK_RUNNING);
+			continue;
+		}
+
+		/* Process task list */
+		while (1) {
+			/* save next */
+			next = tsk->posix_timer_list;
+
+			/* run the task timers, clear its ptr and
+			 * unreference it
+			 */
+			__run_posix_cpu_timers(tsk);
+			tsk->posix_timer_list = NULL;
+			put_task_struct(tsk);
+
+			/* check if this is the last on the list */
+			if (next == tsk)
+				break;
+			tsk = next;
+		}
+	}
+	return 0;
+
+wait_to_die:
+	/* Wait for kthread_stop */
+	set_current_state(TASK_INTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_INTERRUPTIBLE);
+	}
+	__set_current_state(TASK_RUNNING);
+	return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
+{
+	/* tsk == current, ensure it is safe to use ->signal/sighand */
+	if (unlikely(tsk->exit_state))
+		return 0;
+
+	if (!task_cputime_zero(&tsk->cputime_expires))
+		return 1;
+
+	if (!task_cputime_zero(&tsk->signal->cputime_expires))
+		return 1;
+
+	return 0;
+}
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
+	unsigned long cpu = smp_processor_id();
+	struct task_struct *tasklist;
+
+	BUG_ON(!irqs_disabled());
+	if (!per_cpu(posix_timer_task, cpu))
+		return;
+	/* get per-cpu references */
+	tasklist = per_cpu(posix_timer_tasklist, cpu);
+
+	/* check to see if we're already queued */
+	if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
+		get_task_struct(tsk);
+		if (tasklist) {
+			tsk->posix_timer_list = tasklist;
+		} else {
+			/*
+			 * The list is terminated by a self-pointing
+			 * task_struct
+			 */
+			tsk->posix_timer_list = tsk;
+		}
+		per_cpu(posix_timer_tasklist, cpu) = tsk;
+
+		wake_up_process(per_cpu(posix_timer_task, cpu));
+	}
+}
+
+/*
+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
+ * Here we can start up the necessary migration thread for the new CPU.
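+ *
+ * In outline, the notifier actions below map to:
+ *	CPU_UP_PREPARE:		kthread_create() + kthread_bind(), made
+ *				SCHED_FIFO at MAX_RT_PRIO-1 so the expiry
+ *				thread cannot be starved
+ *	CPU_ONLINE:		wake_up_process() (strictly unnecessary,
+ *				the first queued task wakes it anyway)
+ *	CPU_UP_CANCELED:	rebind to an online CPU, then kthread_stop()
+ *	CPU_DEAD:		kthread_stop() and clear the per-CPU pointer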
+ */ +static int posix_cpu_thread_call(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + int cpu = (long)hcpu; + struct task_struct *p; + struct sched_param param; + + switch (action) { + case CPU_UP_PREPARE: + p = kthread_create(posix_cpu_timers_thread, hcpu, + "posixcputmr/%d",cpu); + if (IS_ERR(p)) + return NOTIFY_BAD; + p->flags |= PF_NOFREEZE; + kthread_bind(p, cpu); + /* Must be high prio to avoid getting starved */ + param.sched_priority = MAX_RT_PRIO-1; + sched_setscheduler(p, SCHED_FIFO, ¶m); + per_cpu(posix_timer_task,cpu) = p; + break; + case CPU_ONLINE: + /* Strictly unneccessary, as first user will wake it. */ + wake_up_process(per_cpu(posix_timer_task,cpu)); + break; +#ifdef CONFIG_HOTPLUG_CPU + case CPU_UP_CANCELED: + /* Unbind it from offline cpu so it can run. Fall thru. */ + kthread_bind(per_cpu(posix_timer_task, cpu), + cpumask_any(cpu_online_mask)); + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; + case CPU_DEAD: + kthread_stop(per_cpu(posix_timer_task,cpu)); + per_cpu(posix_timer_task,cpu) = NULL; + break; +#endif + } + return NOTIFY_OK; +} + +/* Register at highest priority so that task migration (migrate_all_tasks) + * happens before everything else. + */ +static struct notifier_block posix_cpu_thread_notifier = { + .notifier_call = posix_cpu_thread_call, + .priority = 10 +}; + +static int __init posix_cpu_thread_init(void) +{ + void *hcpu = (void *)(long)smp_processor_id(); + /* Start one for boot CPU. */ + unsigned long cpu; + + /* init the per-cpu posix_timer_tasklets */ + for_each_possible_cpu(cpu) + per_cpu(posix_timer_tasklist, cpu) = NULL; + + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); + posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); + register_cpu_notifier(&posix_cpu_thread_notifier); + return 0; +} +early_initcall(posix_cpu_thread_init); +#else /* CONFIG_PREEMPT_RT_BASE */ +void run_posix_cpu_timers(struct task_struct *tsk) +{ + __run_posix_cpu_timers(tsk); +} +#endif /* CONFIG_PREEMPT_RT_BASE */ + /* * Set one of the process-wide special case CPU timers or RLIMIT_CPU. * The tsk->sighand->siglock must be held by the caller. diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c index f2826c35e918..464a98155a0e 100644 --- a/kernel/time/posix-timers.c +++ b/kernel/time/posix-timers.c @@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer) static struct pid *good_sigevent(sigevent_t * event) { struct task_struct *rtn = current->group_leader; + int sig = event->sigev_signo; if ((event->sigev_notify & SIGEV_THREAD_ID ) && (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || @@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigevent_t * event) return NULL; if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && - ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) + (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || + sig_kernel_coredump(sig))) return NULL; return task_pid(rtn); @@ -826,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_t, timer_id) return overrun; } +/* + * Protected by RCU! + */ +static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) +{ +#ifdef CONFIG_PREEMPT_RT_FULL + if (kc->timer_set == common_timer_set) + hrtimer_wait_for_timer(&timr->it.real.timer); + else + /* FIXME: Whacky hack for posix-cpu-timers */ + schedule_timeout(1); +#endif +} + /* Set a POSIX.1b interval timer. */ /* timr->it_lock is taken. 
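 * On RT the hook may return TIMER_RETRY while the timer callback is
 * still executing in softirq context; the callers below then drop the
 * lock, wait via timer_wait_for_callback(), and retry the operation.
 * The rcu_read_lock() section around that wait keeps the timer
 * structure from being freed while it is still being waited on.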
*/ static int @@ -903,6 +919,7 @@ retry: if (!timr) return -EINVAL; + rcu_read_lock(); kc = clockid_to_kclock(timr->it_clock); if (WARN_ON_ONCE(!kc || !kc->timer_set)) error = -EINVAL; @@ -911,9 +928,12 @@ retry: unlock_timer(timr, flag); if (error == TIMER_RETRY) { + timer_wait_for_callback(kc, timr); rtn = NULL; // We already got the old time... + rcu_read_unlock(); goto retry; } + rcu_read_unlock(); if (old_setting && !error && copy_to_user(old_setting, &old_spec, sizeof (old_spec))) @@ -951,10 +971,15 @@ retry_delete: if (!timer) return -EINVAL; + rcu_read_lock(); if (timer_delete_hook(timer) == TIMER_RETRY) { unlock_timer(timer, flags); + timer_wait_for_callback(clockid_to_kclock(timer->it_clock), + timer); + rcu_read_unlock(); goto retry_delete; } + rcu_read_unlock(); spin_lock(¤t->sighand->siglock); list_del(&timer->list); @@ -980,8 +1005,18 @@ static void itimer_delete(struct k_itimer *timer) retry_delete: spin_lock_irqsave(&timer->it_lock, flags); + /* On RT we can race with a deletion */ + if (!timer->it_signal) { + unlock_timer(timer, flags); + return; + } + if (timer_delete_hook(timer) == TIMER_RETRY) { + rcu_read_lock(); unlock_timer(timer, flags); + timer_wait_for_callback(clockid_to_kclock(timer->it_clock), + timer); + rcu_read_unlock(); goto retry_delete; } list_del(&timer->list); diff --git a/kernel/time/tick-broadcast-hrtimer.c b/kernel/time/tick-broadcast-hrtimer.c index 690b797f522e..fe8ba1619879 100644 --- a/kernel/time/tick-broadcast-hrtimer.c +++ b/kernel/time/tick-broadcast-hrtimer.c @@ -107,5 +107,6 @@ void tick_setup_hrtimer_broadcast(void) { hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); bctimer.function = bc_handler; + bctimer.irqsafe = true; clockevents_register_device(&ce_broadcast_hrtimer); } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 4fcd99e12aa0..5a47f2e98faf 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -79,13 +79,15 @@ int tick_is_oneshot_available(void) static void tick_periodic(int cpu) { if (tick_do_timer_cpu == cpu) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Keep track of the next tick event */ tick_next_period = ktime_add(tick_next_period, tick_period); do_timer(1); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } @@ -157,9 +159,9 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) ktime_t next; do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); next = tick_next_period; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 3bcb61b52f6c..66d85482a96e 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(ktime_t now) return; /* Reevaluate with jiffies_lock held */ - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); delta = ktime_sub(now, last_jiffies_update); if (delta.tv64 >= tick_period.tv64) { @@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(ktime_t now) /* Keep the tick_next_period variable up to date */ tick_next_period = ktime_add(last_jiffies_update, tick_period); } else { - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); return; } - 
write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } @@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(void) { ktime_t period; - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); /* Did we start the jiffies update yet ? */ if (last_jiffies_update.tv64 == 0) last_jiffies_update = tick_next_period; period = last_jiffies_update; - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); return period; } @@ -215,6 +220,7 @@ static void nohz_full_kick_func(struct irq_work *work) static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { .func = nohz_full_kick_func, + .flags = IRQ_WORK_HARD_IRQ, }; /* @@ -673,10 +679,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts, /* Read jiffies and the time when jiffies were updated last */ do { - seq = read_seqbegin(&jiffies_lock); + seq = read_seqcount_begin(&jiffies_seq); basemono = last_jiffies_update.tv64; basejiff = jiffies; - } while (read_seqretry(&jiffies_lock, seq)); + } while (read_seqcount_retry(&jiffies_seq, seq)); ts->last_jiffies = basejiff; if (rcu_needs_cpu(basemono, &next_rcu) || @@ -877,14 +883,7 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts) return false; if (unlikely(local_softirq_pending() && cpu_online(cpu))) { - static int ratelimit; - - if (ratelimit < 10 && - (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { - pr_warn("NOHZ: local_softirq_pending %02x\n", - (unsigned int) local_softirq_pending()); - ratelimit++; - } + softirq_check_pending_idle(); return false; } @@ -1193,6 +1192,7 @@ void tick_setup_sched_timer(void) * Emulate tick processing via per-CPU hrtimers: */ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + ts->sched_timer.irqsafe = 1; ts->sched_timer.function = tick_sched_timer; /* Get the next period (per-CPU) */ diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index d831827d7ab0..76d982c11ac3 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2348,8 +2348,10 @@ EXPORT_SYMBOL(hardpps); */ void xtime_update(unsigned long ticks) { - write_seqlock(&jiffies_lock); + raw_spin_lock(&jiffies_lock); + write_seqcount_begin(&jiffies_seq); do_timer(ticks); - write_sequnlock(&jiffies_lock); + write_seqcount_end(&jiffies_seq); + raw_spin_unlock(&jiffies_lock); update_wall_time(); } diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index 704f595ce83f..763a3e5121ff 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h @@ -19,7 +19,8 @@ extern void timekeeping_resume(void); extern void do_timer(unsigned long ticks); extern void update_wall_time(void); -extern seqlock_t jiffies_lock; +extern raw_spinlock_t jiffies_lock; +extern seqcount_t jiffies_seq; #define CS_NAME_LEN 32 diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 7d670362891a..31703677505f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -193,8 +193,11 @@ EXPORT_SYMBOL(jiffies_64); #endif struct timer_base { - spinlock_t lock; + raw_spinlock_t lock; struct timer_list *running_timer; +#ifdef CONFIG_PREEMPT_RT_FULL + struct swait_queue_head wait_for_running_timer; +#endif unsigned long clk; unsigned long next_expiry; unsigned int cpu; @@ -955,10 +958,10 @@ static struct timer_base *lock_timer_base(struct timer_list *timer, if (!(tf & TIMER_MIGRATING)) { base = get_timer_base(tf); - spin_lock_irqsave(&base->lock, *flags); + 
raw_spin_lock_irqsave(&base->lock, *flags); if (timer->flags == tf) return base; - spin_unlock_irqrestore(&base->lock, *flags); + raw_spin_unlock_irqrestore(&base->lock, *flags); } cpu_relax(); } @@ -1037,9 +1040,9 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) /* See the comment in lock_timer_base() */ timer->flags |= TIMER_MIGRATING; - spin_unlock(&base->lock); + raw_spin_unlock(&base->lock); base = new_base; - spin_lock(&base->lock); + raw_spin_lock(&base->lock); WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | base->cpu); forward_timer_base(base); @@ -1062,7 +1065,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only) } out_unlock: - spin_unlock_irqrestore(&base->lock, flags); + raw_spin_unlock_irqrestore(&base->lock, flags); return ret; } @@ -1156,9 +1159,9 @@ void add_timer_on(struct timer_list *timer, int cpu) if (base != new_base) { timer->flags |= TIMER_MIGRATING; - spin_unlock(&base->lock); + raw_spin_unlock(&base->lock); base = new_base; - spin_lock(&base->lock); + raw_spin_lock(&base->lock); WRITE_ONCE(timer->flags, (timer->flags & ~TIMER_BASEMASK) | cpu); } @@ -1166,10 +1169,37 @@ void add_timer_on(struct timer_list *timer, int cpu) debug_activate(timer, timer->expires); internal_add_timer(base, timer); - spin_unlock_irqrestore(&base->lock, flags); + raw_spin_unlock_irqrestore(&base->lock, flags); } EXPORT_SYMBOL_GPL(add_timer_on); +#ifdef CONFIG_PREEMPT_RT_FULL +/* + * Wait for a running timer + */ +static void wait_for_running_timer(struct timer_list *timer) +{ + struct timer_base *base; + u32 tf = timer->flags; + + if (tf & TIMER_MIGRATING) + return; + + base = get_timer_base(tf); + swait_event(base->wait_for_running_timer, + base->running_timer != timer); +} + +# define wakeup_timer_waiters(b) swake_up_all(&(b)->wait_for_running_timer) +#else +static inline void wait_for_running_timer(struct timer_list *timer) +{ + cpu_relax(); +} + +# define wakeup_timer_waiters(b) do { } while (0) +#endif + /** * del_timer - deactive a timer. * @timer: the timer to be deactivated @@ -1193,7 +1223,7 @@ int del_timer(struct timer_list *timer) if (timer_pending(timer)) { base = lock_timer_base(timer, &flags); ret = detach_if_pending(timer, base, true); - spin_unlock_irqrestore(&base->lock, flags); + raw_spin_unlock_irqrestore(&base->lock, flags); } return ret; @@ -1221,13 +1251,13 @@ int try_to_del_timer_sync(struct timer_list *timer) timer_stats_timer_clear_start_info(timer); ret = detach_if_pending(timer, base, true); } - spin_unlock_irqrestore(&base->lock, flags); + raw_spin_unlock_irqrestore(&base->lock, flags); return ret; } EXPORT_SYMBOL(try_to_del_timer_sync); -#ifdef CONFIG_SMP +#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) /** * del_timer_sync - deactivate a timer and wait for the handler to finish. 
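 * (On PREEMPT_RT_FULL this is built even on UP kernels, since a
 * preempted softirq callback must be waited for there as well; the
 * wait sleeps on base->wait_for_running_timer instead of spinning.)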
* @timer: the timer to be deactivated @@ -1287,7 +1317,7 @@ int del_timer_sync(struct timer_list *timer) int ret = try_to_del_timer_sync(timer); if (ret >= 0) return ret; - cpu_relax(); + wait_for_running_timer(timer); } } EXPORT_SYMBOL(del_timer_sync); @@ -1352,14 +1382,17 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) fn = timer->function; data = timer->data; - if (timer->flags & TIMER_IRQSAFE) { - spin_unlock(&base->lock); + if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && + timer->flags & TIMER_IRQSAFE) { + raw_spin_unlock(&base->lock); call_timer_fn(timer, fn, data); - spin_lock(&base->lock); + base->running_timer = NULL; + raw_spin_lock(&base->lock); } else { - spin_unlock_irq(&base->lock); + raw_spin_unlock_irq(&base->lock); call_timer_fn(timer, fn, data); - spin_lock_irq(&base->lock); + base->running_timer = NULL; + raw_spin_lock_irq(&base->lock); } } } @@ -1528,7 +1561,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) if (cpu_is_offline(smp_processor_id())) return expires; - spin_lock(&base->lock); + raw_spin_lock(&base->lock); nextevt = __next_timer_interrupt(base); is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA); base->next_expiry = nextevt; @@ -1562,7 +1595,7 @@ u64 get_next_timer_interrupt(unsigned long basej, u64 basem) base->is_idle = true; } } - spin_unlock(&base->lock); + raw_spin_unlock(&base->lock); return cmp_next_hrtimer_event(basem, expires); } @@ -1627,13 +1660,13 @@ void update_process_times(int user_tick) /* Note: this timer irq context must be accounted for as well. */ account_process_tick(p, user_tick); + scheduler_tick(); run_local_timers(); rcu_check_callbacks(user_tick); -#ifdef CONFIG_IRQ_WORK +#if defined(CONFIG_IRQ_WORK) if (in_irq()) irq_work_tick(); #endif - scheduler_tick(); run_posix_cpu_timers(p); } @@ -1649,7 +1682,7 @@ static inline void __run_timers(struct timer_base *base) if (!time_after_eq(jiffies, base->clk)) return; - spin_lock_irq(&base->lock); + raw_spin_lock_irq(&base->lock); while (time_after_eq(jiffies, base->clk)) { @@ -1659,8 +1692,8 @@ static inline void __run_timers(struct timer_base *base) while (levels--) expire_timers(base, heads + levels); } - base->running_timer = NULL; - spin_unlock_irq(&base->lock); + raw_spin_unlock_irq(&base->lock); + wakeup_timer_waiters(base); } /* @@ -1683,6 +1716,8 @@ static __latent_entropy void run_timer_softirq(struct softirq_action *h) */ base->must_forward_clk = false; + irq_work_tick_soft(); + __run_timers(base); if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active) __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF])); @@ -1868,16 +1903,16 @@ int timers_dead_cpu(unsigned int cpu) * The caller is globally serialized and nobody else * takes two locks at once, deadlock is not possible. 
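 * The nesting order below stays new_base first, then old_base with
 * SINGLE_DEPTH_NESTING; the switch to raw_spinlock_t does not change
 * this, it only ensures the locks keep spinning (never sleep) on RT.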
 */
-		spin_lock_irq(&new_base->lock);
-		spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
+		raw_spin_lock_irq(&new_base->lock);
+		raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
 		BUG_ON(old_base->running_timer);
 
 		for (i = 0; i < WHEEL_SIZE; i++)
 			migrate_timer_list(new_base, old_base->vectors + i);
 
-		spin_unlock(&old_base->lock);
-		spin_unlock_irq(&new_base->lock);
+		raw_spin_unlock(&old_base->lock);
+		raw_spin_unlock_irq(&new_base->lock);
 		put_cpu_ptr(&timer_bases);
 	}
 	return 0;
@@ -1893,8 +1928,11 @@ static void __init init_timer_cpu(int cpu)
 	for (i = 0; i < NR_BASES; i++) {
 		base = per_cpu_ptr(&timer_bases[i], cpu);
 		base->cpu = cpu;
-		spin_lock_init(&base->lock);
+		raw_spin_lock_init(&base->lock);
 		base->clk = jiffies;
+#ifdef CONFIG_PREEMPT_RT_FULL
+		init_swait_queue_head(&base->wait_for_running_timer);
+#endif
 	}
 }
 
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 2a96b063d659..812e37237eb8 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -182,6 +182,24 @@ config IRQSOFF_TRACER
 	  enabled. This option and the preempt-off timing option can be
 	  used together or separately.)
 
+config INTERRUPT_OFF_HIST
+	bool "Interrupts-off Latency Histogram"
+	depends on IRQSOFF_TRACER
+	help
+	  This option generates continuously updated histograms (one per cpu)
+	  of the duration of time periods with interrupts disabled. The
+	  histograms are disabled by default. To enable them, write a non-zero
+	  number to
+
+	  /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+	  If PREEMPT_OFF_HIST is also selected, additional histograms (one
+	  per cpu) are generated that accumulate the duration of time periods
+	  when both interrupts and preemption are disabled. The histogram data
+	  will be located in the debug file system at
+
+	  /sys/kernel/debug/tracing/latency_hist/irqsoff
+
 config PREEMPT_TRACER
 	bool "Preemption-off Latency Tracer"
 	default n
@@ -206,6 +224,24 @@ config PREEMPT_TRACER
 	  enabled. This option and the irqs-off timing option can be used
 	  together or separately.)
 
+config PREEMPT_OFF_HIST
+	bool "Preemption-off Latency Histogram"
+	depends on PREEMPT_TRACER
+	help
+	  This option generates continuously updated histograms (one per cpu)
+	  of the duration of time periods with preemption disabled. The
+	  histograms are disabled by default. To enable them, write a non-zero
+	  number to
+
+	  /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff
+
+	  If INTERRUPT_OFF_HIST is also selected, additional histograms (one
+	  per cpu) are generated that accumulate the duration of time periods
+	  when both interrupts and preemption are disabled. The histogram data
+	  will be located in the debug file system at
+
+	  /sys/kernel/debug/tracing/latency_hist/preemptoff
+
 config SCHED_TRACER
 	bool "Scheduling Latency Tracer"
 	select GENERIC_TRACER
@@ -251,6 +287,74 @@ config HWLAT_TRACER
 	 file. Every time a latency is greater than tracing_thresh, it will
 	 be recorded into the ring buffer.
 
+config WAKEUP_LATENCY_HIST
+	bool "Scheduling Latency Histogram"
+	depends on SCHED_TRACER
+	help
+	  This option generates continuously updated histograms (one per cpu)
+	  of the scheduling latency of the highest priority task.
+	  The histograms are disabled by default. To enable them, write a
+	  non-zero number to
+
+	  /sys/kernel/debug/tracing/latency_hist/enable/wakeup
+
+	  Two different algorithms are used, one to determine the latency of
+	  processes that exclusively use the highest priority of the system and
+	  another one to determine the latency of processes that share the
+	  highest system priority with other processes. The former is used to
+	  improve hardware and system software, the latter to optimize the
+	  priority design of a given system. The histogram data will be
+	  located in the debug file system at
+
+	  /sys/kernel/debug/tracing/latency_hist/wakeup
+
+	  and
+
+	  /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio
+
+	  If both Scheduling Latency Histogram and Missed Timer Offsets
+	  Histogram are selected, additional histogram data will be collected
+	  that contain, in addition to the wakeup latency, the timer latency, in
+	  case the wakeup was triggered by an expired timer. These histograms
+	  are available in the
+
+	  /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+	  directory. They reflect the apparent interrupt and scheduling latency
+	  and are best suited to determine the worst-case latency of a given
+	  system. To enable these histograms, write a non-zero number to
+
+	  /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup
+
+config MISSED_TIMER_OFFSETS_HIST
+	depends on HIGH_RES_TIMERS
+	select GENERIC_TRACER
+	bool "Missed Timer Offsets Histogram"
+	help
+	  Generate a histogram of missed timer offsets in microseconds. The
+	  histograms are disabled by default. To enable them, write a non-zero
+	  number to
+
+	  /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets
+
+	  The histogram data will be located in the debug file system at
+
+	  /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets
+
+	  If both Scheduling Latency Histogram and Missed Timer Offsets
+	  Histogram are selected, additional histogram data will be collected
+	  that contain, in addition to the wakeup latency, the timer latency, in
+	  case the wakeup was triggered by an expired timer. These histograms
+	  are available in the
+
+	  /sys/kernel/debug/tracing/latency_hist/timerandwakeup
+
+	  directory. They reflect the apparent interrupt and scheduling latency
+	  and are best suited to determine the worst-case latency of a given
+	  system.
To enable these histograms, write a non-zero number to + + /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup + config ENABLE_DEFAULT_TRACERS bool "Trace process context switches and events" depends on !GENERIC_TRACER diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile index e57980845549..83af000b783c 100644 --- a/kernel/trace/Makefile +++ b/kernel/trace/Makefile @@ -38,6 +38,10 @@ obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o +obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o +obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o +obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o +obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o obj-$(CONFIG_NOP_TRACER) += trace_nop.o obj-$(CONFIG_STACK_TRACER) += trace_stack.o obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c new file mode 100644 index 000000000000..7f6ee70dea41 --- /dev/null +++ b/kernel/trace/latency_hist.c @@ -0,0 +1,1178 @@ +/* + * kernel/trace/latency_hist.c + * + * Add support for histograms of preemption-off latency and + * interrupt-off latency and wakeup latency, it depends on + * Real-Time Preemption Support. + * + * Copyright (C) 2005 MontaVista Software, Inc. + * Yi Yang <yyang@ch.mvista.com> + * + * Converted to work with the new latency tracer. + * Copyright (C) 2008 Red Hat, Inc. + * Steven Rostedt <srostedt@redhat.com> + * + */ +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/percpu.h> +#include <linux/kallsyms.h> +#include <linux/uaccess.h> +#include <linux/sched.h> +#include <linux/sched/rt.h> +#include <linux/slab.h> +#include <linux/atomic.h> +#include <asm/div64.h> + +#include "trace.h" +#include <trace/events/sched.h> + +#define NSECS_PER_USECS 1000L + +#define CREATE_TRACE_POINTS +#include <trace/events/hist.h> + +enum { + IRQSOFF_LATENCY = 0, + PREEMPTOFF_LATENCY, + PREEMPTIRQSOFF_LATENCY, + WAKEUP_LATENCY, + WAKEUP_LATENCY_SHAREDPRIO, + MISSED_TIMER_OFFSETS, + TIMERANDWAKEUP_LATENCY, + MAX_LATENCY_TYPE, +}; + +#define MAX_ENTRY_NUM 10240 + +struct hist_data { + atomic_t hist_mode; /* 0 log, 1 don't log */ + long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ + long min_lat; + long max_lat; + unsigned long long below_hist_bound_samples; + unsigned long long above_hist_bound_samples; + long long accumulate_lat; + unsigned long long total_samples; + unsigned long long hist_array[MAX_ENTRY_NUM]; +}; + +struct enable_data { + int latency_type; + int enabled; +}; + +static char *latency_hist_dir_root = "latency_hist"; + +#ifdef CONFIG_INTERRUPT_OFF_HIST +static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); +static char *irqsoff_hist_dir = "irqsoff"; +static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); +static DEFINE_PER_CPU(int, hist_irqsoff_counting); +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST +static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); +static char *preemptoff_hist_dir = "preemptoff"; +static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); +static DEFINE_PER_CPU(int, hist_preemptoff_counting); +#endif + +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) +static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); +static char *preemptirqsoff_hist_dir = "preemptirqsoff"; +static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); +static DEFINE_PER_CPU(int, 
hist_preemptirqsoff_counting); +#endif + +#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) +static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); +static struct enable_data preemptirqsoff_enabled_data = { + .latency_type = PREEMPTIRQSOFF_LATENCY, + .enabled = 0, +}; +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +struct maxlatproc_data { + char comm[FIELD_SIZEOF(struct task_struct, comm)]; + char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; + int pid; + int current_pid; + int prio; + int current_prio; + long latency; + long timeroffset; + cycle_t timestamp; +}; +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); +static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); +static char *wakeup_latency_hist_dir = "wakeup"; +static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; +static notrace void probe_wakeup_latency_hist_start(void *v, + struct task_struct *p); +static notrace void probe_wakeup_latency_hist_stop(void *v, + bool preempt, struct task_struct *prev, struct task_struct *next); +static notrace void probe_sched_migrate_task(void *, + struct task_struct *task, int cpu); +static struct enable_data wakeup_latency_enabled_data = { + .latency_type = WAKEUP_LATENCY, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); +static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); +static DEFINE_PER_CPU(struct task_struct *, wakeup_task); +static DEFINE_PER_CPU(int, wakeup_sharedprio); +static unsigned long wakeup_pid; +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST +static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); +static char *missed_timer_offsets_dir = "missed_timer_offsets"; +static notrace void probe_hrtimer_interrupt(void *v, int cpu, + long long offset, struct task_struct *curr, struct task_struct *task); +static struct enable_data missed_timer_offsets_enabled_data = { + .latency_type = MISSED_TIMER_OFFSETS, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); +static unsigned long missed_timer_offsets_pid; +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); +static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; +static struct enable_data timerandwakeup_enabled_data = { + .latency_type = TIMERANDWAKEUP_LATENCY, + .enabled = 0, +}; +static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); +#endif + +void notrace latency_hist(int latency_type, int cpu, long latency, + long timeroffset, cycle_t stop, + struct task_struct *p) +{ + struct hist_data *my_hist; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + struct maxlatproc_data *mp = NULL; +#endif + + if (!cpu_possible(cpu) || latency_type < 0 || + latency_type >= MAX_LATENCY_TYPE) + return; + + switch (latency_type) { +#ifdef CONFIG_INTERRUPT_OFF_HIST + case IRQSOFF_LATENCY: + my_hist = &per_cpu(irqsoff_hist, cpu); + break; +#endif +#ifdef CONFIG_PREEMPT_OFF_HIST + case PREEMPTOFF_LATENCY: + my_hist = &per_cpu(preemptoff_hist, cpu); + break; +#endif +#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + my_hist = &per_cpu(preemptirqsoff_hist, cpu); + break; +#endif +#ifdef 
CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + my_hist = &per_cpu(wakeup_latency_hist, cpu); + mp = &per_cpu(wakeup_maxlatproc, cpu); + break; + case WAKEUP_LATENCY_SHAREDPRIO: + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + my_hist = &per_cpu(missed_timer_offsets, cpu); + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); + mp = &per_cpu(timerandwakeup_maxlatproc, cpu); + break; +#endif + + default: + return; + } + + latency += my_hist->offset; + + if (atomic_read(&my_hist->hist_mode) == 0) + return; + + if (latency < 0 || latency >= MAX_ENTRY_NUM) { + if (latency < 0) + my_hist->below_hist_bound_samples++; + else + my_hist->above_hist_bound_samples++; + } else + my_hist->hist_array[latency]++; + + if (unlikely(latency > my_hist->max_lat || + my_hist->min_lat == LONG_MAX)) { +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + if (latency_type == WAKEUP_LATENCY || + latency_type == WAKEUP_LATENCY_SHAREDPRIO || + latency_type == MISSED_TIMER_OFFSETS || + latency_type == TIMERANDWAKEUP_LATENCY) { + strncpy(mp->comm, p->comm, sizeof(mp->comm)); + strncpy(mp->current_comm, current->comm, + sizeof(mp->current_comm)); + mp->pid = task_pid_nr(p); + mp->current_pid = task_pid_nr(current); + mp->prio = p->prio; + mp->current_prio = current->prio; + mp->latency = latency; + mp->timeroffset = timeroffset; + mp->timestamp = stop; + } +#endif + my_hist->max_lat = latency; + } + if (unlikely(latency < my_hist->min_lat)) + my_hist->min_lat = latency; + my_hist->total_samples++; + my_hist->accumulate_lat += latency; +} + +static void *l_start(struct seq_file *m, loff_t *pos) +{ + loff_t *index_ptr = NULL; + loff_t index = *pos; + struct hist_data *my_hist = m->private; + + if (index == 0) { + char minstr[32], avgstr[32], maxstr[32]; + + atomic_dec(&my_hist->hist_mode); + + if (likely(my_hist->total_samples)) { + long avg = (long) div64_s64(my_hist->accumulate_lat, + my_hist->total_samples); + snprintf(minstr, sizeof(minstr), "%ld", + my_hist->min_lat - my_hist->offset); + snprintf(avgstr, sizeof(avgstr), "%ld", + avg - my_hist->offset); + snprintf(maxstr, sizeof(maxstr), "%ld", + my_hist->max_lat - my_hist->offset); + } else { + strcpy(minstr, "<undef>"); + strcpy(avgstr, minstr); + strcpy(maxstr, minstr); + } + + seq_printf(m, "#Minimum latency: %s microseconds\n" + "#Average latency: %s microseconds\n" + "#Maximum latency: %s microseconds\n" + "#Total samples: %llu\n" + "#There are %llu samples lower than %ld" + " microseconds.\n" + "#There are %llu samples greater or equal" + " than %ld microseconds.\n" + "#usecs\t%16s\n", + minstr, avgstr, maxstr, + my_hist->total_samples, + my_hist->below_hist_bound_samples, + -my_hist->offset, + my_hist->above_hist_bound_samples, + MAX_ENTRY_NUM - my_hist->offset, + "samples"); + } + if (index < MAX_ENTRY_NUM) { + index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); + if (index_ptr) + *index_ptr = index; + } + + return index_ptr; +} + +static void *l_next(struct seq_file *m, void *p, loff_t *pos) +{ + loff_t *index_ptr = p; + struct hist_data *my_hist = m->private; + + if (++*pos >= MAX_ENTRY_NUM) { + atomic_inc(&my_hist->hist_mode); + return NULL; + } + *index_ptr = *pos; + return 
index_ptr; +} + +static void l_stop(struct seq_file *m, void *p) +{ + kfree(p); +} + +static int l_show(struct seq_file *m, void *p) +{ + int index = *(loff_t *) p; + struct hist_data *my_hist = m->private; + + seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, + my_hist->hist_array[index]); + return 0; +} + +static const struct seq_operations latency_hist_seq_op = { + .start = l_start, + .next = l_next, + .stop = l_stop, + .show = l_show +}; + +static int latency_hist_open(struct inode *inode, struct file *file) +{ + int ret; + + ret = seq_open(file, &latency_hist_seq_op); + if (!ret) { + struct seq_file *seq = file->private_data; + seq->private = inode->i_private; + } + return ret; +} + +static const struct file_operations latency_hist_fops = { + .open = latency_hist_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release, +}; + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static void clear_maxlatprocdata(struct maxlatproc_data *mp) +{ + mp->comm[0] = mp->current_comm[0] = '\0'; + mp->prio = mp->current_prio = mp->pid = mp->current_pid = + mp->latency = mp->timeroffset = -1; + mp->timestamp = 0; +} +#endif + +static void hist_reset(struct hist_data *hist) +{ + atomic_dec(&hist->hist_mode); + + memset(hist->hist_array, 0, sizeof(hist->hist_array)); + hist->below_hist_bound_samples = 0ULL; + hist->above_hist_bound_samples = 0ULL; + hist->min_lat = LONG_MAX; + hist->max_lat = LONG_MIN; + hist->total_samples = 0ULL; + hist->accumulate_lat = 0LL; + + atomic_inc(&hist->hist_mode); +} + +static ssize_t +latency_hist_reset(struct file *file, const char __user *a, + size_t size, loff_t *off) +{ + int cpu; + struct hist_data *hist = NULL; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + struct maxlatproc_data *mp = NULL; +#endif + off_t latency_type = (off_t) file->private_data; + + for_each_online_cpu(cpu) { + + switch (latency_type) { +#ifdef CONFIG_PREEMPT_OFF_HIST + case PREEMPTOFF_LATENCY: + hist = &per_cpu(preemptoff_hist, cpu); + break; +#endif +#ifdef CONFIG_INTERRUPT_OFF_HIST + case IRQSOFF_LATENCY: + hist = &per_cpu(irqsoff_hist, cpu); + break; +#endif +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + hist = &per_cpu(preemptirqsoff_hist, cpu); + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + hist = &per_cpu(wakeup_latency_hist, cpu); + mp = &per_cpu(wakeup_maxlatproc, cpu); + break; + case WAKEUP_LATENCY_SHAREDPRIO: + hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); + mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + hist = &per_cpu(missed_timer_offsets, cpu); + mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + hist = &per_cpu(timerandwakeup_latency_hist, cpu); + mp = &per_cpu(timerandwakeup_maxlatproc, cpu); + break; +#endif + } + + hist_reset(hist); +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + if (latency_type == WAKEUP_LATENCY || + latency_type == WAKEUP_LATENCY_SHAREDPRIO || + latency_type == MISSED_TIMER_OFFSETS || + latency_type == TIMERANDWAKEUP_LATENCY) + clear_maxlatprocdata(mp); +#endif + } + + return size; +} + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) 
+static ssize_t +show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + int r; + unsigned long *this_pid = file->private_data; + + r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t do_pid(struct file *file, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + char buf[64]; + unsigned long pid; + unsigned long *this_pid = file->private_data; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = '\0'; + + if (kstrtoul(buf, 10, &pid)) + return -EINVAL; + + *this_pid = pid; + + return cnt; +} +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static ssize_t +show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + int r; + struct maxlatproc_data *mp = file->private_data; + int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); + unsigned long long t; + unsigned long usecs, secs; + char *buf; + + if (mp->pid == -1 || mp->current_pid == -1) { + buf = "(none)\n"; + return simple_read_from_buffer(ubuf, cnt, ppos, buf, + strlen(buf)); + } + + buf = kmalloc(strmaxlen, GFP_KERNEL); + if (buf == NULL) + return -ENOMEM; + + t = ns2usecs(mp->timestamp); + usecs = do_div(t, USEC_PER_SEC); + secs = (unsigned long) t; + r = snprintf(buf, strmaxlen, + "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, + MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, + mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, + secs, usecs); + r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); + kfree(buf); + return r; +} +#endif + +static ssize_t +show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + struct enable_data *ed = file->private_data; + int r; + + r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); + return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); +} + +static ssize_t +do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) +{ + char buf[64]; + long enable; + struct enable_data *ed = file->private_data; + + if (cnt >= sizeof(buf)) + return -EINVAL; + + if (copy_from_user(&buf, ubuf, cnt)) + return -EFAULT; + + buf[cnt] = 0; + + if (kstrtoul(buf, 10, &enable)) + return -EINVAL; + + if ((enable && ed->enabled) || (!enable && !ed->enabled)) + return cnt; + + if (enable) { + int ret; + + switch (ed->latency_type) { +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + ret = register_trace_preemptirqsoff_hist( + probe_preemptirqsoff_hist, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_preemptirqsoff_hist " + "to trace_preemptirqsoff_hist\n"); + return ret; + } + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + ret = register_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_wakeup_latency_hist_start " + "to trace_sched_wakeup\n"); + return ret; + } + ret = register_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_wakeup_latency_hist_start " + "to trace_sched_wakeup_new\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + return ret; + } + ret = register_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't 
assign " + "probe_wakeup_latency_hist_stop " + "to trace_sched_switch\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + return ret; + } + ret = register_trace_sched_migrate_task( + probe_sched_migrate_task, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_sched_migrate_task " + "to trace_sched_migrate_task\n"); + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + return ret; + } + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + ret = register_trace_hrtimer_interrupt( + probe_hrtimer_interrupt, NULL); + if (ret) { + pr_info("wakeup trace: Couldn't assign " + "probe_hrtimer_interrupt " + "to trace_hrtimer_interrupt\n"); + return ret; + } + break; +#endif +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + case TIMERANDWAKEUP_LATENCY: + if (!wakeup_latency_enabled_data.enabled || + !missed_timer_offsets_enabled_data.enabled) + return -EINVAL; + break; +#endif + default: + break; + } + } else { + switch (ed->latency_type) { +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + case PREEMPTIRQSOFF_LATENCY: + { + int cpu; + + unregister_trace_preemptirqsoff_hist( + probe_preemptirqsoff_hist, NULL); + for_each_online_cpu(cpu) { +#ifdef CONFIG_INTERRUPT_OFF_HIST + per_cpu(hist_irqsoff_counting, + cpu) = 0; +#endif +#ifdef CONFIG_PREEMPT_OFF_HIST + per_cpu(hist_preemptoff_counting, + cpu) = 0; +#endif +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + per_cpu(hist_preemptirqsoff_counting, + cpu) = 0; +#endif + } + } + break; +#endif +#ifdef CONFIG_WAKEUP_LATENCY_HIST + case WAKEUP_LATENCY: + { + int cpu; + + unregister_trace_sched_wakeup( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_wakeup_new( + probe_wakeup_latency_hist_start, NULL); + unregister_trace_sched_switch( + probe_wakeup_latency_hist_stop, NULL); + unregister_trace_sched_migrate_task( + probe_sched_migrate_task, NULL); + + for_each_online_cpu(cpu) { + per_cpu(wakeup_task, cpu) = NULL; + per_cpu(wakeup_sharedprio, cpu) = 0; + } + } +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + timerandwakeup_enabled_data.enabled = 0; +#endif + break; +#endif +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + case MISSED_TIMER_OFFSETS: + unregister_trace_hrtimer_interrupt( + probe_hrtimer_interrupt, NULL); +#ifdef CONFIG_WAKEUP_LATENCY_HIST + timerandwakeup_enabled_data.enabled = 0; +#endif + break; +#endif + default: + break; + } + } + ed->enabled = enable; + return cnt; +} + +static const struct file_operations latency_hist_reset_fops = { + .open = tracing_open_generic, + .write = latency_hist_reset, +}; + +static const struct file_operations enable_fops = { + .open = tracing_open_generic, + .read = show_enable, + .write = do_enable, +}; + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) +static const struct file_operations pid_fops = { + .open = tracing_open_generic, + .read = show_pid, + .write = do_pid, +}; + +static const struct file_operations maxlatproc_fops = { + .open = tracing_open_generic, + .read = show_maxlatproc, +}; +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) +static notrace void probe_preemptirqsoff_hist(void *v, 
int reason, + int starthist) +{ + int cpu = raw_smp_processor_id(); + int time_set = 0; + + if (starthist) { + cycle_t uninitialized_var(start); + + if (!preempt_count() && !irqs_disabled()) + return; + +#ifdef CONFIG_INTERRUPT_OFF_HIST + if ((reason == IRQS_OFF || reason == TRACE_START) && + !per_cpu(hist_irqsoff_counting, cpu)) { + per_cpu(hist_irqsoff_counting, cpu) = 1; + start = ftrace_now(cpu); + time_set++; + per_cpu(hist_irqsoff_start, cpu) = start; + } +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + if ((reason == PREEMPT_OFF || reason == TRACE_START) && + !per_cpu(hist_preemptoff_counting, cpu)) { + per_cpu(hist_preemptoff_counting, cpu) = 1; + if (!(time_set++)) + start = ftrace_now(cpu); + per_cpu(hist_preemptoff_start, cpu) = start; + } +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + if (per_cpu(hist_irqsoff_counting, cpu) && + per_cpu(hist_preemptoff_counting, cpu) && + !per_cpu(hist_preemptirqsoff_counting, cpu)) { + per_cpu(hist_preemptirqsoff_counting, cpu) = 1; + if (!time_set) + start = ftrace_now(cpu); + per_cpu(hist_preemptirqsoff_start, cpu) = start; + } +#endif + } else { + cycle_t uninitialized_var(stop); + +#ifdef CONFIG_INTERRUPT_OFF_HIST + if ((reason == IRQS_ON || reason == TRACE_STOP) && + per_cpu(hist_irqsoff_counting, cpu)) { + cycle_t start = per_cpu(hist_irqsoff_start, cpu); + + stop = ftrace_now(cpu); + time_set++; + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, + stop, NULL); + } + per_cpu(hist_irqsoff_counting, cpu) = 0; + } +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + if ((reason == PREEMPT_ON || reason == TRACE_STOP) && + per_cpu(hist_preemptoff_counting, cpu)) { + cycle_t start = per_cpu(hist_preemptoff_start, cpu); + + if (!(time_set++)) + stop = ftrace_now(cpu); + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(PREEMPTOFF_LATENCY, cpu, latency, + 0, stop, NULL); + } + per_cpu(hist_preemptoff_counting, cpu) = 0; + } +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + if ((!per_cpu(hist_irqsoff_counting, cpu) || + !per_cpu(hist_preemptoff_counting, cpu)) && + per_cpu(hist_preemptirqsoff_counting, cpu)) { + cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); + + if (!time_set) + stop = ftrace_now(cpu); + if (start) { + long latency = ((long) (stop - start)) / + NSECS_PER_USECS; + + latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, + latency, 0, stop, NULL); + } + per_cpu(hist_preemptirqsoff_counting, cpu) = 0; + } +#endif + } +} +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST +static DEFINE_RAW_SPINLOCK(wakeup_lock); +static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, + int cpu) +{ + int old_cpu = task_cpu(task); + + if (cpu != old_cpu) { + unsigned long flags; + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); + if (task == cpu_wakeup_task) { + put_task_struct(cpu_wakeup_task); + per_cpu(wakeup_task, old_cpu) = NULL; + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; + get_task_struct(cpu_wakeup_task); + } + + raw_spin_unlock_irqrestore(&wakeup_lock, flags); + } +} + +static notrace void probe_wakeup_latency_hist_start(void *v, + struct task_struct *p) +{ + unsigned long flags; + struct task_struct *curr = current; + int cpu = task_cpu(p); + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = 
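/* Fetch the task currently tracked for a wakeup on this CPU, if any;
 * wakeup_lock, taken above, serializes this against the migrate and
 * switch probes that also update the per-CPU wakeup_task slot. */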
per_cpu(wakeup_task, cpu); + + if (wakeup_pid) { + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || + p->prio == curr->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + if (likely(wakeup_pid != task_pid_nr(p))) + goto out; + } else { + if (likely(!rt_task(p)) || + (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || + p->prio > curr->prio) + goto out; + if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || + p->prio == curr->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + } + + if (cpu_wakeup_task) + put_task_struct(cpu_wakeup_task); + cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; + get_task_struct(cpu_wakeup_task); + cpu_wakeup_task->preempt_timestamp_hist = + ftrace_now(raw_smp_processor_id()); +out: + raw_spin_unlock_irqrestore(&wakeup_lock, flags); +} + +static notrace void probe_wakeup_latency_hist_stop(void *v, + bool preempt, struct task_struct *prev, struct task_struct *next) +{ + unsigned long flags; + int cpu = task_cpu(next); + long latency; + cycle_t stop; + struct task_struct *cpu_wakeup_task; + + raw_spin_lock_irqsave(&wakeup_lock, flags); + + cpu_wakeup_task = per_cpu(wakeup_task, cpu); + + if (cpu_wakeup_task == NULL) + goto out; + + /* Already running? */ + if (unlikely(current == cpu_wakeup_task)) + goto out_reset; + + if (next != cpu_wakeup_task) { + if (next->prio < cpu_wakeup_task->prio) + goto out_reset; + + if (next->prio == cpu_wakeup_task->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + + goto out; + } + + if (current->prio == cpu_wakeup_task->prio) + per_cpu(wakeup_sharedprio, cpu) = 1; + + /* + * The task we are waiting for is about to be switched to. + * Calculate latency and store it in histogram. + */ + stop = ftrace_now(raw_smp_processor_id()); + + latency = ((long) (stop - next->preempt_timestamp_hist)) / + NSECS_PER_USECS; + + if (per_cpu(wakeup_sharedprio, cpu)) { + latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, + next); + per_cpu(wakeup_sharedprio, cpu) = 0; + } else { + latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + if (timerandwakeup_enabled_data.enabled) { + latency_hist(TIMERANDWAKEUP_LATENCY, cpu, + next->timer_offset + latency, next->timer_offset, + stop, next); + } +#endif + } + +out_reset: +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + next->timer_offset = 0; +#endif + put_task_struct(cpu_wakeup_task); + per_cpu(wakeup_task, cpu) = NULL; +out: + raw_spin_unlock_irqrestore(&wakeup_lock, flags); +} +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST +static notrace void probe_hrtimer_interrupt(void *v, int cpu, + long long latency_ns, struct task_struct *curr, + struct task_struct *task) +{ + if (latency_ns <= 0 && task != NULL && rt_task(task) && + (task->prio < curr->prio || + (task->prio == curr->prio && + !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { + long latency; + cycle_t now; + + if (missed_timer_offsets_pid) { + if (likely(missed_timer_offsets_pid != + task_pid_nr(task))) + return; + } + + now = ftrace_now(cpu); + latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); + latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, + task); +#ifdef CONFIG_WAKEUP_LATENCY_HIST + task->timer_offset = latency; +#endif + } +} +#endif + +static __init int latency_hist_init(void) +{ + struct dentry *latency_hist_root = NULL; + struct dentry *dentry; +#ifdef CONFIG_WAKEUP_LATENCY_HIST + struct dentry *dentry_sharedprio; +#endif + struct dentry *entry; + struct dentry *enable_root; + int i = 0; + struct hist_data *my_hist; + char name[64]; + char *cpufmt = 
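/* The code below builds the debugfs tree used from user space: one
 * CPU%d file per possible CPU in each histogram directory, a "reset"
 * file per directory, "pid" filters for the wakeup and missed-timer
 * variants, and one switch per histogram type under
 * latency_hist/enable/. */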
"CPU%d"; +#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + char *cpufmt_maxlatproc = "max_latency-CPU%d"; + struct maxlatproc_data *mp = NULL; +#endif + + dentry = tracing_init_dentry(); + latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); + enable_root = debugfs_create_dir("enable", latency_hist_root); + +#ifdef CONFIG_INTERRUPT_OFF_HIST + dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(irqsoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(irqsoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#ifdef CONFIG_PREEMPT_OFF_HIST + dentry = debugfs_create_dir(preemptoff_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(preemptoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(preemptoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) + dentry = debugfs_create_dir(preemptirqsoff_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); + my_hist = &per_cpu(preemptirqsoff_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); +#endif + +#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) + entry = debugfs_create_file("preemptirqsoff", 0644, + enable_root, (void *)&preemptirqsoff_enabled_data, + &enable_fops); +#endif + +#ifdef CONFIG_WAKEUP_LATENCY_HIST + dentry = debugfs_create_dir(wakeup_latency_hist_dir, + latency_hist_root); + dentry_sharedprio = debugfs_create_dir( + wakeup_latency_hist_dir_sharedprio, dentry); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(wakeup_latency_hist, i), + &latency_hist_fops); + my_hist = &per_cpu(wakeup_latency_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + entry = debugfs_create_file(name, 0444, dentry_sharedprio, + &per_cpu(wakeup_latency_hist_sharedprio, i), + &latency_hist_fops); + my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + + mp = &per_cpu(wakeup_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + + mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); + entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("pid", 0644, dentry, + (void *)&wakeup_pid, &pid_fops); + entry = debugfs_create_file("reset", 0644, dentry, + (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); + entry = debugfs_create_file("reset", 0644, dentry_sharedprio, + (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); + entry = 
debugfs_create_file("wakeup", 0644, + enable_root, (void *)&wakeup_latency_enabled_data, + &enable_fops); +#endif + +#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST + dentry = debugfs_create_dir(missed_timer_offsets_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(missed_timer_offsets, i), &latency_hist_fops); + my_hist = &per_cpu(missed_timer_offsets, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + mp = &per_cpu(missed_timer_offsets_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("pid", 0644, dentry, + (void *)&missed_timer_offsets_pid, &pid_fops); + entry = debugfs_create_file("reset", 0644, dentry, + (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); + entry = debugfs_create_file("missed_timer_offsets", 0644, + enable_root, (void *)&missed_timer_offsets_enabled_data, + &enable_fops); +#endif + +#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ + defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) + dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, + latency_hist_root); + for_each_possible_cpu(i) { + sprintf(name, cpufmt, i); + entry = debugfs_create_file(name, 0444, dentry, + &per_cpu(timerandwakeup_latency_hist, i), + &latency_hist_fops); + my_hist = &per_cpu(timerandwakeup_latency_hist, i); + atomic_set(&my_hist->hist_mode, 1); + my_hist->min_lat = LONG_MAX; + + sprintf(name, cpufmt_maxlatproc, i); + mp = &per_cpu(timerandwakeup_maxlatproc, i); + entry = debugfs_create_file(name, 0444, dentry, mp, + &maxlatproc_fops); + clear_maxlatprocdata(mp); + } + entry = debugfs_create_file("reset", 0644, dentry, + (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); + entry = debugfs_create_file("timerandwakeup", 0644, + enable_root, (void *)&timerandwakeup_enabled_data, + &enable_fops); +#endif + return 0; +} + +device_initcall(latency_hist_init); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index c1e50cc0d7b0..035e61604455 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, struct task_struct *tsk = current; entry->preempt_count = pc & 0xff; + entry->preempt_lazy_count = preempt_lazy_count(); entry->pid = (tsk) ? tsk->pid : 0; entry->flags = #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT @@ -1907,8 +1908,11 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags, ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) | ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) | - (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | + (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | + (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); + + entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; } EXPORT_SYMBOL_GPL(tracing_generic_entry_update); @@ -2898,14 +2902,17 @@ get_total_entries(struct trace_buffer *buf, static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n" - "# / _-----=> irqs-off \n" - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" - "# |||| / delay \n" - "# cmd pid ||||| time | caller \n" - "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _--------=> CPU# \n" + "# / _-------=> irqs-off \n" + "# | / _------=> need-resched \n" + "# || / _-----=> need-resched_lazy \n" + "# ||| / _----=> hardirq/softirq \n" + "# |||| / _---=> preempt-depth \n" + "# ||||| / _--=> preempt-lazy-depth\n" + "# |||||| / _-=> migrate-disable \n" + "# ||||||| / delay \n" + "# cmd pid |||||||| time | caller \n" + "# \\ / |||||||| \\ | / \n"); } static void print_event_info(struct trace_buffer *buf, struct seq_file *m) @@ -2931,11 +2938,14 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file print_event_info(buf, m); seq_puts(m, "# _-----=> irqs-off\n" "# / _----=> need-resched\n" - "# | / _---=> hardirq/softirq\n" - "# || / _--=> preempt-depth\n" - "# ||| / delay\n" - "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" - "# | | | |||| | |\n"); + "# |/ _-----=> need-resched_lazy\n" + "# || / _---=> hardirq/softirq\n" + "# ||| / _--=> preempt-depth\n" + "# |||| / _-=> preempt-lazy-depth\n" + "# ||||| / _-=> migrate-disable \n" + "# |||||| / delay\n" + "# TASK-PID CPU# ||||||| TIMESTAMP FUNCTION\n" + "# | | | ||||||| | |\n"); } void diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index b0d8576c27ae..702b9376b278 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head { * NEED_RESCHED - reschedule is requested * HARDIRQ - inside an interrupt handler * SOFTIRQ - inside a softirq handler + * NEED_RESCHED_LAZY - lazy reschedule is requested */ enum trace_flag_type { TRACE_FLAG_IRQS_OFF = 0x01, @@ -133,6 +134,7 @@ enum trace_flag_type { TRACE_FLAG_SOFTIRQ = 0x10, TRACE_FLAG_PREEMPT_RESCHED = 0x20, TRACE_FLAG_NMI = 0x40, + TRACE_FLAG_NEED_RESCHED_LAZY = 0x80, }; #define TRACE_BUF_SIZE 1024 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 03c0a48c3ac4..0b85d516b491 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -187,6 +187,8 @@ static int trace_define_common_fields(void) __common_field(unsigned char, flags); __common_field(unsigned char, preempt_count); __common_field(int, pid); + __common_field(unsigned short, migrate_disable); + __common_field(unsigned short, padding); return ret; } diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 03cdff84d026..940bd10b4406 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -13,6 +13,7 @@ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/ftrace.h> +#include <trace/events/hist.h> #include "trace.h" @@ -424,11 +425,13 @@ void start_critical_timings(void) { if (preempt_trace() || irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1); } EXPORT_SYMBOL_GPL(start_critical_timings); void stop_critical_timings(void) { + trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0); if (preempt_trace() || irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings); #ifdef CONFIG_PROVE_LOCKING void time_hardirqs_on(unsigned 
long a0, unsigned long a1) { + trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(a0, a1); } @@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1) { if (!preempt_trace() && irq_trace()) start_critical_timing(a0, a1); + trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1); } #else /* !CONFIG_PROVE_LOCKING */ @@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct task_struct *curr) */ void trace_hardirqs_on(void) { + trace_preemptirqsoff_hist(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); } @@ -480,11 +486,13 @@ void trace_hardirqs_off(void) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + trace_preemptirqsoff_hist(IRQS_OFF, 1); } EXPORT_SYMBOL(trace_hardirqs_off); __visible void trace_hardirqs_on_caller(unsigned long caller_addr) { + trace_preemptirqsoff_hist(IRQS_ON, 0); if (!preempt_trace() && irq_trace()) stop_critical_timing(CALLER_ADDR0, caller_addr); } @@ -494,6 +502,7 @@ __visible void trace_hardirqs_off_caller(unsigned long caller_addr) { if (!preempt_trace() && irq_trace()) start_critical_timing(CALLER_ADDR0, caller_addr); + trace_preemptirqsoff_hist(IRQS_OFF, 1); } EXPORT_SYMBOL(trace_hardirqs_off_caller); @@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller); #ifdef CONFIG_PREEMPT_TRACER void trace_preempt_on(unsigned long a0, unsigned long a1) { + trace_preemptirqsoff_hist(PREEMPT_ON, 0); if (preempt_trace() && !irq_trace()) stop_critical_timing(a0, a1); } void trace_preempt_off(unsigned long a0, unsigned long a1) { + trace_preemptirqsoff_hist(PREEMPT_OFF, 1); if (preempt_trace() && !irq_trace()) start_critical_timing(a0, a1); } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 3fc20422c166..65a6dde71a7d 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) { char hardsoft_irq; char need_resched; + char need_resched_lazy; char irqs_off; int hardirq; int softirq; @@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) break; } + need_resched_lazy = + (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; + hardsoft_irq = (nmi && hardirq) ? 'Z' : nmi ? 'z' : @@ -424,14 +428,25 @@ int trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry) softirq ? 's' : '.'
; - trace_seq_printf(s, "%c%c%c", - irqs_off, need_resched, hardsoft_irq); + trace_seq_printf(s, "%c%c%c%c", + irqs_off, need_resched, need_resched_lazy, + hardsoft_irq); if (entry->preempt_count) trace_seq_printf(s, "%x", entry->preempt_count); else trace_seq_putc(s, '.'); + if (entry->preempt_lazy_count) + trace_seq_printf(s, "%x", entry->preempt_lazy_count); + else + trace_seq_putc(s, '.'); + + if (entry->migrate_disable) + trace_seq_printf(s, "%x", entry->migrate_disable); + else + trace_seq_putc(s, '.'); + return !trace_seq_has_overflowed(s); } diff --git a/kernel/user.c b/kernel/user.c index b069ccbfb0b0..1a2e88e98b5e 100644 --- a/kernel/user.c +++ b/kernel/user.c @@ -161,11 +161,11 @@ void free_uid(struct user_struct *up) if (!up) return; - local_irq_save(flags); + local_irq_save_nort(flags); if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) free_user(up, flags); else - local_irq_restore(flags); + local_irq_restore_nort(flags); } struct user_struct *alloc_uid(kuid_t uid) diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 63177be0159e..59fe007ad496 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -381,6 +381,7 @@ static void watchdog_enable(unsigned int cpu) /* kick off the timer for the hardlockup detector */ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = watchdog_timer_fn; + hrtimer->irqsafe = 1; /* Enable the perf event */ watchdog_nmi_enable(cpu); diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c index 12b8dd640786..4c90d2ee7433 100644 --- a/kernel/watchdog_hld.c +++ b/kernel/watchdog_hld.c @@ -19,6 +19,7 @@ static DEFINE_PER_CPU(bool, hard_watchdog_warn); static DEFINE_PER_CPU(bool, watchdog_nmi_touch); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); +static DEFINE_RAW_SPINLOCK(watchdog_output_lock); /* boot commands */ /* @@ -104,6 +105,13 @@ static void watchdog_overflow_callback(struct perf_event *event, /* only print hardlockups once */ if (__this_cpu_read(hard_watchdog_warn) == true) return; + /* + * If early-printk is enabled then make sure we do not + * lock up in printk() and kill console logging: + */ + printk_kill(); + + raw_spin_lock(&watchdog_output_lock); pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); print_modules(); @@ -121,6 +129,7 @@ static void watchdog_overflow_callback(struct perf_event *event, !test_and_set_bit(0, &hardlockup_allcpu_dumped)) trigger_allbutself_cpu_backtrace(); + raw_spin_unlock(&watchdog_output_lock); if (hardlockup_panic) nmi_panic(regs, "Hard LOCKUP"); diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 776dda02e751..f8da6bd7702b 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -48,6 +48,8 @@ #include <linux/nodemask.h> #include <linux/moduleparam.h> #include <linux/uaccess.h> +#include <linux/locallock.h> +#include <linux/delay.h> #include "workqueue_internal.h" @@ -121,11 +123,16 @@ enum { * cpu or grabbing pool->lock is enough for read access. If * POOL_DISASSOCIATED is set, it's identical to L. * + * On RT we need the extra protection via rt_lock_idle_list() for + * the list manipulations against read access from + * wq_worker_sleeping(). All other places are nicely serialized via + * pool->lock. + * * A: pool->attach_mutex protected. * * PL: wq_pool_mutex protected. * - * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. + * PR: wq_pool_mutex protected for writes. RCU protected for reads. * * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 
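Read together, the trace_output.c hunk and the trace.h hunk above define how the extra latency-format columns decode. A minimal standalone model, not kernel code: print_lat_cols() is illustration only, the NEED_RESCHED and HARDIRQ flag values are not shown in this excerpt and are assumed to be the usual upstream ones, and the single 'N' character is a simplification of the full need-resched decoding.

#include <stdio.h>

#define TRACE_FLAG_IRQS_OFF          0x01
#define TRACE_FLAG_NEED_RESCHED      0x04	/* assumed upstream value */
#define TRACE_FLAG_HARDIRQ           0x08	/* assumed upstream value */
#define TRACE_FLAG_SOFTIRQ           0x10
#define TRACE_FLAG_NEED_RESCHED_LAZY 0x80	/* added by this patch */

static void print_lat_cols(unsigned int flags, unsigned int preempt,
			   unsigned int lazy, unsigned int migrate)
{
	printf("%c%c%c%c",
	       (flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
	       (flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.',
	       (flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.',
	       (flags & TRACE_FLAG_HARDIRQ) ? 'h' :
	       (flags & TRACE_FLAG_SOFTIRQ) ? 's' : '.');
	/* three depth columns: '.' when zero, hex digit otherwise */
	if (preempt)
		printf("%x", preempt);
	else
		printf(".");
	if (lazy)
		printf("%x", lazy);
	else
		printf(".");
	if (migrate)
		printf("%x", migrate);
	else
		printf(".");
	printf("\n");
}

int main(void)
{
	/* irqs off + lazy resched pending, preempt depth 1,
	 * migrate-disable depth 2: prints "d.L.1.2" */
	print_lat_cols(TRACE_FLAG_IRQS_OFF | TRACE_FLAG_NEED_RESCHED_LAZY,
		       1, 0, 2);
	return 0;
}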
* @@ -134,7 +141,7 @@ enum { * * WQ: wq->mutex protected. * - * WR: wq->mutex protected for writes. Sched-RCU protected for reads. + * WR: wq->mutex protected for writes. RCU protected for reads. * * MD: wq_mayday_lock protected. */ @@ -185,7 +192,7 @@ struct worker_pool { atomic_t nr_running ____cacheline_aligned_in_smp; /* - * Destruction of pool is sched-RCU protected to allow dereferences + * Destruction of pool is RCU protected to allow dereferences * from get_work_pool(). */ struct rcu_head rcu; @@ -214,7 +221,7 @@ struct pool_workqueue { /* * Release of unbound pwq is punted to system_wq. See put_pwq() * and pwq_unbound_release_workfn() for details. pool_workqueue - * itself is also sched-RCU protected so that the first pwq can be + * itself is also RCU protected so that the first pwq can be * determined without grabbing wq->mutex. */ struct work_struct unbound_release_work; @@ -348,6 +355,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient_wq); struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); +static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); + static int worker_thread(void *__worker); static void workqueue_sysfs_unregister(struct workqueue_struct *wq); @@ -355,20 +364,20 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); #include <trace/events/workqueue.h> #define assert_rcu_or_pool_mutex() \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq_pool_mutex), \ - "sched RCU or wq_pool_mutex should be held") + "RCU or wq_pool_mutex should be held") #define assert_rcu_or_wq_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq->mutex), \ - "sched RCU or wq->mutex should be held") + "RCU or wq->mutex should be held") #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ - RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ + RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ !lockdep_is_held(&wq->mutex) && \ !lockdep_is_held(&wq_pool_mutex), \ - "sched RCU, wq->mutex or wq_pool_mutex should be held") + "RCU, wq->mutex or wq_pool_mutex should be held") #define for_each_cpu_worker_pool(pool, cpu) \ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ @@ -380,7 +389,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); * @pool: iteration cursor * @pi: integer used for iteration * - * This must be called either with wq_pool_mutex held or sched RCU read + * This must be called either with wq_pool_mutex held or RCU read * locked. If the pool needs to be used beyond the locking in effect, the * caller is responsible for guaranteeing that the pool stays online. * @@ -412,7 +421,7 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); * @pwq: iteration cursor * @wq: the target workqueue * - * This must be called either with wq->mutex held or sched RCU read locked. + * This must be called either with wq->mutex held or RCU read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. 
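The sched-RCU to plain-RCU conversion above follows from RT semantics: on PREEMPT_RT a spinlock_t section may sleep, so it cannot sit inside a preempt-disabled rcu_read_lock_sched() region, whereas a plain rcu_read_lock() section can. A sketch of the resulting reader pattern; pool_is_busy() is illustration only, not part of the patch:

static bool pool_is_busy(struct work_struct *work)
{
	struct worker_pool *pool;
	bool busy = false;

	rcu_read_lock();			/* pins the pool */
	pool = get_work_pool(work);
	if (pool) {
		spin_lock_irq(&pool->lock);	/* a sleeping lock on RT */
		busy = !list_empty(&pool->worklist);
		spin_unlock_irq(&pool->lock);
	}
	rcu_read_unlock();
	return busy;
}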
* @@ -424,6 +433,31 @@ static void workqueue_sysfs_unregister(struct workqueue_struct *wq); if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ else +#ifdef CONFIG_PREEMPT_RT_BASE +static inline void rt_lock_idle_list(struct worker_pool *pool) +{ + preempt_disable(); +} +static inline void rt_unlock_idle_list(struct worker_pool *pool) +{ + preempt_enable(); +} +static inline void sched_lock_idle_list(struct worker_pool *pool) { } +static inline void sched_unlock_idle_list(struct worker_pool *pool) { } +#else +static inline void rt_lock_idle_list(struct worker_pool *pool) { } +static inline void rt_unlock_idle_list(struct worker_pool *pool) { } +static inline void sched_lock_idle_list(struct worker_pool *pool) +{ + spin_lock_irq(&pool->lock); +} +static inline void sched_unlock_idle_list(struct worker_pool *pool) +{ + spin_unlock_irq(&pool->lock); +} +#endif + + #ifdef CONFIG_DEBUG_OBJECTS_WORK static struct debug_obj_descr work_debug_descr; @@ -548,7 +582,7 @@ static int worker_pool_assign_id(struct worker_pool *pool) * @wq: the target workqueue * @node: the node ID * - * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU + * This must be called with any of wq_pool_mutex, wq->mutex or RCU * read locked. * If the pwq needs to be used beyond the locking in effect, the caller is * responsible for guaranteeing that the pwq stays online. @@ -692,8 +726,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) * @work: the work item of interest * * Pools are created and destroyed under wq_pool_mutex, and allows read - * access under sched-RCU read lock. As such, this function should be - * called under wq_pool_mutex or with preemption disabled. + * access under RCU read lock. As such, this function should be + * called under wq_pool_mutex or inside of a rcu_read_lock() region. * * All fields of the returned pool are accessible as long as the above * mentioned locking is in effect. If the returned pool needs to be used @@ -830,50 +864,45 @@ static struct worker *first_idle_worker(struct worker_pool *pool) */ static void wake_up_worker(struct worker_pool *pool) { - struct worker *worker = first_idle_worker(pool); + struct worker *worker; + + rt_lock_idle_list(pool); + + worker = first_idle_worker(pool); if (likely(worker)) wake_up_process(worker->task); + + rt_unlock_idle_list(pool); } /** - * wq_worker_waking_up - a worker is waking up + * wq_worker_running - a worker is running again * @task: task waking up - * @cpu: CPU @task is waking up to * - * This function is called during try_to_wake_up() when a worker is - * being awoken. - * - * CONTEXT: - * spin_lock_irq(rq->lock) + * This function is called when a worker returns from schedule() */ -void wq_worker_waking_up(struct task_struct *task, int cpu) +void wq_worker_running(struct task_struct *task) { struct worker *worker = kthread_data(task); - if (!(worker->flags & WORKER_NOT_RUNNING)) { - WARN_ON_ONCE(worker->pool->cpu != cpu); + if (!worker->sleeping) + return; + if (!(worker->flags & WORKER_NOT_RUNNING)) atomic_inc(&worker->pool->nr_running); - } + worker->sleeping = 0; } /** * wq_worker_sleeping - a worker is going to sleep * @task: task going to sleep * - * This function is called during schedule() when a busy worker is - * going to sleep. Worker on the same cpu can be woken up by - * returning pointer to its task. - * - * CONTEXT: - * spin_lock_irq(rq->lock) - * - * Return: - * Worker task on @cpu to wake up, %NULL if none. 
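The helpers above give idle_list two protection schemes: on PREEMPT_RT every list update is additionally bracketed by rt_lock_idle_list(), a preempt_disable() pair, so the peek in wake_up_worker() cannot race with a concurrent list operation; on !RT the rt_* variants are empty and sched_lock_idle_list() falls back to pool->lock. A sketch of the update-side idiom as applied throughout the patch; park_worker_example() is illustration only:

static void park_worker_example(struct worker_pool *pool,
				struct worker *worker)
{
	rt_lock_idle_list(pool);	/* RT: preempt_disable() */
	list_add(&worker->entry, &pool->idle_list);	/* LIFO */
	rt_unlock_idle_list(pool);
}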
+ * This function is called from schedule() when a busy worker is + * going to sleep. */ -struct task_struct *wq_worker_sleeping(struct task_struct *task) +void wq_worker_sleeping(struct task_struct *task) { - struct worker *worker = kthread_data(task), *to_wakeup = NULL; + struct worker *worker = kthread_data(task); struct worker_pool *pool; /* @@ -882,29 +911,26 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task) * checking NOT_RUNNING. */ if (worker->flags & WORKER_NOT_RUNNING) - return NULL; + return; pool = worker->pool; - /* this can only happen on the local cpu */ - if (WARN_ON_ONCE(pool->cpu != raw_smp_processor_id())) - return NULL; + if (WARN_ON_ONCE(worker->sleeping)) + return; + + worker->sleeping = 1; /* * The counterpart of the following dec_and_test, implied mb, * worklist not empty test sequence is in insert_work(). * Please read comment there. - * - * NOT_RUNNING is clear. This means that we're bound to and - * running on the local cpu w/ rq lock held and preemption - * disabled, which in turn means that none else could be - * manipulating idle_list, so dereferencing idle_list without pool - * lock is safe. */ if (atomic_dec_and_test(&pool->nr_running) && - !list_empty(&pool->worklist)) - to_wakeup = first_idle_worker(pool); - return to_wakeup ? to_wakeup->task : NULL; + !list_empty(&pool->worklist)) { + sched_lock_idle_list(pool); + wake_up_worker(pool); + sched_unlock_idle_list(pool); + } } /** @@ -1098,12 +1124,14 @@ static void put_pwq_unlocked(struct pool_workqueue *pwq) { if (pwq) { /* - * As both pwqs and pools are sched-RCU protected, the + * As both pwqs and pools are RCU protected, the * following lock operations are safe. */ - spin_lock_irq(&pwq->pool->lock); + rcu_read_lock(); + local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); put_pwq(pwq); - spin_unlock_irq(&pwq->pool->lock); + local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); + rcu_read_unlock(); } } @@ -1207,7 +1235,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, struct worker_pool *pool; struct pool_workqueue *pwq; - local_irq_save(*flags); + local_lock_irqsave(pendingb_lock, *flags); /* try to steal the timer if it exists */ if (is_dwork) { @@ -1226,6 +1254,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) return 0; + rcu_read_lock(); /* * The queueing is in progress, or it is already queued. Try to * steal it from ->worklist without clearing WORK_STRUCT_PENDING. @@ -1264,14 +1293,16 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, set_work_pool_and_keep_pending(work, pool->id); spin_unlock(&pool->lock); + rcu_read_unlock(); return 1; } spin_unlock(&pool->lock); fail: - local_irq_restore(*flags); + rcu_read_unlock(); + local_unlock_irqrestore(pendingb_lock, *flags); if (work_is_canceling(work)) return -ENOENT; - cpu_relax(); + cpu_chill(); return -EAGAIN; } @@ -1373,7 +1404,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, * queued or lose PENDING. Grabbing PENDING and queueing should * happen with IRQ disabled. 
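wq_worker_sleeping() above no longer runs under rq->lock and no longer returns a task for the scheduler to wake: it takes the idle_list protection itself (sched_lock_idle_list() on !RT) and its new counterpart wq_worker_running() restores the nr_running contribution on return from schedule(). The matching sched/core.c hunk is not part of this excerpt; a sketch of how the pair is assumed to be wired up there, with hypothetical helper names:

static void sched_submit_work(struct task_struct *tsk)
{
	/* a busy workqueue worker is about to block */
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_sleeping(tsk);
}

static void sched_update_worker(struct task_struct *tsk)
{
	/* the worker resumed: undo the accounting above */
	if (tsk->flags & PF_WQ_WORKER)
		wq_worker_running(tsk);
}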
*/ - WARN_ON_ONCE(!irqs_disabled()); + WARN_ON_ONCE_NONRT(!irqs_disabled()); debug_work_activate(work); @@ -1381,6 +1412,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq, if (unlikely(wq->flags & __WQ_DRAINING) && WARN_ON_ONCE(!is_chained_work(wq))) return; + rcu_read_lock(); retry: if (req_cpu == WORK_CPU_UNBOUND) cpu = wq_select_unbound_cpu(raw_smp_processor_id()); @@ -1437,10 +1469,8 @@ retry: /* pwq determined, queue */ trace_workqueue_queue_work(req_cpu, pwq, work); - if (WARN_ON(!list_empty(&work->entry))) { - spin_unlock(&pwq->pool->lock); - return; - } + if (WARN_ON(!list_empty(&work->entry))) + goto out; pwq->nr_in_flight[pwq->work_color]++; work_flags = work_color_to_flags(pwq->work_color); @@ -1458,7 +1488,9 @@ retry: insert_work(pwq, work, worklist, work_flags); +out: spin_unlock(&pwq->pool->lock); + rcu_read_unlock(); } /** @@ -1478,14 +1510,14 @@ bool queue_work_on(int cpu, struct workqueue_struct *wq, bool ret = false; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(pendingb_lock,flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_work(cpu, wq, work); ret = true; } - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } EXPORT_SYMBOL(queue_work_on); @@ -1552,14 +1584,14 @@ bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, unsigned long flags; /* read the comment in __queue_work() */ - local_irq_save(flags); + local_lock_irqsave(pendingb_lock, flags); if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { __queue_delayed_work(cpu, wq, dwork, delay); ret = true; } - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } EXPORT_SYMBOL(queue_delayed_work_on); @@ -1594,7 +1626,7 @@ bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, if (likely(ret >= 0)) { __queue_delayed_work(cpu, wq, dwork, delay); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); } /* -ENOENT from try_to_grab_pending() becomes %true */ @@ -1627,7 +1659,9 @@ static void worker_enter_idle(struct worker *worker) worker->last_active = jiffies; /* idle_list is LIFO */ + rt_lock_idle_list(pool); list_add(&worker->entry, &pool->idle_list); + rt_unlock_idle_list(pool); if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); @@ -1660,7 +1694,9 @@ static void worker_leave_idle(struct worker *worker) return; worker_clr_flags(worker, WORKER_IDLE); pool->nr_idle--; + rt_lock_idle_list(pool); list_del_init(&worker->entry); + rt_unlock_idle_list(pool); } static struct worker *alloc_worker(int node) @@ -1826,7 +1862,9 @@ static void destroy_worker(struct worker *worker) pool->nr_workers--; pool->nr_idle--; + rt_lock_idle_list(pool); list_del_init(&worker->entry); + rt_unlock_idle_list(pool); worker->flags |= WORKER_DIE; wake_up_process(worker->task); } @@ -2785,14 +2823,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) might_sleep(); - local_irq_disable(); + rcu_read_lock(); pool = get_work_pool(work); if (!pool) { - local_irq_enable(); + rcu_read_unlock(); return false; } - spin_lock(&pool->lock); + spin_lock_irq(&pool->lock); /* see the comment in try_to_grab_pending() with the same code */ pwq = get_work_pwq(work); if (pwq) { @@ -2821,10 +2859,11 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) else lock_map_acquire_read(&pwq->wq->lockdep_map); 
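try_to_grab_pending() and the queueing paths above replace bare local_irq_save() with the pendingb_lock local lock, and back off with cpu_chill() rather than cpu_relax(), since busy-spinning on RT could livelock against a preempted lock holder. A minimal sketch of the locallock idiom itself: locallock.h is an RT-series header, its !RT fallback to plain local_irq_save()/restore() is assumed here, and pending_example_lock/mark_pending_example() are illustration only.

#include <linux/locallock.h>
#include <linux/workqueue.h>

static DEFINE_LOCAL_IRQ_LOCK(pending_example_lock);

static bool mark_pending_example(struct work_struct *work)
{
	unsigned long flags;
	bool ret;

	/* RT: per-CPU spinlock, the section stays preemptible;
	 * !RT: compiles down to local_irq_save()/restore(). */
	local_lock_irqsave(pending_example_lock, flags);
	ret = !test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				work_data_bits(work));
	local_unlock_irqrestore(pending_example_lock, flags);
	return ret;
}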
lock_map_release(&pwq->wq->lockdep_map); - + rcu_read_unlock(); return true; already_gone: spin_unlock_irq(&pool->lock); + rcu_read_unlock(); return false; } @@ -2911,7 +2950,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) /* tell other tasks trying to grab @work to back off */ mark_work_canceling(work); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); flush_work(work); clear_work_data(work); @@ -2966,10 +3005,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); */ bool flush_delayed_work(struct delayed_work *dwork) { - local_irq_disable(); + local_lock_irq(pendingb_lock); if (del_timer_sync(&dwork->timer)) __queue_work(dwork->cpu, dwork->wq, &dwork->work); - local_irq_enable(); + local_unlock_irq(pendingb_lock); return flush_work(&dwork->work); } EXPORT_SYMBOL(flush_delayed_work); @@ -2987,7 +3026,7 @@ static bool __cancel_work(struct work_struct *work, bool is_dwork) return false; set_work_pool_and_clear_pending(work, get_work_pool_id(work)); - local_irq_restore(flags); + local_unlock_irqrestore(pendingb_lock, flags); return ret; } @@ -3245,7 +3284,7 @@ static void rcu_free_pool(struct rcu_head *rcu) * put_unbound_pool - put a worker_pool * @pool: worker_pool to put * - * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU + * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU * safe manner. get_unbound_pool() calls this function on its failure path * and this function should be able to release pools which went through, * successfully or not, init_worker_pool(). @@ -3299,8 +3338,8 @@ static void put_unbound_pool(struct worker_pool *pool) del_timer_sync(&pool->idle_timer); del_timer_sync(&pool->mayday_timer); - /* sched-RCU protected to allow dereferences from get_work_pool() */ - call_rcu_sched(&pool->rcu, rcu_free_pool); + /* RCU protected to allow dereferences from get_work_pool() */ + call_rcu(&pool->rcu, rcu_free_pool); } /** @@ -3407,14 +3446,14 @@ static void pwq_unbound_release_workfn(struct work_struct *work) put_unbound_pool(pool); mutex_unlock(&wq_pool_mutex); - call_rcu_sched(&pwq->rcu, rcu_free_pwq); + call_rcu(&pwq->rcu, rcu_free_pwq); /* * If we're the last pwq going away, @wq is already dead and no one * is gonna access it anymore. Schedule RCU free. */ if (is_last) - call_rcu_sched(&wq->rcu, rcu_free_wq); + call_rcu(&wq->rcu, rcu_free_wq); } /** @@ -4078,7 +4117,7 @@ void destroy_workqueue(struct workqueue_struct *wq) * The base ref is never dropped on per-cpu pwqs. Directly * schedule RCU free. */ - call_rcu_sched(&wq->rcu, rcu_free_wq); + call_rcu(&wq->rcu, rcu_free_wq); } else { /* * We're the sole accessor of @wq at this point. 
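The call_rcu_sched() to call_rcu() switches above are the write-side half of the reader conversion: the grace-period primitive must match the read-side marker, otherwise a pool or pwq could be freed while a now-preemptible rcu_read_lock() reader still dereferences it. A sketch of the resulting release path; refcounting is simplified and put_pool_example() is illustration only:

static void put_pool_example(struct worker_pool *pool)
{
	if (--pool->refcnt)	/* the real code holds wq_pool_mutex here */
		return;
	/* waits for all rcu_read_lock() readers, then frees */
	call_rcu(&pool->rcu, rcu_free_pool);
}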
Directly @@ -4172,7 +4211,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) struct pool_workqueue *pwq; bool ret; - rcu_read_lock_sched(); + rcu_read_lock(); + preempt_disable(); if (cpu == WORK_CPU_UNBOUND) cpu = smp_processor_id(); @@ -4183,7 +4223,8 @@ bool workqueue_congested(int cpu, struct workqueue_struct *wq) pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); ret = !list_empty(&pwq->delayed_works); - rcu_read_unlock_sched(); + preempt_enable(); + rcu_read_unlock(); return ret; } @@ -4209,15 +4250,15 @@ unsigned int work_busy(struct work_struct *work) if (work_pending(work)) ret |= WORK_BUSY_PENDING; - local_irq_save(flags); + rcu_read_lock(); pool = get_work_pool(work); if (pool) { - spin_lock(&pool->lock); + spin_lock_irqsave(&pool->lock, flags); if (find_worker_executing_work(pool, work)) ret |= WORK_BUSY_RUNNING; - spin_unlock(&pool->lock); + spin_unlock_irqrestore(&pool->lock, flags); } - local_irq_restore(flags); + rcu_read_unlock(); return ret; } @@ -4406,7 +4447,7 @@ void show_workqueue_state(void) unsigned long flags; int pi; - rcu_read_lock_sched(); + rcu_read_lock(); pr_info("Showing busy workqueues and worker pools:\n"); @@ -4459,7 +4500,7 @@ void show_workqueue_state(void) spin_unlock_irqrestore(&pool->lock, flags); } - rcu_read_unlock_sched(); + rcu_read_unlock(); } /* @@ -4797,16 +4838,16 @@ bool freeze_workqueues_busy(void) * nr_active is monotonically decreasing. It's safe * to peek without lock. */ - rcu_read_lock_sched(); + rcu_read_lock(); for_each_pwq(pwq, wq) { WARN_ON_ONCE(pwq->nr_active < 0); if (pwq->nr_active) { busy = true; - rcu_read_unlock_sched(); + rcu_read_unlock(); goto out_unlock; } } - rcu_read_unlock_sched(); + rcu_read_unlock(); } out_unlock: mutex_unlock(&wq_pool_mutex); @@ -4996,7 +5037,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, const char *delim = ""; int node, written = 0; - rcu_read_lock_sched(); + get_online_cpus(); + rcu_read_lock(); for_each_node(node) { written += scnprintf(buf + written, PAGE_SIZE - written, "%s%d:%d", delim, node, @@ -5004,7 +5046,8 @@ static ssize_t wq_pool_ids_show(struct device *dev, delim = " "; } written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); - rcu_read_unlock_sched(); + rcu_read_unlock(); + put_online_cpus(); return written; } diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h index 8635417c587b..f000c4d6917e 100644 --- a/kernel/workqueue_internal.h +++ b/kernel/workqueue_internal.h @@ -43,6 +43,7 @@ struct worker { unsigned long last_active; /* L: last active timestamp */ unsigned int flags; /* X: flags */ int id; /* I: worker id */ + int sleeping; /* None */ /* * Opaque string set with work_set_desc(). Printed out with task @@ -68,7 +69,7 @@ static inline struct worker *current_wq_worker(void) * Scheduler hooks for concurrency managed workqueue. Only to be used from * sched/core.c and workqueue.c. */ -void wq_worker_waking_up(struct task_struct *task, int cpu); -struct task_struct *wq_worker_sleeping(struct task_struct *task); +void wq_worker_running(struct task_struct *task); +void wq_worker_sleeping(struct task_struct *task); #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 260a80e313b9..b06becb3f477 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -400,6 +400,7 @@ config CHECK_SIGNATURE config CPUMASK_OFFSTACK bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS + depends on !PREEMPT_RT_FULL help Use dynamic allocation for cpumask_var_t, instead of putting them on the stack. 
This is a bit more expensive, but avoids diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 056052dc8e91..d8494e126de8 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -308,7 +308,10 @@ __debug_object_init(void *addr, struct debug_obj_descr *descr, int onstack) struct debug_obj *obj; unsigned long flags; - fill_pool(); +#ifdef CONFIG_PREEMPT_RT_FULL + if (preempt_count() == 0 && !irqs_disabled()) +#endif + fill_pool(); db = get_bucket((unsigned long) addr); diff --git a/lib/idr.c b/lib/idr.c index 6098336df267..9decbe914595 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -30,6 +30,7 @@ #include <linux/idr.h> #include <linux/spinlock.h> #include <linux/percpu.h> +#include <linux/locallock.h> #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) @@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *, idr_preload_head); static DEFINE_PER_CPU(int, idr_preload_cnt); static DEFINE_SPINLOCK(simple_ida_lock); +#ifdef CONFIG_PREEMPT_RT_FULL +static DEFINE_LOCAL_IRQ_LOCK(idr_lock); + +static inline void idr_preload_lock(void) +{ + local_lock(idr_lock); +} + +static inline void idr_preload_unlock(void) +{ + local_unlock(idr_lock); +} + +void idr_preload_end(void) +{ + idr_preload_unlock(); +} +EXPORT_SYMBOL(idr_preload_end); +#else +static inline void idr_preload_lock(void) +{ + preempt_disable(); +} + +static inline void idr_preload_unlock(void) +{ + preempt_enable(); +} +#endif + + /* the maximum ID which can be allocated given idr->layers */ static int idr_max(int layers) { @@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr) * context. See idr_preload() for details. */ if (!in_interrupt()) { - preempt_disable(); + idr_preload_lock(); new = __this_cpu_read(idr_preload_head); if (new) { __this_cpu_write(idr_preload_head, new->ary[0]); __this_cpu_dec(idr_preload_cnt); new->ary[0] = NULL; } - preempt_enable(); + idr_preload_unlock(); if (new) return new; } @@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id, idr_mark_full(pa, id); } - /** * idr_preload - preload for idr_alloc() * @gfp_mask: allocation mask to use for preloading @@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask) WARN_ON_ONCE(in_interrupt()); might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - preempt_disable(); + idr_preload_lock(); /* * idr_alloc() is likely to succeed w/o full idr_layer buffer and @@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask) while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { struct idr_layer *new; - preempt_enable(); + idr_preload_unlock(); new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); - preempt_disable(); + idr_preload_lock(); if (!new) break; diff --git a/lib/irq_poll.c b/lib/irq_poll.c index 1d6565e81030..b23a79761df7 100644 --- a/lib/irq_poll.c +++ b/lib/irq_poll.c @@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop) list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(irq_poll_sched); @@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *iop) local_irq_save(flags); __irq_poll_complete(iop); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(irq_poll_complete); @@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) } local_irq_enable(); + preempt_check_resched_rt(); /* Even though interrupts have been re-enabled, this * access is safe because interrupts can only add new @@ -132,6 
+135,7 @@ static void __latent_entropy irq_poll_softirq(struct softirq_action *h) __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); } /** @@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned int cpu) this_cpu_ptr(&blk_cpu_iopoll)); __raise_softirq_irqoff(IRQ_POLL_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); return 0; } diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c index f3a217ea0388..4611b156ef79 100644 --- a/lib/locking-selftest.c +++ b/lib/locking-selftest.c @@ -590,6 +590,8 @@ GENERATE_TESTCASE(init_held_rsem) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) @@ -605,9 +607,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) +#endif + #undef E1 #undef E2 +#ifndef CONFIG_PREEMPT_RT_FULL /* * Enabling hardirqs with a softirq-safe lock held: */ @@ -640,6 +645,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #undef E1 #undef E2 +#endif + /* * Enabling irqs with an irq-safe lock held: */ @@ -663,6 +670,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) @@ -678,6 +687,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) +#endif + #undef E1 #undef E2 @@ -709,6 +720,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) @@ -724,6 +737,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) +#endif + #undef E1 #undef E2 #undef E3 @@ -757,6 +772,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) #include "locking-selftest-spin-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) +#ifndef CONFIG_PREEMPT_RT_FULL + #include "locking-selftest-rlock-hardirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) @@ -772,10 +789,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock) #include "locking-selftest-wlock-softirq.h" GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) +#endif + #undef E1 #undef E2 #undef E3 +#ifndef CONFIG_PREEMPT_RT_FULL + /* * read-lock / write-lock irq inversion. * @@ -838,6 +859,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock) #undef E2 #undef E3 +#endif + +#ifndef CONFIG_PREEMPT_RT_FULL + /* * read-lock / write-lock recursion that is actually safe. */ @@ -876,6 +901,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft) #undef E2 #undef E3 +#endif + /* * read-lock / write-lock recursion that is unsafe. 
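The #ifndef CONFIG_PREEMPT_RT_FULL blocks above compile the rwlock-based and softirq-based irq-context testcases out: on RT, rwlock_t is a sleeping lock and softirqs run in thread context, so those permutations no longer describe irq-safe locking, and only the hard-spin subset is exercised, per the patch's own comment further down ("On -rt, we only do hardirq context test for raw spinlock"). The guard idiom, quoted in miniature from the hunk above:

#ifndef CONFIG_PREEMPT_RT_FULL
/* rwlock_t sleeps on RT: no hardirq-context testcase for it */
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
#endif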
*/ @@ -1858,6 +1885,7 @@ void locking_selftest(void) printk(" --------------------------------------------------------------------------\n"); +#ifndef CONFIG_PREEMPT_RT_FULL /* * irq-context testcases: */ @@ -1870,6 +1898,28 @@ void locking_selftest(void) DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); +#else + /* On -rt, we only do hardirq context test for raw spinlock */ + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); + DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); + DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); + + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); + DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); +#endif ww_tests(); diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c index 6d40944960de..822a2c027e72 100644 --- a/lib/percpu_ida.c +++ b/lib/percpu_ida.c @@ -26,6 +26,9 @@ #include <linux/string.h> #include <linux/spinlock.h> #include <linux/percpu_ida.h> +#include <linux/locallock.h> + +static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); struct percpu_ida_cpu { /* @@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) unsigned long flags; int tag; - local_irq_save(flags); + local_lock_irqsave(irq_off_lock, flags); tags = this_cpu_ptr(pool->tag_cpu); /* Fastpath */ tag = alloc_local_tag(tags); if (likely(tag >= 0)) { - local_irq_restore(flags); + local_unlock_irqrestore(irq_off_lock, flags); return tag; } @@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) if (!tags->nr_free) alloc_global_tags(pool, tags); + if (!tags->nr_free) steal_tags(pool, tags); @@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) } spin_unlock(&pool->lock); - local_irq_restore(flags); + local_unlock_irqrestore(irq_off_lock, flags); if (tag >= 0 || state == TASK_RUNNING) break; @@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state) schedule(); - local_irq_save(flags); + local_lock_irqsave(irq_off_lock, flags); tags = this_cpu_ptr(pool->tag_cpu); } if (state != TASK_RUNNING) @@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) BUG_ON(tag >= pool->nr_tags); - local_irq_save(flags); + local_lock_irqsave(irq_off_lock, flags); tags = this_cpu_ptr(pool->tag_cpu); spin_lock(&tags->lock); @@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *pool, unsigned tag) spin_unlock(&pool->lock); } - local_irq_restore(flags); + local_unlock_irqrestore(irq_off_lock, flags); } EXPORT_SYMBOL_GPL(percpu_ida_free); @@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, struct percpu_ida_cpu *remote; unsigned cpu, 
i, err = 0; - local_irq_save(flags); + local_lock_irqsave(irq_off_lock, flags); for_each_possible_cpu(cpu) { remote = per_cpu_ptr(pool->tag_cpu, cpu); spin_lock(&remote->lock); @@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn, } spin_unlock(&pool->lock); out: - local_irq_restore(flags); + local_unlock_irqrestore(irq_off_lock, flags); return err; } EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 8e6d552c40dd..741da5a77fd5 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -36,7 +36,7 @@ #include <linux/bitops.h> #include <linux/rcupdate.h> #include <linux/preempt.h> /* in_interrupt() */ - +#include <linux/locallock.h> /* Number of nodes in fully populated tree of given height */ static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly; @@ -68,6 +68,7 @@ struct radix_tree_preload { struct radix_tree_node *nodes; }; static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; +static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock); static inline void *node_to_entry(void *ptr) { @@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_root *root) * succeed in getting a node here (and never reach * kmem_cache_alloc) */ - rtp = this_cpu_ptr(&radix_tree_preloads); + rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads); if (rtp->nr) { ret = rtp->nodes; rtp->nodes = ret->private_data; ret->private_data = NULL; rtp->nr--; } + put_locked_var(radix_tree_preloads_lock, radix_tree_preloads); /* * Update the allocation stack trace as this is more useful * for debugging. @@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gfp_mask, int nr) */ gfp_mask &= ~__GFP_ACCOUNT; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); while (rtp->nr < nr) { - preempt_enable(); + local_unlock(radix_tree_preloads_lock); node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask); if (node == NULL) goto out; - preempt_disable(); + local_lock(radix_tree_preloads_lock); rtp = this_cpu_ptr(&radix_tree_preloads); if (rtp->nr < nr) { node->private_data = rtp->nodes; @@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_mask) if (gfpflags_allow_blocking(gfp_mask)) return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE); /* Preloading doesn't help anything with this gfp mask, skip it */ - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } EXPORT_SYMBOL(radix_tree_maybe_preload); @@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) /* Preloading doesn't help anything with this gfp mask, skip it */ if (!gfpflags_allow_blocking(gfp_mask)) { - preempt_disable(); + local_lock(radix_tree_preloads_lock); return 0; } @@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order) return __radix_tree_preload(gfp_mask, nr_nodes); } +void radix_tree_preload_end(void) +{ + local_unlock(radix_tree_preloads_lock); +} +EXPORT_SYMBOL(radix_tree_preload_end); + /* * The maximum index which can be stored in a radix tree */ diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 004fc70fc56a..ccc46992a517 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -620,7 +620,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter) flush_kernel_dcache_page(miter->page); if (miter->__flags & SG_MITER_ATOMIC) { - WARN_ON_ONCE(preemptible()); + WARN_ON_ONCE(!pagefault_disabled()); kunmap_atomic(miter->addr); } else kunmap(miter->page); @@ -664,7 +664,7 @@ 
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, if (!sg_miter_skip(&miter, skip)) return false; - local_irq_save(flags); + local_irq_save_nort(flags); while (sg_miter_next(&miter) && offset < buflen) { unsigned int len; @@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, sg_miter_stop(&miter); - local_irq_restore(flags); + local_irq_restore_nort(flags); return offset; } EXPORT_SYMBOL(sg_copy_buffer); diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 1afec32de6f2..11fa431046a8 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -39,8 +39,9 @@ notrace static unsigned int check_preemption_disabled(const char *what1, if (!printk_ratelimit()) goto out_enable; - printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", - what1, what2, preempt_count() - 1, current->comm, current->pid); + printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n", + what1, what2, preempt_count() - 1, __migrate_disabled(current), + current->comm, current->pid); print_symbol("caller is %s\n", (long)__builtin_return_address(0)); dump_stack(); diff --git a/localversion-rt b/localversion-rt new file mode 100644 index 000000000000..38c40b21a885 --- /dev/null +++ b/localversion-rt @@ -0,0 +1 @@ +-rt45 diff --git a/mm/Kconfig b/mm/Kconfig index 86e3e0e74d20..77e5862a1ed2 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -410,7 +410,7 @@ config NOMMU_INITIAL_TRIM_EXCESS config TRANSPARENT_HUGEPAGE bool "Transparent Hugepage Support" - depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE + depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL select COMPACTION select RADIX_TREE_MULTIORDER help diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 6ff2d7744223..b5a91dd53b5f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeback_congested *congested) { unsigned long flags; - local_irq_save(flags); + local_irq_save_nort(flags); if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { - local_irq_restore(flags); + local_irq_restore_nort(flags); return; } diff --git a/mm/compaction.c b/mm/compaction.c index 70e6bec46dc2..6678ed58b7c6 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1593,10 +1593,12 @@ check_drain: block_start_pfn(cc->migrate_pfn, cc->order); if (cc->last_migrated_pfn < current_block_start) { - cpu = get_cpu(); + cpu = get_cpu_light(); + local_lock_irq(swapvec_lock); lru_add_drain_cpu(cpu); + local_unlock_irq(swapvec_lock); drain_local_pages(zone); - put_cpu(); + put_cpu_light(); /* No more flushing until we migrate again */ cc->last_migrated_pfn = 0; } diff --git a/mm/filemap.c b/mm/filemap.c index edfb90e3830c..a8d2c7a73d54 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct address_space *mapping, * node->private_list is protected by * mapping->tree_lock. 
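The mm/ and lib/ conversions above all draw on the same family of RT helpers: local_irq_save_nort() leaves interrupts enabled on RT and is plain local_irq_save() otherwise, while get_cpu_light() pins the task to its CPU with migrate_disable() instead of disabling preemption. Those definitions live in RT headers outside this excerpt and are assumed here; drain_cpu_example() is illustration only, and the real compaction hunk additionally wraps the LRU drain in swapvec_lock:

static void drain_cpu_example(void)
{
	int cpu = get_cpu_light();	/* RT: migrate_disable() only */

	/* still preemptible on RT, but guaranteed to stay on 'cpu',
	 * so the per-CPU pagevecs are used consistently */
	lru_add_drain_cpu(cpu);
	put_cpu_light();
}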
*/ - if (!list_empty(&node->private_list)) - list_lru_del(&workingset_shadow_nodes, + if (!list_empty(&node->private_list)) { + local_lock(workingset_shadow_lock); + list_lru_del(&__workingset_shadow_nodes, &node->private_list); + local_unlock(workingset_shadow_lock); + } } return 0; } @@ -217,8 +220,10 @@ static void page_cache_tree_delete(struct address_space *mapping, if (!dax_mapping(mapping) && !workingset_node_pages(node) && list_empty(&node->private_list)) { node->private_data = mapping; - list_lru_add(&workingset_shadow_nodes, - &node->private_list); + local_lock(workingset_shadow_lock); + list_lru_add(&__workingset_shadow_nodes, + &node->private_list); + local_unlock(workingset_shadow_lock); } } diff --git a/mm/highmem.c b/mm/highmem.c index 50b4ca6787f0..77518a3b35a1 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -29,10 +29,11 @@ #include <linux/kgdb.h> #include <asm/tlbflush.h> - +#ifndef CONFIG_PREEMPT_RT_FULL #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) DEFINE_PER_CPU(int, __kmap_atomic_idx); #endif +#endif /* * Virtual_count is not a pure "count". @@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) unsigned long totalhigh_pages __read_mostly; EXPORT_SYMBOL(totalhigh_pages); - +#ifndef CONFIG_PREEMPT_RT_FULL EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); +#endif unsigned int nr_free_highpages (void) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2a800c4a39bd..12b94909ba7b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -67,6 +67,7 @@ #include <net/sock.h> #include <net/ip.h> #include "slab.h" +#include <linux/locallock.h> #include <asm/uaccess.h> @@ -92,6 +93,8 @@ int do_swap_account __read_mostly; #define do_swap_account 0 #endif +static DEFINE_LOCAL_IRQ_LOCK(event_lock); + /* Whether legacy memory+swap accounting is active */ static bool do_memsw_account(void) { @@ -1695,6 +1698,7 @@ struct memcg_stock_pcp { #define FLUSHING_CACHED_CHARGE 0 }; static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); +static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll); static DEFINE_MUTEX(percpu_charge_mutex); /** @@ -1717,7 +1721,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) if (nr_pages > CHARGE_BATCH) return ret; - local_irq_save(flags); + local_lock_irqsave(memcg_stock_ll, flags); stock = this_cpu_ptr(&memcg_stock); if (memcg == stock->cached && stock->nr_pages >= nr_pages) { @@ -1725,7 +1729,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) ret = true; } - local_irq_restore(flags); + local_unlock_irqrestore(memcg_stock_ll, flags); return ret; } @@ -1752,13 +1756,13 @@ static void drain_local_stock(struct work_struct *dummy) struct memcg_stock_pcp *stock; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(memcg_stock_ll, flags); stock = this_cpu_ptr(&memcg_stock); drain_stock(stock); clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags); - local_irq_restore(flags); + local_unlock_irqrestore(memcg_stock_ll, flags); } /* @@ -1770,7 +1774,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; unsigned long flags; - local_irq_save(flags); + local_lock_irqsave(memcg_stock_ll, flags); stock = this_cpu_ptr(&memcg_stock); if (stock->cached != memcg) { /* reset if necessary */ @@ -1779,7 +1783,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) } stock->nr_pages += nr_pages; - local_irq_restore(flags); + local_unlock_irqrestore(memcg_stock_ll, flags); } /* @@ -1795,7 +1799,7 @@ 
static void drain_all_stock(struct mem_cgroup *root_memcg) return; /* Notify other cpus that system-wide "drain" is running */ get_online_cpus(); - curcpu = get_cpu(); + curcpu = get_cpu_light(); for_each_online_cpu(cpu) { struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); struct mem_cgroup *memcg; @@ -1812,7 +1816,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) schedule_work_on(cpu, &stock->work); } } - put_cpu(); + put_cpu_light(); put_online_cpus(); mutex_unlock(&percpu_charge_mutex); } @@ -4558,12 +4562,12 @@ static int mem_cgroup_move_account(struct page *page, ret = 0; - local_irq_disable(); + local_lock_irq(event_lock); mem_cgroup_charge_statistics(to, page, compound, nr_pages); memcg_check_events(to, page); mem_cgroup_charge_statistics(from, page, compound, -nr_pages); memcg_check_events(from, page); - local_irq_enable(); + local_unlock_irq(event_lock); out_unlock: unlock_page(page); out: @@ -5438,10 +5442,10 @@ void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg, commit_charge(page, memcg, lrucare); - local_irq_disable(); + local_lock_irq(event_lock); mem_cgroup_charge_statistics(memcg, page, compound, nr_pages); memcg_check_events(memcg, page); - local_irq_enable(); + local_unlock_irq(event_lock); if (do_memsw_account() && PageSwapCache(page)) { swp_entry_t entry = { .val = page_private(page) }; @@ -5497,14 +5501,14 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, memcg_oom_recover(memcg); } - local_irq_save(flags); + local_lock_irqsave(event_lock, flags); __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); __this_cpu_add(memcg->stat->nr_page_events, nr_pages); memcg_check_events(memcg, dummy_page); - local_irq_restore(flags); + local_unlock_irqrestore(event_lock, flags); if (!mem_cgroup_is_root(memcg)) css_put_many(&memcg->css, nr_pages); @@ -5659,10 +5663,10 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage) commit_charge(newpage, memcg, false); - local_irq_save(flags); + local_lock_irqsave(event_lock, flags); mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages); memcg_check_events(memcg, newpage); - local_irq_restore(flags); + local_unlock_irqrestore(event_lock, flags); } DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key); @@ -5853,6 +5857,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { struct mem_cgroup *memcg, *swap_memcg; unsigned short oldid; + unsigned long flags; VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); @@ -5893,12 +5898,16 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) * important here to have the interrupts disabled because it is the * only synchronisation we have for updating the per-CPU variables.
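On PREEMPT_RT the event_lock local lock taken just below provides that synchronisation, which is why the irqs_disabled() assertion is compiled out for RT builds: interrupts stay enabled, but every writer of these per-CPU statistics now serializes on the same per-CPU lock. The memcg stock uses the identical conversion; a condensed sketch of the consume_stock() change from the hunks above (abbreviated, not the full function):

static DEFINE_LOCAL_IRQ_LOCK(memcg_stock_ll);

	unsigned long flags;

	local_lock_irqsave(memcg_stock_ll, flags);	/* was local_irq_save() */
	stock = this_cpu_ptr(&memcg_stock);
	/* ... consume cached pre-charged pages ... */
	local_unlock_irqrestore(memcg_stock_ll, flags);
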
*/ + local_lock_irqsave(event_lock, flags); +#ifndef CONFIG_PREEMPT_RT_BASE VM_BUG_ON(!irqs_disabled()); +#endif mem_cgroup_charge_statistics(memcg, page, false, -1); memcg_check_events(memcg, page); if (!mem_cgroup_is_root(memcg)) css_put(&memcg->css); + local_unlock_irqrestore(event_lock, flags); } /* diff --git a/mm/mmu_context.c b/mm/mmu_context.c index 6f4d27c5bb32..5cd25c745a8f 100644 --- a/mm/mmu_context.c +++ b/mm/mmu_context.c @@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm) struct task_struct *tsk = current; task_lock(tsk); + preempt_disable_rt(); active_mm = tsk->active_mm; if (active_mm != mm) { atomic_inc(&mm->mm_count); @@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm) } tsk->mm = mm; switch_mm(active_mm, mm, tsk); + preempt_enable_rt(); task_unlock(tsk); #ifdef finish_arch_post_lock_switch finish_arch_post_lock_switch(); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7064aae8ded7..6a187c2adcc9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -61,6 +61,7 @@ #include <linux/page_ext.h> #include <linux/hugetlb.h> #include <linux/sched/rt.h> +#include <linux/locallock.h> #include <linux/page_owner.h> #include <linux/kthread.h> #include <linux/memcontrol.h> @@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids); EXPORT_SYMBOL(nr_online_nodes); #endif +static DEFINE_LOCAL_IRQ_LOCK(pa_lock); + +#ifdef CONFIG_PREEMPT_RT_BASE +# define cpu_lock_irqsave(cpu, flags) \ + local_lock_irqsave_on(pa_lock, flags, cpu) +# define cpu_unlock_irqrestore(cpu, flags) \ + local_unlock_irqrestore_on(pa_lock, flags, cpu) +#else +# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) +# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) +#endif + int page_group_by_mobility_disabled __read_mostly; #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT @@ -1083,7 +1096,7 @@ static bool bulkfree_pcp_prepare(struct page *page) #endif /* CONFIG_DEBUG_VM */ /* - * Frees a number of pages from the PCP lists + * Frees a number of pages which have been collected from the pcp lists. * Assumes all pages on list are in same zone, and of same order. * count is the number of pages to free. * @@ -1094,19 +1107,58 @@ static bool bulkfree_pcp_prepare(struct page *page) * pinned" detection logic. 
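The rework that follows splits this path in two: isolate_pcp_pages() unlinks pages from the per-CPU lists while pa_lock (the local lock defined above) is held, and free_pcppages_bulk() then returns them to the buddy lists under zone->lock, with interrupts disabled only for that second step. The drain_zone_pages() hunk below applies the pattern like this (condensed from the diff):

	unsigned long flags;
	LIST_HEAD(dst);

	local_lock_irqsave(pa_lock, flags);
	isolate_pcp_pages(to_drain, pcp, &dst);	/* collect under the per-CPU lock */
	pcp->count -= to_drain;
	local_unlock_irqrestore(pa_lock, flags);
	free_pcppages_bulk(zone, to_drain, &dst);	/* zone->lock taken inside, irqsave */
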
*/ static void free_pcppages_bulk(struct zone *zone, int count, - struct per_cpu_pages *pcp) + struct list_head *list) { - int migratetype = 0; - int batch_free = 0; unsigned long nr_scanned; bool isolated_pageblocks; + unsigned long flags; + + spin_lock_irqsave(&zone->lock, flags); - spin_lock(&zone->lock); isolated_pageblocks = has_isolate_pageblock(zone); nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); if (nr_scanned) __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); + while (!list_empty(list)) { + struct page *page; + int mt; /* migratetype of the to-be-freed page */ + + page = list_first_entry(list, struct page, lru); + /* must delete as __free_one_page list manipulates */ + list_del(&page->lru); + + mt = get_pcppage_migratetype(page); + /* MIGRATE_ISOLATE page should not go to pcplists */ + VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); + /* Pageblock could have been isolated meanwhile */ + if (unlikely(isolated_pageblocks)) + mt = get_pageblock_migratetype(page); + + if (bulkfree_pcp_prepare(page)) + continue; + + __free_one_page(page, page_to_pfn(page), zone, 0, mt); + trace_mm_page_pcpu_drain(page, 0, mt); + count--; + } + WARN_ON(count != 0); + spin_unlock_irqrestore(&zone->lock, flags); +} + +/* + * Moves a number of pages from the PCP lists to free list which + * is freed outside of the locked region. + * + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + */ +static void isolate_pcp_pages(int count, struct per_cpu_pages *src, + struct list_head *dst) +{ + int migratetype = 0; + int batch_free = 0; + while (count) { struct page *page; struct list_head *list; @@ -1122,7 +1174,7 @@ static void free_pcppages_bulk(struct zone *zone, int count, batch_free++; if (++migratetype == MIGRATE_PCPTYPES) migratetype = 0; - list = &pcp->lists[migratetype]; + list = &src->lists[migratetype]; } while (list_empty(list)); /* This is the only non-empty list. Free them all. 
*/ @@ -1130,27 +1182,12 @@ static void free_pcppages_bulk(struct zone *zone, int count, batch_free = count; do { - int mt; /* migratetype of the to-be-freed page */ - page = list_last_entry(list, struct page, lru); - /* must delete as __free_one_page list manipulates */ list_del(&page->lru); - mt = get_pcppage_migratetype(page); - /* MIGRATE_ISOLATE page should not go to pcplists */ - VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); - /* Pageblock could have been isolated meanwhile */ - if (unlikely(isolated_pageblocks)) - mt = get_pageblock_migratetype(page); - - if (bulkfree_pcp_prepare(page)) - continue; - - __free_one_page(page, page_to_pfn(page), zone, 0, mt); - trace_mm_page_pcpu_drain(page, 0, mt); + list_add(&page->lru, dst); } while (--count && --batch_free && !list_empty(list)); } - spin_unlock(&zone->lock); } static void free_one_page(struct zone *zone, @@ -1159,7 +1196,9 @@ static void free_one_page(struct zone *zone, int migratetype) { unsigned long nr_scanned; - spin_lock(&zone->lock); + unsigned long flags; + + spin_lock_irqsave(&zone->lock, flags); nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED); if (nr_scanned) __mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned); @@ -1169,7 +1208,7 @@ static void free_one_page(struct zone *zone, migratetype = get_pfnblock_migratetype(page, pfn); } __free_one_page(page, pfn, zone, order, migratetype); - spin_unlock(&zone->lock); + spin_unlock_irqrestore(&zone->lock, flags); } static void __meminit __init_single_page(struct page *page, unsigned long pfn, @@ -1255,10 +1294,10 @@ static void __free_pages_ok(struct page *page, unsigned int order) return; migratetype = get_pfnblock_migratetype(page, pfn); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); __count_vm_events(PGFREE, 1 << order); free_one_page(page_zone(page), page, pfn, order, migratetype); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } static void __init __free_pages_boot_core(struct page *page, unsigned int order) @@ -2273,16 +2312,18 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order, void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) { unsigned long flags; + LIST_HEAD(dst); int to_drain, batch; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); batch = READ_ONCE(pcp->batch); to_drain = min(pcp->count, batch); if (to_drain > 0) { - free_pcppages_bulk(zone, to_drain, pcp); + isolate_pcp_pages(to_drain, pcp, &dst); pcp->count -= to_drain; } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); + free_pcppages_bulk(zone, to_drain, &dst); } #endif @@ -2298,16 +2339,21 @@ static void drain_pages_zone(unsigned int cpu, struct zone *zone) unsigned long flags; struct per_cpu_pageset *pset; struct per_cpu_pages *pcp; + LIST_HEAD(dst); + int count; - local_irq_save(flags); + cpu_lock_irqsave(cpu, flags); pset = per_cpu_ptr(zone->pageset, cpu); pcp = &pset->pcp; - if (pcp->count) { - free_pcppages_bulk(zone, pcp->count, pcp); + count = pcp->count; + if (count) { + isolate_pcp_pages(count, pcp, &dst); pcp->count = 0; } - local_irq_restore(flags); + cpu_unlock_irqrestore(cpu, flags); + if (count) + free_pcppages_bulk(zone, count, &dst); } /* @@ -2393,8 +2439,17 @@ void drain_all_pages(struct zone *zone) else cpumask_clear_cpu(cpu, &cpus_with_pcps); } +#ifndef CONFIG_PREEMPT_RT_BASE on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, zone, 1); +#else + for_each_cpu(cpu, &cpus_with_pcps) { + if (zone) + drain_pages_zone(cpu, zone); + else + 
drain_pages(cpu); + } +#endif } #ifdef CONFIG_HIBERNATION @@ -2454,7 +2509,7 @@ void free_hot_cold_page(struct page *page, bool cold) migratetype = get_pfnblock_migratetype(page, pfn); set_pcppage_migratetype(page, migratetype); - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); __count_vm_event(PGFREE); /* @@ -2480,12 +2535,17 @@ void free_hot_cold_page(struct page *page, bool cold) pcp->count++; if (pcp->count >= pcp->high) { unsigned long batch = READ_ONCE(pcp->batch); - free_pcppages_bulk(zone, batch, pcp); + LIST_HEAD(dst); + + isolate_pcp_pages(batch, pcp, &dst); pcp->count -= batch; + local_unlock_irqrestore(pa_lock, flags); + free_pcppages_bulk(zone, batch, &dst); + return; } out: - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } /* @@ -2627,7 +2687,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, struct per_cpu_pages *pcp; struct list_head *list; - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); do { pcp = &this_cpu_ptr(zone->pageset)->pcp; list = &pcp->lists[migratetype]; @@ -2654,7 +2714,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, * allocate greater than order-1 page units with __GFP_NOFAIL. */ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1)); - spin_lock_irqsave(&zone->lock, flags); + local_spin_lock_irqsave(pa_lock, &zone->lock, flags); do { page = NULL; @@ -2666,22 +2726,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone, if (!page) page = __rmqueue(zone, order, migratetype); } while (page && check_new_pages(page, order)); - spin_unlock(&zone->lock); - if (!page) + if (!page) { + spin_unlock(&zone->lock); goto failed; + } __mod_zone_freepage_state(zone, -(1 << order), get_pcppage_migratetype(page)); + spin_unlock(&zone->lock); } __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); VM_BUG_ON_PAGE(bad_range(zone, page), page); return page; failed: - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); return NULL; } @@ -6559,7 +6621,9 @@ static int page_alloc_cpu_notify(struct notifier_block *self, int cpu = (unsigned long)hcpu; if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) { + local_lock_irq_on(swapvec_lock, cpu); lru_add_drain_cpu(cpu); + local_unlock_irq_on(swapvec_lock, cpu); drain_pages(cpu); /* @@ -6585,6 +6649,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self, void __init page_alloc_init(void) { hotcpu_notifier(page_alloc_cpu_notify, 0); + local_irq_lock_init(pa_lock); } /* @@ -7413,7 +7478,7 @@ void zone_pcp_reset(struct zone *zone) struct per_cpu_pageset *pset; /* avoid races with drain_pages() */ - local_irq_save(flags); + local_lock_irqsave(pa_lock, flags); if (zone->pageset != &boot_pageset) { for_each_online_cpu(cpu) { pset = per_cpu_ptr(zone->pageset, cpu); @@ -7422,7 +7487,7 @@ void zone_pcp_reset(struct zone *zone) free_percpu(zone->pageset); zone->pageset = &boot_pageset; } - local_irq_restore(flags); + local_unlock_irqrestore(pa_lock, flags); } #ifdef CONFIG_MEMORY_HOTREMOVE diff --git a/mm/percpu.c b/mm/percpu.c index f014cebbf405..4e739fcf91bf 100644 --- a/mm/percpu.c +++ b/mm/percpu.c @@ -1283,18 +1283,7 @@ void free_percpu(void __percpu *ptr) } EXPORT_SYMBOL_GPL(free_percpu); -/** - * is_kernel_percpu_address - test whether address is from static percpu area - * @addr: address to test - * - * Test whether @addr belongs to in-kernel static percpu area. 
Module - * static percpu areas are not considered. For those, use - * is_module_percpu_address(). - * - * RETURNS: - * %true if @addr is from in-kernel static percpu area, %false otherwise. - */ -bool is_kernel_percpu_address(unsigned long addr) +bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr) { #ifdef CONFIG_SMP const size_t static_size = __per_cpu_end - __per_cpu_start; @@ -1303,16 +1292,39 @@ bool is_kernel_percpu_address(unsigned long addr) for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(base, cpu); + void *va = (void *)addr; - if ((void *)addr >= start && (void *)addr < start + static_size) + if (va >= start && va < start + static_size) { + if (can_addr) { + *can_addr = (unsigned long) (va - start); + *can_addr += (unsigned long) + per_cpu_ptr(base, get_boot_cpu_id()); + } return true; - } + } + } #endif /* on UP, can't distinguish from other static vars, always false */ return false; } /** + * is_kernel_percpu_address - test whether address is from static percpu area + * @addr: address to test + * + * Test whether @addr belongs to in-kernel static percpu area. Module + * static percpu areas are not considered. For those, use + * is_module_percpu_address(). + * + * RETURNS: + * %true if @addr is from in-kernel static percpu area, %false otherwise. + */ +bool is_kernel_percpu_address(unsigned long addr) +{ + return __is_kernel_percpu_address(addr, NULL); +} + +/** * per_cpu_ptr_to_phys - convert translated percpu address to physical address * @addr: the address to be converted to physical address * diff --git a/mm/slab.h b/mm/slab.h index ceb7d70cdb76..dfd281e43fbe 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, * The slab lists for all objects. */ struct kmem_cache_node { +#ifdef CONFIG_SLUB + raw_spinlock_t list_lock; +#else spinlock_t list_lock; +#endif #ifdef CONFIG_SLAB struct list_head slabs_partial; /* partial list first, better asm code */ diff --git a/mm/slub.c b/mm/slub.c index edc79ca3c6d5..67eb368b9314 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1144,7 +1144,7 @@ static noinline int free_debug_processing( unsigned long uninitialized_var(flags); int ret = 0; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); slab_lock(page); if (s->flags & SLAB_CONSISTENCY_CHECKS) { @@ -1179,7 +1179,7 @@ out: bulk_cnt, cnt); slab_unlock(page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); if (!ret) slab_fix(s, "Object at 0x%p not freed", object); return ret; @@ -1307,6 +1307,12 @@ static inline void dec_slabs_node(struct kmem_cache *s, int node, #endif /* CONFIG_SLUB_DEBUG */ +struct slub_free_list { + raw_spinlock_t lock; + struct list_head list; +}; +static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); + /* * Hooks for other subsystems that check memory allocations. In a typical * production configuration these hooks all should produce no code at all. 
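Two RT conversions dominate the SLUB hunks from here on. First, kmem_cache_node.list_lock becomes a raw_spinlock_t (see the mm/slab.h hunk above), so it stays a true spinning lock on RT and may be taken with interrupts disabled. Second, slab pages that would otherwise be freed inside IRQ-off regions are parked on the per-CPU slub_free_list defined just above and released later through free_delayed() once interrupts are enabled again. The drain step, condensed from the flush_all() and ___slab_alloc() hunks that follow:

	struct slub_free_list *f;
	LIST_HEAD(tofree);

	f = this_cpu_ptr(&slub_free_list);
	raw_spin_lock(&f->lock);
	list_splice_init(&f->list, &tofree);	/* grab the deferred pages */
	raw_spin_unlock(&f->lock);
	free_delayed(&tofree);			/* __free_slab() with IRQs on */
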
@@ -1530,10 +1536,17 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) void *start, *p; int idx, order; bool shuffle; + bool enableirqs = false; flags &= gfp_allowed_mask; if (gfpflags_allow_blocking(flags)) + enableirqs = true; +#ifdef CONFIG_PREEMPT_RT_FULL + if (system_state == SYSTEM_RUNNING) + enableirqs = true; +#endif + if (enableirqs) local_irq_enable(); flags |= s->allocflags; @@ -1608,7 +1621,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node) page->frozen = 1; out: - if (gfpflags_allow_blocking(flags)) + if (enableirqs) local_irq_disable(); if (!page) return NULL; @@ -1667,6 +1680,16 @@ static void __free_slab(struct kmem_cache *s, struct page *page) __free_pages(page, order); } +static void free_delayed(struct list_head *h) +{ + while(!list_empty(h)) { + struct page *page = list_first_entry(h, struct page, lru); + + list_del(&page->lru); + __free_slab(page->slab_cache, page); + } +} + #define need_reserve_slab_rcu \ (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) @@ -1698,6 +1721,12 @@ static void free_slab(struct kmem_cache *s, struct page *page) } call_rcu(head, rcu_free_slab); + } else if (irqs_disabled()) { + struct slub_free_list *f = this_cpu_ptr(&slub_free_list); + + raw_spin_lock(&f->lock); + list_add(&page->lru, &f->list); + raw_spin_unlock(&f->lock); } else __free_slab(s, page); } @@ -1805,7 +1834,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, if (!n || !n->nr_partial) return NULL; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); list_for_each_entry_safe(page, page2, &n->partial, lru) { void *t; @@ -1830,7 +1859,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n, break; } - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); return object; } @@ -2076,7 +2105,7 @@ redo: * that acquire_slab() will see a slab page that * is frozen */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } else { m = M_FULL; @@ -2087,7 +2116,7 @@ redo: * slabs from diagnostic functions will not see * any frozen slabs. */ - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } } @@ -2122,7 +2151,7 @@ redo: goto redo; if (lock) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); if (m == M_FREE) { stat(s, DEACTIVATE_EMPTY); @@ -2154,10 +2183,10 @@ static void unfreeze_partials(struct kmem_cache *s, n2 = get_node(s, page_to_nid(page)); if (n != n2) { if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); n = n2; - spin_lock(&n->list_lock); + raw_spin_lock(&n->list_lock); } do { @@ -2186,7 +2215,7 @@ static void unfreeze_partials(struct kmem_cache *s, } if (n) - spin_unlock(&n->list_lock); + raw_spin_unlock(&n->list_lock); while (discard_page) { page = discard_page; @@ -2225,14 +2254,21 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) pobjects = oldpage->pobjects; pages = oldpage->pages; if (drain && pobjects > s->cpu_partial) { + struct slub_free_list *f; unsigned long flags; + LIST_HEAD(tofree); /* * partial array is full. Move the existing * set to the per node partial list. 
*/ local_irq_save(flags); unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); + f = this_cpu_ptr(&slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock(&f->lock); local_irq_restore(flags); + free_delayed(&tofree); oldpage = NULL; pobjects = 0; pages = 0; @@ -2304,7 +2340,22 @@ static bool has_cpu_slab(int cpu, void *info) static void flush_all(struct kmem_cache *s) { + LIST_HEAD(tofree); + int cpu; + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); + for_each_online_cpu(cpu) { + struct slub_free_list *f; + + if (!has_cpu_slab(cpu, s)) + continue; + + f = &per_cpu(slub_free_list, cpu); + raw_spin_lock_irq(&f->lock); + list_splice_init(&f->list, &tofree); + raw_spin_unlock_irq(&f->lock); + free_delayed(&tofree); + } } /* @@ -2359,10 +2410,10 @@ static unsigned long count_partial(struct kmem_cache_node *n, unsigned long x = 0; struct page *page; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) x += get_count(page); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return x; } #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ @@ -2500,8 +2551,10 @@ static inline void *get_freelist(struct kmem_cache *s, struct page *page) * already disabled (which is the case for bulk allocation). */ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, - unsigned long addr, struct kmem_cache_cpu *c) + unsigned long addr, struct kmem_cache_cpu *c, + struct list_head *to_free) { + struct slub_free_list *f; void *freelist; struct page *page; @@ -2561,6 +2614,13 @@ load_freelist: VM_BUG_ON(!c->page->frozen); c->freelist = get_freepointer(s, freelist); c->tid = next_tid(c->tid); + +out: + f = this_cpu_ptr(&slub_free_list); + raw_spin_lock(&f->lock); + list_splice_init(&f->list, to_free); + raw_spin_unlock(&f->lock); + return freelist; new_slab: @@ -2592,7 +2652,7 @@ new_slab: deactivate_slab(s, page, get_freepointer(s, freelist)); c->page = NULL; c->freelist = NULL; - return freelist; + goto out; } /* @@ -2604,6 +2664,7 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, { void *p; unsigned long flags; + LIST_HEAD(tofree); local_irq_save(flags); #ifdef CONFIG_PREEMPT @@ -2615,8 +2676,9 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, c = this_cpu_ptr(s->cpu_slab); #endif - p = ___slab_alloc(s, gfpflags, node, addr, c); + p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); local_irq_restore(flags); + free_delayed(&tofree); return p; } @@ -2802,7 +2864,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, do { if (unlikely(n)) { - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); n = NULL; } prior = page->freelist; @@ -2834,7 +2896,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, * Otherwise the list_lock will synchronize with * other processors updating the list of slabs. 
*/ - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); } } @@ -2876,7 +2938,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page, add_partial(n, page, DEACTIVATE_TO_TAIL); stat(s, FREE_ADD_PARTIAL); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return; slab_empty: @@ -2891,7 +2953,7 @@ slab_empty: remove_full(s, n, page); } - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); stat(s, FREE_SLAB); discard_slab(s, page); } @@ -3096,6 +3158,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p) { struct kmem_cache_cpu *c; + LIST_HEAD(to_free); int i; /* memcg and kmem_cache debug support */ @@ -3119,7 +3182,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, * of re-populating per CPU c->freelist */ p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, - _RET_IP_, c); + _RET_IP_, c, &to_free); if (unlikely(!p[i])) goto error; @@ -3131,6 +3194,7 @@ int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, } c->tid = next_tid(c->tid); local_irq_enable(); + free_delayed(&to_free); /* Clear memory outside IRQ disabled fastpath loop */ if (unlikely(flags & __GFP_ZERO)) { @@ -3278,7 +3342,7 @@ static void init_kmem_cache_node(struct kmem_cache_node *n) { n->nr_partial = 0; - spin_lock_init(&n->list_lock); + raw_spin_lock_init(&n->list_lock); INIT_LIST_HEAD(&n->partial); #ifdef CONFIG_SLUB_DEBUG atomic_long_set(&n->nr_slabs, 0); @@ -3622,6 +3686,10 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, const char *text) { #ifdef CONFIG_SLUB_DEBUG +#ifdef CONFIG_PREEMPT_RT_BASE + /* XXX move out of irq-off section */ + slab_err(s, page, text, s->name); +#else void *addr = page_address(page); void *p; unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) * @@ -3642,6 +3710,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page, slab_unlock(page); kfree(map); #endif +#endif } /* @@ -3655,7 +3724,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) struct page *page, *h; BUG_ON(irqs_disabled()); - spin_lock_irq(&n->list_lock); + raw_spin_lock_irq(&n->list_lock); list_for_each_entry_safe(page, h, &n->partial, lru) { if (!page->inuse) { remove_partial(n, page); @@ -3665,7 +3734,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n) "Objects remaining in %s on __kmem_cache_shutdown()"); } } - spin_unlock_irq(&n->list_lock); + raw_spin_unlock_irq(&n->list_lock); list_for_each_entry_safe(page, h, &discard, lru) discard_slab(s, page); @@ -3908,7 +3977,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) for (i = 0; i < SHRINK_PROMOTE_MAX; i++) INIT_LIST_HEAD(promote + i); - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); /* * Build lists of slabs to discard or promote. 
@@ -3939,7 +4008,7 @@ int __kmem_cache_shrink(struct kmem_cache *s) for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) list_splice(promote + i, &n->partial); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); /* Release empty slabs */ list_for_each_entry_safe(page, t, &discard, lru) @@ -4115,6 +4184,12 @@ void __init kmem_cache_init(void) { static __initdata struct kmem_cache boot_kmem_cache, boot_kmem_cache_node; + int cpu; + + for_each_possible_cpu(cpu) { + raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); + INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); + } if (debug_guardpage_minorder()) slub_max_order = 0; @@ -4323,7 +4398,7 @@ static int validate_slab_node(struct kmem_cache *s, struct page *page; unsigned long flags; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) { validate_slab_slab(s, page, map); @@ -4345,7 +4420,7 @@ static int validate_slab_node(struct kmem_cache *s, s->name, count, atomic_long_read(&n->nr_slabs)); out: - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); return count; } @@ -4533,12 +4608,12 @@ static int list_locations(struct kmem_cache *s, char *buf, if (!atomic_long_read(&n->nr_slabs)) continue; - spin_lock_irqsave(&n->list_lock, flags); + raw_spin_lock_irqsave(&n->list_lock, flags); list_for_each_entry(page, &n->partial, lru) process_slab(&t, s, page, alloc, map); list_for_each_entry(page, &n->full, lru) process_slab(&t, s, page, alloc, map); - spin_unlock_irqrestore(&n->list_lock, flags); + raw_spin_unlock_irqrestore(&n->list_lock, flags); } for (i = 0; i < t.count; i++) { diff --git a/mm/swap.c b/mm/swap.c index 4dcf852e1e6d..69c3a5b24060 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -32,6 +32,7 @@ #include <linux/memcontrol.h> #include <linux/gfp.h> #include <linux/uio.h> +#include <linux/locallock.h> #include <linux/hugetlb.h> #include <linux/page_idle.h> @@ -50,6 +51,8 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); #endif +static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); +DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); /* * This path almost never happens for VM activity - pages are normally @@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page *page) unsigned long flags; get_page(page); - local_irq_save(flags); + local_lock_irqsave(rotate_lock, flags); pvec = this_cpu_ptr(&lru_rotate_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore(rotate_lock, flags); } } @@ -294,12 +297,13 @@ void activate_page(struct page *page) { page = compound_head(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + activate_page_pvecs); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); - put_cpu_var(activate_page_pvecs); + put_locked_var(swapvec_lock, activate_page_pvecs); } } @@ -326,7 +330,7 @@ void activate_page(struct page *page) static void __lru_cache_activate_page(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); int i; /* @@ -348,7 +352,7 @@ static void __lru_cache_activate_page(struct page *page) } } - 
put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } /* @@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed); static void __lru_cache_add(struct page *page) { - struct pagevec *pvec = &get_cpu_var(lru_add_pvec); + struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) __pagevec_lru_add(pvec); - put_cpu_var(lru_add_pvec); + put_locked_var(swapvec_lock, lru_add_pvec); } /** @@ -593,9 +597,15 @@ void lru_add_drain_cpu(int cpu) unsigned long flags; /* No harm done if a racing interrupt already did this */ - local_irq_save(flags); +#ifdef CONFIG_PREEMPT_RT_BASE + local_lock_irqsave_on(rotate_lock, flags, cpu); pagevec_move_tail(pvec); - local_irq_restore(flags); + local_unlock_irqrestore_on(rotate_lock, flags, cpu); +#else + local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); + local_unlock_irqrestore(rotate_lock, flags); +#endif } pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); @@ -627,11 +637,12 @@ void deactivate_file_page(struct page *page) return; if (likely(get_page_unless_zero(page))) { - struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + lru_deactivate_file_pvecs); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); - put_cpu_var(lru_deactivate_file_pvecs); + put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); } } @@ -646,27 +657,31 @@ void deactivate_file_page(struct page *page) void deactivate_page(struct page *page) { if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { - struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); + struct pagevec *pvec = &get_locked_var(swapvec_lock, + lru_deactivate_pvecs); get_page(page); if (!pagevec_add(pvec, page) || PageCompound(page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); - put_cpu_var(lru_deactivate_pvecs); + put_locked_var(swapvec_lock, lru_deactivate_pvecs); } } void lru_add_drain(void) { - lru_add_drain_cpu(get_cpu()); - put_cpu(); + lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); + local_unlock_cpu(swapvec_lock); } -static void lru_add_drain_per_cpu(struct work_struct *dummy) +#ifdef CONFIG_PREEMPT_RT_BASE +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) { - lru_add_drain(); + local_lock_on(swapvec_lock, cpu); + lru_add_drain_cpu(cpu); + local_unlock_on(swapvec_lock, cpu); } -static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); +#else /* * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM @@ -686,6 +701,22 @@ static int __init lru_init(void) } early_initcall(lru_init); +static void lru_add_drain_per_cpu(struct work_struct *dummy) +{ + lru_add_drain(); +} + +static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); +static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work) +{ + struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); + + INIT_WORK(work, lru_add_drain_per_cpu); + queue_work_on(cpu, lru_add_drain_wq, work); + cpumask_set_cpu(cpu, has_work); +} +#endif + void lru_add_drain_all(void) { static DEFINE_MUTEX(lock); @@ -697,21 +728,18 @@ void lru_add_drain_all(void) cpumask_clear(&has_work); for_each_online_cpu(cpu) { - struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); - if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) || pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) || pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) || 
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) || - need_activate_page_drain(cpu)) { - INIT_WORK(work, lru_add_drain_per_cpu); - queue_work_on(cpu, lru_add_drain_wq, work); - cpumask_set_cpu(cpu, &has_work); - } + need_activate_page_drain(cpu)) + remote_lru_add_drain(cpu, &has_work); } +#ifndef CONFIG_PREEMPT_RT_BASE for_each_cpu(cpu, &has_work) flush_work(&per_cpu(lru_add_drain_work, cpu)); +#endif put_online_cpus(); mutex_unlock(&lock); diff --git a/mm/truncate.c b/mm/truncate.c index 9c809e7d73c3..b7681e888ba0 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -62,9 +62,12 @@ static void clear_exceptional_entry(struct address_space *mapping, * protected by mapping->tree_lock. */ if (!workingset_node_shadows(node) && - !list_empty(&node->private_list)) - list_lru_del(&workingset_shadow_nodes, + !list_empty(&node->private_list)) { + local_lock(workingset_shadow_lock); + list_lru_del(&__workingset_shadow_nodes, &node->private_list); + local_unlock(workingset_shadow_lock); + } __radix_tree_delete_node(&mapping->page_tree, node); unlock: spin_unlock_irq(&mapping->tree_lock); diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 195de42bea1f..b46cb686fde7 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) struct vmap_block *vb; struct vmap_area *va; unsigned long vb_idx; - int node, err; + int node, err, cpu; void *vaddr; node = numa_node_id(); @@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int order, gfp_t gfp_mask) BUG_ON(err); radix_tree_preload_end(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = this_cpu_ptr(&vmap_block_queue); spin_lock(&vbq->lock); list_add_tail_rcu(&vb->free_list, &vbq->free); spin_unlock(&vbq->lock); - put_cpu_var(vmap_block_queue); + put_cpu_light(); return vaddr; } @@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) struct vmap_block *vb; void *vaddr = NULL; unsigned int order; + int cpu; BUG_ON(offset_in_page(size)); BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); @@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) order = get_order(size); rcu_read_lock(); - vbq = &get_cpu_var(vmap_block_queue); + cpu = get_cpu_light(); + vbq = this_cpu_ptr(&vmap_block_queue); list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; @@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) break; } - put_cpu_var(vmap_block_queue); + put_cpu_light(); rcu_read_unlock(); /* Allocate new block if nothing was found */ diff --git a/mm/vmstat.c b/mm/vmstat.c index 604f26a4f696..312006d2db50 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, long x; long t; + preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *zone, enum zone_stat_item item, x = 0; } __this_cpu_write(*p, x); + preempt_enable_rt(); } EXPORT_SYMBOL(__mod_zone_page_state); @@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, long x; long t; + preempt_disable_rt(); x = delta + __this_cpu_read(*p); t = __this_cpu_read(pcp->stat_threshold); @@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, x = 0; } __this_cpu_write(*p, x); + preempt_enable_rt(); } EXPORT_SYMBOL(__mod_node_page_state); @@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone, enum 
zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v + overstep, zone, item); __this_cpu_write(*p, -overstep); } + preempt_enable_rt(); } void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) @@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_inc_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v > t)) { @@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item) node_page_state_add(v + overstep, pgdat, item); __this_cpu_write(*p, -overstep); } + preempt_enable_rt(); } void __inc_zone_page_state(struct page *page, enum zone_stat_item item) @@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) s8 __percpu *p = pcp->vm_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone, enum zone_stat_item item) zone_page_state_add(v - overstep, zone, item); __this_cpu_write(*p, overstep); } + preempt_enable_rt(); } void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) @@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) s8 __percpu *p = pcp->vm_node_stat_diff + item; s8 v, t; + preempt_disable_rt(); v = __this_cpu_dec_return(*p); t = __this_cpu_read(pcp->stat_threshold); if (unlikely(v < - t)) { @@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item) node_page_state_add(v - overstep, pgdat, item); __this_cpu_write(*p, overstep); } + preempt_enable_rt(); } void __dec_zone_page_state(struct page *page, enum zone_stat_item item) diff --git a/mm/workingset.c b/mm/workingset.c index 4c4f05655e6e..b97b1e87b54c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -334,7 +334,8 @@ out: * point where they would still be useful. 
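The hunks below rename the list to __workingset_shadow_nodes and pair it with the workingset_shadow_lock local lock, replacing the bare local_irq_disable()/local_irq_enable() pairs in the shrinker. One detail worth noting from shadow_lru_isolate(): the lock is dropped around cond_resched() so the walk stays preemptible, then reacquired before lru_lock is taken again (shown here in condensed form, copied from the hunk below):

	local_unlock_irq(workingset_shadow_lock);
	cond_resched();
	local_lock_irq(workingset_shadow_lock);
	spin_lock(lru_lock);
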
*/ -struct list_lru workingset_shadow_nodes; +struct list_lru __workingset_shadow_nodes; +DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock); static unsigned long count_shadow_nodes(struct shrinker *shrinker, struct shrink_control *sc) @@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker, unsigned long pages; /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ - local_irq_disable(); - shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); - local_irq_enable(); + local_lock_irq(workingset_shadow_lock); + shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc); + local_unlock_irq(workingset_shadow_lock); if (sc->memcg) { pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid, @@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolate(struct list_head *item, spin_unlock(&mapping->tree_lock); ret = LRU_REMOVED_RETRY; out: - local_irq_enable(); + local_unlock_irq(workingset_shadow_lock); cond_resched(); - local_irq_disable(); + local_lock_irq(workingset_shadow_lock); spin_lock(lru_lock); return ret; } @@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(struct shrinker *shrinker, unsigned long ret; /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ - local_irq_disable(); - ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, + local_lock_irq(workingset_shadow_lock); + ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc, shadow_lru_isolate, NULL); - local_irq_enable(); + local_unlock_irq(workingset_shadow_lock); return ret; } @@ -492,7 +493,7 @@ static int __init workingset_init(void) pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n", timestamp_bits, max_order, bucket_order); - ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key); + ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key); if (ret) goto err; ret = register_shrinker(&workingset_shadow_shrinker); @@ -500,7 +501,7 @@ static int __init workingset_init(void) goto err_list_lru; return 0; err_list_lru: - list_lru_destroy(&workingset_shadow_nodes); + list_lru_destroy(&__workingset_shadow_nodes); err: return ret; } diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 1689bb58e0d1..e52a8cb6aa5a 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -53,6 +53,7 @@ #include <linux/mount.h> #include <linux/migrate.h> #include <linux/pagemap.h> +#include <linux/locallock.h> #define ZSPAGE_MAGIC 0x58 @@ -70,9 +71,22 @@ */ #define ZS_MAX_ZSPAGE_ORDER 2 #define ZS_MAX_PAGES_PER_ZSPAGE (_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER) - #define ZS_HANDLE_SIZE (sizeof(unsigned long)) +#ifdef CONFIG_PREEMPT_RT_FULL + +struct zsmalloc_handle { + unsigned long addr; + struct mutex lock; +}; + +#define ZS_HANDLE_ALLOC_SIZE (sizeof(struct zsmalloc_handle)) + +#else + +#define ZS_HANDLE_ALLOC_SIZE (sizeof(unsigned long)) +#endif + /* * Object location (<PFN>, <obj_idx>) is encoded as * as single (unsigned long) handle value. 
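On PREEMPT_RT the zsmalloc hunks that follow stop using the handle's low bit as a bit spinlock. The handle allocation grows into a struct zsmalloc_handle carrying a mutex, and pin_tag()/unpin_tag() lock that mutex instead, so a task holding a pinned handle may sleep. Condensed from the hunks below (the function name pin_tag_rt is illustrative, not part of the patch):

struct zsmalloc_handle {
	unsigned long addr;	/* the encoded object location */
	struct mutex lock;	/* replaces bit_spin_lock(HANDLE_PIN_BIT, ...) */
};

static void pin_tag_rt(unsigned long handle)
{
	struct zsmalloc_handle *zh = zs_get_pure_handle(handle);

	mutex_lock(&zh->lock);	/* may sleep on RT, unlike a bit spinlock */
}
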
@@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {} static int create_cache(struct zs_pool *pool) { - pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE, + pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_ALLOC_SIZE, 0, 0, NULL); if (!pool->handle_cachep) return 1; @@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool *pool) static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp) { - return (unsigned long)kmem_cache_alloc(pool->handle_cachep, - gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); + void *p; + + p = kmem_cache_alloc(pool->handle_cachep, + gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE)); +#ifdef CONFIG_PREEMPT_RT_FULL + if (p) { + struct zsmalloc_handle *zh = p; + + mutex_init(&zh->lock); + } +#endif + return (unsigned long)p; } +#ifdef CONFIG_PREEMPT_RT_FULL +static struct zsmalloc_handle *zs_get_pure_handle(unsigned long handle) +{ + return (void *)(handle &~((1 << OBJ_TAG_BITS) - 1)); +} +#endif + static void cache_free_handle(struct zs_pool *pool, unsigned long handle) { kmem_cache_free(pool->handle_cachep, (void *)handle); @@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage) static void record_obj(unsigned long handle, unsigned long obj) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + WRITE_ONCE(zh->addr, obj); +#else /* * lsb of @obj represents handle lock while other bits * represent object value the handle is pointing so * updating shouldn't do store tearing. */ WRITE_ONCE(*(unsigned long *)handle, obj); +#endif } /* zpool driver */ @@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc"); /* per-cpu VM mapping areas for zspage accesses that cross page boundaries */ static DEFINE_PER_CPU(struct mapping_area, zs_map_area); +static DEFINE_LOCAL_IRQ_LOCK(zs_map_area_lock); static bool is_zspage_isolated(struct zspage *zspage) { @@ -902,7 +940,13 @@ static unsigned long location_to_obj(struct page *page, unsigned int obj_idx) static unsigned long handle_to_obj(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return zh->addr; +#else return *(unsigned long *)handle; +#endif } static unsigned long obj_to_head(struct page *page, void *obj) @@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct page *page, void *obj) static inline int testpin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_is_locked(&zh->lock); +#else return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static inline int trypin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_trylock(&zh->lock); +#else return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void pin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_lock(&zh->lock); +#else bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void unpin_tag(unsigned long handle) { +#ifdef CONFIG_PREEMPT_RT_FULL + struct zsmalloc_handle *zh = zs_get_pure_handle(handle); + + return mutex_unlock(&zh->lock); +#else bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle); +#endif } static void reset_page(struct page *page) @@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long 
handle, class = pool->size_class[class_idx]; off = (class->size * obj_idx) & ~PAGE_MASK; - area = &get_cpu_var(zs_map_area); + area = &get_locked_var(zs_map_area_lock, zs_map_area); area->vm_mm = mm; if (off + class->size <= PAGE_SIZE) { /* this object is contained entirely within a page */ @@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) __zs_unmap_object(area, pages, off, class->size); } - put_cpu_var(zs_map_area); + put_locked_var(zs_map_area_lock, zs_map_area); migrate_read_unlock(zspage); unpin_tag(handle); diff --git a/net/core/dev.c b/net/core/dev.c index 7f2caad46a3d..cfed44b8e175 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPUS; static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8); static seqcount_t devnet_rename_seq; +static DEFINE_MUTEX(devnet_rename_mutex); static inline void dev_base_seq_inc(struct net *net) { @@ -211,14 +212,14 @@ static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex) static inline void rps_lock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_lock(&sd->input_pkt_queue.lock); + raw_spin_lock(&sd->input_pkt_queue.raw_lock); #endif } static inline void rps_unlock(struct softnet_data *sd) { #ifdef CONFIG_RPS - spin_unlock(&sd->input_pkt_queue.lock); + raw_spin_unlock(&sd->input_pkt_queue.raw_lock); #endif } @@ -888,7 +889,8 @@ retry: strcpy(name, dev->name); rcu_read_unlock(); if (read_seqcount_retry(&devnet_rename_seq, seq)) { - cond_resched(); + mutex_lock(&devnet_rename_mutex); + mutex_unlock(&devnet_rename_mutex); goto retry; } @@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *dev, const char *newname) if (dev->flags & IFF_UP) return -EBUSY; - write_seqcount_begin(&devnet_rename_seq); + mutex_lock(&devnet_rename_mutex); + __raw_write_seqcount_begin(&devnet_rename_seq); - if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { - write_seqcount_end(&devnet_rename_seq); - return 0; - } + if (strncmp(newname, dev->name, IFNAMSIZ) == 0) + goto outunlock; memcpy(oldname, dev->name, IFNAMSIZ); err = dev_get_valid_name(net, dev, newname); - if (err < 0) { - write_seqcount_end(&devnet_rename_seq); - return err; - } + if (err < 0) + goto outunlock; if (oldname[0] && !strchr(oldname, '%')) netdev_info(dev, "renamed from %s\n", oldname); @@ -1183,11 +1182,12 @@ rollback: if (ret) { memcpy(dev->name, oldname, IFNAMSIZ); dev->name_assign_type = old_assign_type; - write_seqcount_end(&devnet_rename_seq); - return ret; + err = ret; + goto outunlock; } - write_seqcount_end(&devnet_rename_seq); + __raw_write_seqcount_end(&devnet_rename_seq); + mutex_unlock(&devnet_rename_mutex); netdev_adjacent_rename_links(dev, oldname); @@ -1208,7 +1208,8 @@ rollback: /* err >= 0 after dev_alloc_name() or stores the first errno */ if (err >= 0) { err = ret; - write_seqcount_begin(&devnet_rename_seq); + mutex_lock(&devnet_rename_mutex); + __raw_write_seqcount_begin(&devnet_rename_seq); memcpy(dev->name, oldname, IFNAMSIZ); memcpy(oldname, newname, IFNAMSIZ); dev->name_assign_type = old_assign_type; @@ -1221,6 +1222,11 @@ rollback: } return err; + +outunlock: + __raw_write_seqcount_end(&devnet_rename_seq); + mutex_unlock(&devnet_rename_mutex); + return err; } /** @@ -2286,6 +2292,7 @@ static void __netif_reschedule(struct Qdisc *q) sd->output_queue_tailp = &q->next_sched; raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } void __netif_schedule(struct Qdisc *q) @@ -2370,6 +2377,7 @@ void __dev_kfree_skb_irq(struct sk_buff 
*skb, enum skb_free_reason reason) __this_cpu_write(softnet_data.completion_queue, skb); raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__dev_kfree_skb_irq); @@ -3111,7 +3119,11 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q, * This permits qdisc->running owner to get the lock more * often and dequeue packets faster. */ +#ifdef CONFIG_PREEMPT_RT_FULL + contended = true; +#else contended = qdisc_is_running(q); +#endif if (unlikely(contended)) spin_lock(&q->busylock); @@ -3174,8 +3186,10 @@ static void skb_update_prio(struct sk_buff *skb) #define skb_update_prio(skb) #endif +#ifndef CONFIG_PREEMPT_RT_FULL DEFINE_PER_CPU(int, xmit_recursion); EXPORT_SYMBOL(xmit_recursion); +#endif /** * dev_loopback_xmit - loop back @skb @@ -3409,8 +3423,7 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) int cpu = smp_processor_id(); /* ok because BHs are off */ if (txq->xmit_lock_owner != cpu) { - if (unlikely(__this_cpu_read(xmit_recursion) > - XMIT_RECURSION_LIMIT)) + if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) goto recursion_alert; skb = validate_xmit_skb(skb, dev); @@ -3420,9 +3433,9 @@ static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv) HARD_TX_LOCK(dev, txq, cpu); if (!netif_xmit_stopped(txq)) { - __this_cpu_inc(xmit_recursion); + xmit_rec_inc(); skb = dev_hard_start_xmit(skb, dev, txq, &rc); - __this_cpu_dec(xmit_recursion); + xmit_rec_dec(); if (dev_xmit_complete(rc)) { HARD_TX_UNLOCK(dev, txq); goto out; @@ -3796,6 +3809,7 @@ drop: rps_unlock(sd); local_irq_restore(flags); + preempt_check_resched_rt(); atomic_long_inc(&skb->dev->rx_dropped); kfree_skb(skb); @@ -3814,7 +3828,7 @@ static int netif_rx_internal(struct sk_buff *skb) struct rps_dev_flow voidflow, *rflow = &voidflow; int cpu; - preempt_disable(); + migrate_disable(); rcu_read_lock(); cpu = get_rps_cpu(skb->dev, skb, &rflow); @@ -3824,13 +3838,13 @@ static int netif_rx_internal(struct sk_buff *skb) ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); rcu_read_unlock(); - preempt_enable(); + migrate_enable(); } else #endif { unsigned int qtail; - ret = enqueue_to_backlog(skb, get_cpu(), &qtail); - put_cpu(); + ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); + put_cpu_light(); } return ret; } @@ -3864,11 +3878,9 @@ int netif_rx_ni(struct sk_buff *skb) trace_netif_rx_ni_entry(skb); - preempt_disable(); + local_bh_disable(); err = netif_rx_internal(skb); - if (local_softirq_pending()) - do_softirq(); - preempt_enable(); + local_bh_enable(); return err; } @@ -4347,7 +4359,7 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->input_pkt_queue); - kfree_skb(skb); + __skb_queue_tail(&sd->tofree_queue, skb); input_queue_head_incr(sd); } } @@ -4357,11 +4369,14 @@ static void flush_backlog(struct work_struct *work) skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); - kfree_skb(skb); + __skb_queue_tail(&sd->tofree_queue, skb); input_queue_head_incr(sd); } } + if (!skb_queue_empty(&sd->tofree_queue)) + raise_softirq_irqoff(NET_RX_SOFTIRQ); local_bh_enable(); + } static void flush_all_backlogs(void) @@ -4852,6 +4867,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) sd->rps_ipi_list = NULL; local_irq_enable(); + preempt_check_resched_rt(); /* Send pending IPI's to kick RPS 
processing on remote cpus. */ while (remsd) { @@ -4865,6 +4881,7 @@ static void net_rps_action_and_irq_enable(struct softnet_data *sd) } else #endif local_irq_enable(); + preempt_check_resched_rt(); } static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) @@ -4894,7 +4911,9 @@ static int process_backlog(struct napi_struct *napi, int quota) while (again) { struct sk_buff *skb; + local_irq_disable(); while ((skb = __skb_dequeue(&sd->process_queue))) { + local_irq_enable(); rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); @@ -4902,9 +4921,9 @@ static int process_backlog(struct napi_struct *napi, int quota) if (++work >= quota) return work; + local_irq_disable(); } - local_irq_disable(); rps_lock(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { /* @@ -4942,9 +4961,11 @@ void __napi_schedule(struct napi_struct *n) local_irq_save(flags); ____napi_schedule(this_cpu_ptr(&softnet_data), n); local_irq_restore(flags); + preempt_check_resched_rt(); } EXPORT_SYMBOL(__napi_schedule); +#ifndef CONFIG_PREEMPT_RT_FULL /** * __napi_schedule_irqoff - schedule for receive * @n: entry to schedule @@ -4956,6 +4977,7 @@ void __napi_schedule_irqoff(struct napi_struct *n) ____napi_schedule(this_cpu_ptr(&softnet_data), n); } EXPORT_SYMBOL(__napi_schedule_irqoff); +#endif void __napi_complete(struct napi_struct *n) { @@ -5245,13 +5267,21 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + 2; int budget = netdev_budget; + struct sk_buff_head tofree_q; + struct sk_buff *skb; LIST_HEAD(list); LIST_HEAD(repoll); + __skb_queue_head_init(&tofree_q); + local_irq_disable(); + skb_queue_splice_init(&sd->tofree_queue, &tofree_q); list_splice_init(&sd->poll_list, &list); local_irq_enable(); + while ((skb = __skb_dequeue(&tofree_q))) + kfree_skb(skb); + for (;;) { struct napi_struct *n; @@ -5282,7 +5312,7 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) list_splice_tail(&repoll, &list); list_splice(&list, &sd->poll_list); if (!list_empty(&sd->poll_list)) - __raise_softirq_irqoff(NET_RX_SOFTIRQ); + __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); net_rps_action_and_irq_enable(sd); } @@ -8044,16 +8074,20 @@ static int dev_cpu_callback(struct notifier_block *nfb, raise_softirq_irqoff(NET_TX_SOFTIRQ); local_irq_enable(); + preempt_check_resched_rt(); /* Process offline CPU's input_pkt_queue */ while ((skb = __skb_dequeue(&oldsd->process_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } - while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { + while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx_ni(skb); input_queue_head_incr(oldsd); } + while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { + kfree_skb(skb); + } return NOTIFY_OK; } @@ -8358,8 +8392,9 @@ static int __init net_dev_init(void) INIT_WORK(flush, flush_backlog); - skb_queue_head_init(&sd->input_pkt_queue); - skb_queue_head_init(&sd->process_queue); + skb_queue_head_init_raw(&sd->input_pkt_queue); + skb_queue_head_init_raw(&sd->process_queue); + skb_queue_head_init_raw(&sd->tofree_queue); INIT_LIST_HEAD(&sd->poll_list); sd->output_queue_tailp = &sd->output_queue; #ifdef CONFIG_RPS diff --git a/net/core/filter.c b/net/core/filter.c index 4eb4ce0aeef4..4f09d6a57217 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb) { int ret; - if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) { + if 
diff --git a/net/core/filter.c b/net/core/filter.c
index 4eb4ce0aeef4..4f09d6a57217 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 {
 	int ret;
 
-	if (unlikely(__this_cpu_read(xmit_recursion) > XMIT_RECURSION_LIMIT)) {
+	if (unlikely(xmit_rec_read() > XMIT_RECURSION_LIMIT)) {
 		net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
 		kfree_skb(skb);
 		return -ENETDOWN;
@@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct net_device *dev, struct sk_buff *skb)
 
 	skb->dev = dev;
 
-	__this_cpu_inc(xmit_recursion);
+	xmit_rec_inc();
 	ret = dev_queue_xmit(skb);
-	__this_cpu_dec(xmit_recursion);
+	xmit_rec_dec();
 
 	return ret;
 }
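The xmit_recursion counter consulted above guards against BPF programs that redirect a packet back into dev_queue_xmit() indefinitely. Mainline keeps it in a per-CPU variable, which is correct only while the transmit path cannot be preempted or migrated; on RT it can be both, so the counter has to travel with the task. A hedged sketch of the xmit_rec_*() accessors this hunk relies on, assuming a per-task counter as used elsewhere in this series (the task_struct field name is an assumption):

	#ifdef CONFIG_PREEMPT_RT_FULL
	static inline int  xmit_rec_read(void) { return current->xmit_recursion; }
	static inline void xmit_rec_inc(void)  { current->xmit_recursion++; }
	static inline void xmit_rec_dec(void)  { current->xmit_recursion--; }
	#else
	static inline int  xmit_rec_read(void) { return __this_cpu_read(xmit_recursion); }
	static inline void xmit_rec_inc(void)  { __this_cpu_inc(xmit_recursion); }
	static inline void xmit_rec_dec(void)  { __this_cpu_dec(xmit_recursion); }
	#endif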
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c
index cad8e791f28e..2a9364fe62a5 100644
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
@@ -84,7 +84,7 @@ struct gen_estimator
 	struct gnet_stats_basic_packed	*bstats;
 	struct gnet_stats_rate_est64	*rate_est;
 	spinlock_t		*stats_lock;
-	seqcount_t		*running;
+	net_seqlock_t		*running;
 	int			ewma_log;
 	u32			last_packets;
 	unsigned long		avpps;
@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
 		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
 		      struct gnet_stats_rate_est64 *rate_est,
 		      spinlock_t *stats_lock,
-		      seqcount_t *running,
+		      net_seqlock_t *running,
 		      struct nlattr *opt)
 {
 	struct gen_estimator *est;
@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
 			  struct gnet_stats_basic_cpu __percpu *cpu_bstats,
 			  struct gnet_stats_rate_est64 *rate_est,
 			  spinlock_t *stats_lock,
-			  seqcount_t *running, struct nlattr *opt)
+			  net_seqlock_t *running, struct nlattr *opt)
 {
 	gen_kill_estimator(bstats, rate_est);
 	return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 508e051304fb..bc3b17b78c94 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -130,7 +130,7 @@ __gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
 }
 
 void
-__gnet_stats_copy_basic(const seqcount_t *running,
+__gnet_stats_copy_basic(net_seqlock_t *running,
 			struct gnet_stats_basic_packed *bstats,
 			struct gnet_stats_basic_cpu __percpu *cpu,
 			struct gnet_stats_basic_packed *b)
@@ -143,10 +143,10 @@ __gnet_stats_copy_basic(const seqcount_t *running,
 	}
 	do {
 		if (running)
-			seq = read_seqcount_begin(running);
+			seq = net_seq_begin(running);
 		bstats->bytes = b->bytes;
 		bstats->packets = b->packets;
-	} while (running && read_seqcount_retry(running, seq));
+	} while (running && net_seq_retry(running, seq));
 }
 EXPORT_SYMBOL(__gnet_stats_copy_basic);
 
@@ -164,7 +164,7 @@ EXPORT_SYMBOL(__gnet_stats_copy_basic);
  * if the room in the socket buffer was not sufficient.
 */
 int
-gnet_stats_copy_basic(const seqcount_t *running,
+gnet_stats_copy_basic(net_seqlock_t *running,
 		      struct gnet_dump *d,
 		      struct gnet_stats_basic_cpu __percpu *cpu,
 		      struct gnet_stats_basic_packed *b)
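Here and in net/sched further down, the qdisc "running" sequence counter changes from seqcount_t to net_seqlock_t. With a bare seqcount, a reader that observes an odd sequence spins until the writer finishes; on RT the writer is preemptible, so a higher-priority reader could spin forever. Wrapping the counter in a full seqlock gives readers a lock to block on. A sketch of the wrapper, assuming the header introduced elsewhere in this patch set:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define net_seqlock_t			seqlock_t
	# define net_seq_begin(__r)		read_seqbegin(__r)
	# define net_seq_retry(__r, __s)	read_seqretry(__r, __s)
	#else
	# define net_seqlock_t			seqcount_t
	# define net_seq_begin(__r)		read_seqcount_begin(__r)
	# define net_seq_retry(__r, __s)	read_seqcount_retry(__r, __s)
	#endif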
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index fe008f1bd930..9fa6bea3dd3f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -64,6 +64,7 @@
 #include <linux/errqueue.h>
 #include <linux/prefetch.h>
 #include <linux/if_vlan.h>
+#include <linux/locallock.h>
 
 #include <net/protocol.h>
 #include <net/dst.h>
@@ -360,6 +361,8 @@ struct napi_alloc_cache {
 
 static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
 static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock);
 
 static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
@@ -367,10 +370,10 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 	unsigned long flags;
 	void *data;
 
-	local_irq_save(flags);
+	local_lock_irqsave(netdev_alloc_lock, flags);
 	nc = this_cpu_ptr(&netdev_alloc_cache);
 	data = __alloc_page_frag(nc, fragsz, gfp_mask);
-	local_irq_restore(flags);
+	local_unlock_irqrestore(netdev_alloc_lock, flags);
 	return data;
 }
 
@@ -389,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
 
 static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
+	void *data;
 
-	return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+	return data;
 }
 
 void *napi_alloc_frag(unsigned int fragsz)
@@ -438,13 +445,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
-	local_irq_save(flags);
+	local_lock_irqsave(netdev_alloc_lock, flags);
 
 	nc = this_cpu_ptr(&netdev_alloc_cache);
 	data = __alloc_page_frag(nc, len, gfp_mask);
 	pfmemalloc = nc->pfmemalloc;
 
-	local_irq_restore(flags);
+	local_unlock_irqrestore(netdev_alloc_lock, flags);
 
 	if (unlikely(!data))
 		return NULL;
@@ -485,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
 struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 				 gfp_t gfp_mask)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 	struct sk_buff *skb;
 	void *data;
+	bool pfmemalloc;
 
 	len += NET_SKB_PAD + NET_IP_ALIGN;
 
@@ -505,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	if (sk_memalloc_socks())
 		gfp_mask |= __GFP_MEMALLOC;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	data = __alloc_page_frag(&nc->page, len, gfp_mask);
+	pfmemalloc = nc->page.pfmemalloc;
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	if (unlikely(!data))
 		return NULL;
 
@@ -516,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
 	}
 
 	/* use OR instead of assignment to avoid clearing of bits in mask */
-	if (nc->page.pfmemalloc)
+	if (pfmemalloc)
 		skb->pfmemalloc = 1;
 	skb->head_frag = 1;
 
@@ -760,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
 
 void __kfree_skb_flush(void)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* flush skb_cache if containing objects */
 	if (nc->skb_count) {
 		kmem_cache_free_bulk(skbuff_head_cache, nc->skb_count,
 				     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 
 static inline void _kfree_skb_defer(struct sk_buff *skb)
 {
-	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
+	struct napi_alloc_cache *nc;
 
 	/* drop skb->head and call any destructors for packet */
 	skb_release_all(skb);
 
+	nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 	/* record skb to CPU local list */
 	nc->skb_cache[nc->skb_count++] = skb;
 
@@ -791,6 +805,7 @@ static inline void _kfree_skb_defer(struct sk_buff *skb)
 			     nc->skb_cache);
 		nc->skb_count = 0;
 	}
+	put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {
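The skbuff conversions above replace open-coded local_irq_save()/this_cpu_ptr() sections with local locks. On mainline, local_lock_irqsave() collapses to exactly the IRQ-off section it replaced; on RT it becomes a per-CPU sleeping lock, so the per-CPU allocator caches stay serialized without hard-disabling interrupts. The generic pattern, with hypothetical names (my_lock, my_cache_struct, touch_cache) used purely for illustration:

	static DEFINE_LOCAL_IRQ_LOCK(my_lock);
	static DEFINE_PER_CPU(struct my_cache_struct, my_cache);

	static void touch_cache(void)
	{
		struct my_cache_struct *c;

		/* acquire the lock and the per-CPU pointer in one step */
		c = &get_locked_var(my_lock, my_cache);
		/* ... operate on *c; the task cannot migrate here ... */
		put_locked_var(my_lock, my_cache);
	}

Note how __napi_alloc_skb() now copies nc->page.pfmemalloc into a local variable before put_locked_var(): once the lock is dropped, the task may migrate, and nc may refer to another CPU's cache.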
diff --git a/net/core/sock.c b/net/core/sock.c
index 231c38d91855..d8e0aed19903 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2494,12 +2494,11 @@ void lock_sock_nested(struct sock *sk, int subclass)
 	if (sk->sk_lock.owned)
 		__lock_sock(sk);
 	sk->sk_lock.owned = 1;
-	spin_unlock(&sk->sk_lock.slock);
+	spin_unlock_bh(&sk->sk_lock.slock);
 	/*
 	 * The sk_lock has mutex_lock() semantics here:
 	 */
 	mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
-	local_bh_enable();
 }
 EXPORT_SYMBOL(lock_sock_nested);
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 48734ee6293f..e6864ff11352 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -69,6 +69,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel.h>
 #include <linux/fcntl.h>
+#include <linux/sysrq.h>
 #include <linux/socket.h>
 #include <linux/in.h>
 #include <linux/inet.h>
@@ -77,6 +78,7 @@
 #include <linux/string.h>
 #include <linux/netfilter_ipv4.h>
 #include <linux/slab.h>
+#include <linux/locallock.h>
 #include <net/snmp.h>
 #include <net/ip.h>
 #include <net/route.h>
@@ -204,6 +206,8 @@ static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
 *
 *	On SMP we have one ICMP socket per-cpu.
 */
+static DEFINE_LOCAL_IRQ_LOCK(icmp_sk_lock);
+
 static struct sock *icmp_sk(struct net *net)
 {
 	return *this_cpu_ptr(net->ipv4.icmp_sk);
@@ -215,12 +219,14 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
 
 	local_bh_disable();
 
+	local_lock(icmp_sk_lock);
 	sk = icmp_sk(net);
 
 	if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
 		/* This can happen if the output path signals a
 		 * dst_link_failure() for an outgoing ICMP packet.
 		 */
+		local_unlock(icmp_sk_lock);
 		local_bh_enable();
 		return NULL;
 	}
@@ -230,6 +236,7 @@ static inline struct sock *icmp_xmit_lock(struct net *net)
 static inline void icmp_xmit_unlock(struct sock *sk)
 {
 	spin_unlock_bh(&sk->sk_lock.slock);
+	local_unlock(icmp_sk_lock);
 }
 
 int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
@@ -358,6 +365,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 	struct sock *sk;
 	struct sk_buff *skb;
 
+	local_lock(icmp_sk_lock);
 	sk = icmp_sk(dev_net((*rt)->dst.dev));
 	if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
 			   icmp_param->data_len+icmp_param->head_len,
@@ -380,6 +388,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 		skb->ip_summed = CHECKSUM_NONE;
 		ip_push_pending_frames(sk, fl4);
 	}
+	local_unlock(icmp_sk_lock);
 }
 
 /*
@@ -891,6 +900,30 @@ static bool icmp_redirect(struct sk_buff *skb)
 }
 
 /*
+ * 32bit and 64bit have different timestamp length, so we check for
+ * the cookie at offset 20 and verify it is repeated at offset 50
+ */
+#define CO_POS0		20
+#define CO_POS1		50
+#define CO_SIZE		sizeof(int)
+#define ICMP_SYSRQ_SIZE	57
+
+/*
+ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie
+ * pattern and if it matches send the next byte as a trigger to sysrq.
+ */
+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb)
+{
+	int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq);
+	char *p = skb->data;
+
+	if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) &&
+	    !memcmp(&cookie, p + CO_POS1, CO_SIZE) &&
+	    p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE])
+		handle_sysrq(p[CO_POS0 + CO_SIZE]);
+}
+
+/*
 *	Handle ICMP_ECHO ("ping") requests.
 *
 *	RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
@@ -917,6 +950,11 @@ static bool icmp_echo(struct sk_buff *skb)
 		icmp_param.data_len	   = skb->len;
 		icmp_param.head_len	   = sizeof(struct icmphdr);
 		icmp_reply(&icmp_param, skb);
+
+		if (skb->len == ICMP_SYSRQ_SIZE &&
+		    net->ipv4.sysctl_icmp_echo_sysrq) {
+			icmp_check_sysrq(net, skb);
+		}
 	}
 	/* should there be an ICMP stat for ignored echos? */
 	return true;
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 566cfc50f7cf..4b8551d78a3b 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[] = {
 		.proc_handler	= proc_dointvec
 	},
 	{
+		.procname	= "icmp_echo_sysrq",
+		.data		= &init_net.ipv4.sysctl_icmp_echo_sysrq,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec
+	},
+	{
 		.procname	= "icmp_ignore_bogus_error_responses",
 		.data		= &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
 		.maxlen		= sizeof(int),
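Both icmp_check_sysrq() and the sysctl table entry above reference net->ipv4.sysctl_icmp_echo_sysrq; the backing member is added to struct netns_ipv4 elsewhere in this series. A minimal sketch of that declaration (its exact placement within the struct is an assumption):

	/* include/net/netns/ipv4.h, elsewhere in this series (sketch) */
	struct netns_ipv4 {
		/* ... existing sysctl members ... */
		int sysctl_icmp_echo_sysrq;
		/* ... */
	};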
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 6988566dc72f..672fffcde28c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -62,6 +62,7 @@
 #include <linux/init.h>
 #include <linux/times.h>
 #include <linux/slab.h>
+#include <linux/locallock.h>
 
 #include <net/net_namespace.h>
 #include <net/icmp.h>
@@ -568,6 +569,7 @@ void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_v4_send_check);
 
+static DEFINE_LOCAL_IRQ_LOCK(tcp_sk_lock);
 /*
  *	This routine will send an RST to the other tcp.
  *
@@ -695,6 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
 	arg.tos = ip_hdr(skb)->tos;
+
+	local_lock(tcp_sk_lock);
 	local_bh_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -704,6 +708,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 	local_bh_enable();
+	local_unlock(tcp_sk_lock);
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
@@ -779,6 +784,7 @@ static void tcp_v4_send_ack(struct net *net,
 	if (oif)
 		arg.bound_dev_if = oif;
 	arg.tos = tos;
+	local_lock(tcp_sk_lock);
 	local_bh_disable();
 	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
 			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
@@ -787,6 +793,7 @@ static void tcp_v4_send_ack(struct net *net,
 
 	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 	local_bh_enable();
+	local_unlock(tcp_sk_lock);
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 439e597fd374..ca0daeaff370 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4229,7 +4229,7 @@ void ieee80211_rx_napi(struct ieee80211_hw *hw, struct ieee80211_sta *pubsta,
 	struct ieee80211_supported_band *sband;
 	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
 
-	WARN_ON_ONCE(softirq_count() == 0);
+	WARN_ON_ONCE_NONRT(softirq_count() == 0);
 
 	if (WARN_ON(status->band >= NUM_NL80211_BANDS))
 		goto drop;
diff --git a/net/netfilter/core.c b/net/netfilter/core.c
index 004af030ef1a..b64f751bda45 100644
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
@@ -22,12 +22,18 @@
 #include <linux/proc_fs.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/locallock.h>
 #include <linux/rcupdate.h>
 #include <net/net_namespace.h>
 #include <net/sock.h>
 
 #include "nf_internals.h"
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock);
+EXPORT_PER_CPU_SYMBOL(xt_write_lock);
+#endif
+
 static DEFINE_MUTEX(afinfo_mutex);
 
 const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
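WARN_ON_ONCE_NONRT() keeps the mac80211 softirq-context assertion for mainline builds but compiles it out on RT, where softirqs run in thread context and softirq_count() can legitimately be zero here. A sketch of the assertion variants, assuming the bug.h additions carried elsewhere in this patch set:

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define WARN_ON_NONRT(condition)	do { } while (0)
	# define WARN_ON_ONCE_NONRT(condition)	do { } while (0)
	#else
	# define WARN_ON_NONRT(condition)	WARN_ON(condition)
	# define WARN_ON_ONCE_NONRT(condition)	WARN_ON_ONCE(condition)
	#endif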
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index b17f9097c6fe..dd181bdadfb8 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -63,6 +63,7 @@
 #include <linux/if_packet.h>
 #include <linux/wireless.h>
 #include <linux/kernel.h>
+#include <linux/delay.h>
 #include <linux/kmod.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expired(unsigned long data)
 	if (BLOCK_NUM_PKTS(pbd)) {
 		while (atomic_read(&pkc->blk_fill_in_prog)) {
 			/* Waiting for skb_copy_bits to finish... */
-			cpu_relax();
+			cpu_chill();
 		}
 	}
 
@@ -956,7 +957,7 @@ static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
 		if (!(status & TP_STATUS_BLK_TMO)) {
 			while (atomic_read(&pkc->blk_fill_in_prog)) {
 				/* Waiting for skb_copy_bits to finish... */
-				cpu_relax();
+				cpu_chill();
 			}
 		}
 		prb_close_block(pkc, pbd, po, status);
diff --git a/net/rds/ib_rdma.c b/net/rds/ib_rdma.c
index 977f69886c00..f3e7a36b0396 100644
--- a/net/rds/ib_rdma.c
+++ b/net/rds/ib_rdma.c
@@ -34,6 +34,7 @@
 #include <linux/slab.h>
 #include <linux/rculist.h>
 #include <linux/llist.h>
+#include <linux/delay.h>
 
 #include "rds_single_path.h"
 #include "ib_mr.h"
@@ -210,7 +211,7 @@ static inline void wait_clean_list_grace(void)
 	for_each_online_cpu(cpu) {
 		flag = &per_cpu(clean_list_grace, cpu);
 		while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
-			cpu_relax();
+			cpu_chill();
 	}
 }
diff --git a/net/rxrpc/security.c b/net/rxrpc/security.c
index 7d921e56e715..13df56a738e5 100644
--- a/net/rxrpc/security.c
+++ b/net/rxrpc/security.c
@@ -19,9 +19,6 @@
 #include <keys/rxrpc-type.h>
 #include "ar-internal.h"
 
-static LIST_HEAD(rxrpc_security_methods);
-static DECLARE_RWSEM(rxrpc_security_sem);
-
 static const struct rxrpc_security *rxrpc_security_types[] = {
 	[RXRPC_SECURITY_NONE]	= &rxrpc_no_security,
 #ifdef CONFIG_RXKAD
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 195a3b2d9afc..7bf29e3fd71c 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -978,7 +978,7 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
 		rcu_assign_pointer(sch->stab, stab);
 	}
 	if (tca[TCA_RATE]) {
-		seqcount_t *running;
+		net_seqlock_t *running;
 
 		err = -EOPNOTSUPP;
 		if (sch->flags & TCQ_F_MQROOT)
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 9016c8baf2aa..d925f0e63679 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
 	.ops		=	&noop_qdisc_ops,
 	.q.lock		=	__SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
 	.dev_queue	=	&noop_netdev_queue,
+#ifdef CONFIG_PREEMPT_RT_BASE
+	.running	=	__SEQLOCK_UNLOCKED(noop_qdisc.running),
+#else
 	.running	=	SEQCNT_ZERO(noop_qdisc.running),
+#endif
 	.busylock	=	__SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
 };
 EXPORT_SYMBOL(noop_qdisc);
@@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
 	lockdep_set_class(&sch->busylock,
 			  dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+	seqlock_init(&sch->running);
+	lockdep_set_class(&sch->running.seqcount,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+	lockdep_set_class(&sch->running.lock,
+			  dev->qdisc_running_key ?: &qdisc_running_key);
+#else
 	seqcount_init(&sch->running);
 	lockdep_set_class(&sch->running,
 			  dev->qdisc_running_key ?: &qdisc_running_key);
+#endif
 
 	sch->ops = ops;
 	sch->enqueue = ops->enqueue;
@@ -926,7 +938,7 @@ void dev_deactivate_many(struct list_head *head)
 	/* Wait for outstanding qdisc_run calls. */
 	list_for_each_entry(dev, head, close_list)
 		while (some_qdisc_is_busy(dev))
-			yield();
+			msleep(1);
 }
 
 void dev_deactivate(struct net_device *dev)
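The af_packet and RDS hunks above, like the yield() -> msleep(1) change in dev_deactivate_many(), all address the same RT hazard: busy-waiting on a condition that only a preemptible task can clear. If the spinning task has higher priority than the one it waits for, cpu_relax() can spin forever. cpu_chill() sleeps briefly instead, letting the owner run. A hedged sketch of its shape (the real implementation lives in the hrtimer code of this series; details such as the exact clock are assumptions):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* Sleep for about 1 ms in the hope that the owner makes progress. */
	void cpu_chill(void)
	{
		struct timespec tu = {
			.tv_nsec = NSEC_PER_MSEC,
		};

		hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
	}
	#else
	# define cpu_chill()	cpu_relax()
	#endif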
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 9c9db55a0c1e..e6583b018a72 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -396,7 +396,7 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 		goto out;
 	}
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	pool = svc_pool_for_cpu(xprt->xpt_server, cpu);
 
 	atomic_long_inc(&pool->sp_stats.packets);
@@ -432,7 +432,7 @@ redo_search:
 		atomic_long_inc(&pool->sp_stats.threads_woken);
 		wake_up_process(rqstp->rq_task);
-		put_cpu();
+		put_cpu_light();
 		goto out;
 	}
 	rcu_read_unlock();
@@ -453,7 +453,7 @@ redo_search:
 		goto redo_search;
 	}
 	rqstp = NULL;
-	put_cpu();
+	put_cpu_light();
out:
 	trace_svc_xprt_do_enqueue(xprt, rqstp);
 }
diff --git a/scripts/mkcompile_h b/scripts/mkcompile_h
index 6fdc97ef6023..523e0420d7f0 100755
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
@@ -4,7 +4,8 @@ TARGET=$1
 ARCH=$2
 SMP=$3
 PREEMPT=$4
-CC=$5
+RT=$5
+CC=$6
 
 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
 
@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
 CONFIG_FLAGS=""
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi
 UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
 
 # Truncate to maximum length
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 9d33c1e85c79..3d307bda86f9 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
 {
 	if (!substream->pcm->nonatomic)
-		local_irq_disable();
+		local_irq_disable_nort();
 	snd_pcm_stream_lock(substream);
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
@@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
 {
 	snd_pcm_stream_unlock(substream);
 	if (!substream->pcm->nonatomic)
-		local_irq_enable();
+		local_irq_enable_nort();
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
 
@@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
 {
 	unsigned long flags = 0;
 	if (!substream->pcm->nonatomic)
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 	snd_pcm_stream_lock(substream);
 	return flags;
 }
@@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
 {
 	snd_pcm_stream_unlock(substream);
 	if (!substream->pcm->nonatomic)
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
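The sound/core changes use the *_nort() IRQ helpers, and svc_xprt_do_enqueue() above uses get_cpu_light()/put_cpu_light(); both families keep mainline behaviour while relaxing it on RT, where the PCM stream lock is a sleeping lock and hard-disabling interrupts or preemption around it is neither possible nor required. A sketch of both, assuming the definitions used elsewhere in this patch set:

	#ifdef CONFIG_PREEMPT_RT_FULL
	# define local_irq_disable_nort()	barrier()
	# define local_irq_enable_nort()	barrier()
	# define local_irq_save_nort(flags)	local_save_flags(flags)
	# define local_irq_restore_nort(flags)	(void)(flags)
	# define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
	# define put_cpu_light()		migrate_enable()
	#else
	# define local_irq_disable_nort()	local_irq_disable()
	# define local_irq_enable_nort()	local_irq_enable()
	# define local_irq_save_nort(flags)	local_irq_save(flags)
	# define local_irq_restore_nort(flags)	local_irq_restore(flags)
	# define get_cpu_light()		get_cpu()
	# define put_cpu_light()		put_cpu()
	#endif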