author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2017-06-16 12:33:25 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>    2017-06-16 12:33:25 +0200
commit    62225af26cb9575945c764c73d73be4360532ec5 (patch)
tree      9b320f25074befa6ece5d5887287219fe204e74f
parent    e435c24ff8bc5e5ba909c337419b0bcd9e61de55 (diff)
download  linux-rt-62225af26cb9575945c764c73d73be4360532ec5.tar.gz
[ANNOUNCE] v4.11.5-rt1    (tags: v4.11.5-rt1, v4.11.5-rt1-patches)
Dear RT folks!

I'm pleased to announce the v4.11.5-rt1 patch set.

The release has been delayed due to the hotplug rework that was started
before the final v4.11 release. However, the new code has not been
stabilized yet, so it was decided to bring back the old patches rather
than delay the v4.11-RT release any longer. We will try to complete the
hotplug rework (for RT) in the v4.11 cycle.

Changes since v4.9.39-rt21:

  - rebase to v4.11.5

Known issues:

  - CPU hotplug got a little better but can deadlock.

You can get this release via the git tree at:

  git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.11.5-rt1

The RT patch against v4.11.5 can be found here:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patch-4.11.5-rt1.patch.xz

The split quilt queue is available at:

  https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.11/older/patches-4.11.5-rt1.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch240
-rw-r--r--patches/0001-futex-Avoid-freeing-an-active-timer.patch7
-rw-r--r--patches/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch16
-rw-r--r--patches/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch53
-rw-r--r--patches/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch74
-rw-r--r--patches/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch21
-rw-r--r--patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch62
-rw-r--r--patches/0002-arm-Adjust-system_state-check.patch37
-rw-r--r--patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch46
-rw-r--r--patches/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch11
-rw-r--r--patches/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch2
-rw-r--r--patches/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch33
-rw-r--r--patches/0002-workqueue-Provide-work_on_cpu_safe.patch84
-rw-r--r--patches/0003-arm64-Adjust-system_state-check.patch38
-rw-r--r--patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch218
-rw-r--r--patches/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch7
-rw-r--r--patches/0003-futex-Remove-rt_mutex_deadlock_account_.patch20
-rw-r--r--patches/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch129
-rw-r--r--patches/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch7
-rw-r--r--patches/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch7
-rw-r--r--patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch96
-rw-r--r--patches/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch20
-rw-r--r--patches/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch76
-rw-r--r--patches/0004-rtmutex-Clean-up.patch25
-rw-r--r--patches/0004-x86-smp-Adjust-system_state-check.patch34
-rw-r--r--patches/0005-futex-Change-locking-rules.patch34
-rw-r--r--patches/0005-metag-Adjust-system_state-check.patch36
-rw-r--r--patches/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch89
-rw-r--r--patches/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch42
-rw-r--r--patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch116
-rw-r--r--patches/0006-futex-Cleanup-refcounting.patch10
-rw-r--r--patches/0006-padata-Make-padata_alloc-static.patch95
-rw-r--r--patches/0006-powerpc-Adjust-system_state-check.patch39
-rw-r--r--patches/0006-sched-tracing-Update-trace_sched_pi_setprio.patch7
-rw-r--r--patches/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch118
-rw-r--r--patches/0007-ACPI-Adjust-system_state-check.patch38
-rw-r--r--patches/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch45
-rw-r--r--patches/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch6
-rw-r--r--patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch95
-rw-r--r--patches/0007-rtmutex-Fix-PI-chain-order-integrity.patch19
-rw-r--r--patches/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch193
-rw-r--r--patches/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch38
-rw-r--r--patches/0008-mm-Adjust-system_state-check.patch42
-rw-r--r--patches/0008-rtmutex-Fix-more-prio-comparisons.patch15
-rw-r--r--patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch44
-rw-r--r--patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch108
-rw-r--r--patches/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch209
-rw-r--r--patches/0009-cpufreq-pasemi-Adjust-system_state-check.patch38
-rw-r--r--patches/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch6
-rw-r--r--patches/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch7
-rw-r--r--patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch73
-rw-r--r--patches/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch120
-rw-r--r--patches/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch8
-rw-r--r--patches/0010-iommu-vt-d-Adjust-system_state-checks.patch47
-rw-r--r--patches/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch124
-rw-r--r--patches/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch14
-rw-r--r--patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch88
-rw-r--r--patches/0012-async-Adjust-system_state-checks.patch61
-rw-r--r--patches/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch129
-rw-r--r--patches/0012-futex-Futex_unlock_pi-determinism.patch6
-rw-r--r--patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch89
-rw-r--r--patches/0013-crypto-N2-Replace-racy-task-affinity-logic.patch95
-rw-r--r--patches/0013-extable-Adjust-system_state-checks.patch36
-rw-r--r--patches/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch10
-rw-r--r--patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch64
-rw-r--r--patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch74
-rw-r--r--patches/0014-printk-Adjust-system_state-checks.patch35
-rw-r--r--patches/0015-mm-vmscan-Adjust-system_state-checks.patch39
-rw-r--r--patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch49
-rw-r--r--patches/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch60
-rw-r--r--patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch56
-rw-r--r--patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch41
-rw-r--r--patches/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch74
-rw-r--r--patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch37
-rw-r--r--patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch81
-rw-r--r--patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch92
-rw-r--r--patches/0021-PCI-Replace-the-racy-recursion-prevention.patch135
-rw-r--r--patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch69
-rw-r--r--patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch309
-rw-r--r--patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch221
-rw-r--r--patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch264
-rw-r--r--patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch66
-rw-r--r--patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch54
-rw-r--r--patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch65
-rw-r--r--patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch193
-rw-r--r--patches/0030-sched-Provide-is_percpu_thread-helper.patch43
-rw-r--r--patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch194
-rw-r--r--patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch93
-rw-r--r--patches/ARM-enable-irq-in-translation-section-permission-fau.patch4
-rw-r--r--patches/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch43
-rw-r--r--patches/HACK-printk-drop-the-logbuf_lock-more-often.patch35
-rw-r--r--patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch6
-rw-r--r--patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch2
-rw-r--r--patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch8
-rw-r--r--patches/Revert-random-invalidate-batched-entropy-after-crng-.patch161
-rw-r--r--patches/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch217
-rw-r--r--patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch2
-rw-r--r--patches/add_migrate_disable.patch255
-rw-r--r--patches/arch-arm64-Add-lazy-preempt-support.patch58
-rw-r--r--patches/arm-convert-boot-lock-to-raw.patch8
-rw-r--r--patches/arm-preempt-lazy-support.patch6
-rw-r--r--patches/arm64-xen--Make-XEN-depend-on-non-rt.patch2
-rw-r--r--patches/ata-disable-interrupts-if-non-rt.patch16
-rw-r--r--patches/block-blk-mq-use-swait.patch30
-rw-r--r--patches/block-mq-don-t-complete-requests-via-IPI.patch36
-rw-r--r--patches/block-mq-drop-preempt-disable.patch10
-rw-r--r--patches/block-mq-use-cpu_light.patch2
-rw-r--r--patches/block-shorten-interrupt-disabled-regions.patch10
-rw-r--r--patches/block-use-cpu-chill.patch22
-rw-r--r--patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch12
-rw-r--r--patches/cgroups-use-simple-wait-in-css_release.patch24
-rw-r--r--patches/char-random-don-t-print-that-the-init-is-done.patch166
-rw-r--r--patches/completion-use-simple-wait-queues.patch62
-rw-r--r--patches/cond-resched-lock-rt-tweak.patch2
-rw-r--r--patches/cond-resched-softirq-rt.patch6
-rw-r--r--patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch2
-rw-r--r--patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch12
-rw-r--r--patches/cpu-rt-rework-cpu-down.patch44
-rw-r--r--patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch12
-rw-r--r--patches/cpu_down_move_migrate_enable_back.patch10
-rw-r--r--patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch2
-rw-r--r--patches/cpumask-disable-offstack-on-rt.patch4
-rw-r--r--patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch42
-rw-r--r--patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch4
-rw-r--r--patches/debugobjects-rt.patch2
-rw-r--r--patches/delayacct-use-raw_spinlocks.patch81
-rw-r--r--patches/dm-make-rt-aware.patch2
-rw-r--r--patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch35
-rw-r--r--patches/drivers-net-8139-disable-irq-nosync.patch2
-rw-r--r--patches/drivers-net-vortex-fix-locking-issues.patch2
-rw-r--r--patches/drivers-random-reduce-preempt-disabled-region.patch4
-rw-r--r--patches/drivers-tty-fix-omap-lock-crap.patch4
-rw-r--r--patches/drivers-tty-pl011-irq-disable-madness.patch4
-rw-r--r--patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch20
-rw-r--r--patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch2
-rw-r--r--patches/drm-i915-init-spinlock-properly-on-RT.patch26
-rw-r--r--patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch4
-rw-r--r--patches/fs-aio-simple-simple-work.patch2
-rw-r--r--patches/fs-block-rt-support.patch2
-rw-r--r--patches/fs-dcache-include-wait.h.patch23
-rw-r--r--patches/fs-dcache-init-in_lookup_hashtable.patch2
-rw-r--r--patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch8
-rw-r--r--patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch18
-rw-r--r--patches/fs-namespace-preemption-fix.patch2
-rw-r--r--patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch8
-rw-r--r--patches/fs-ntfs-disable-interrupt-non-rt.patch4
-rw-r--r--patches/fs-replace-bh_uptodate_lock-for-rt.patch18
-rw-r--r--patches/ftrace-Fix-trace-header-alignment.patch4
-rw-r--r--patches/ftrace-migrate-disable-tracing.patch10
-rw-r--r--patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch2
-rw-r--r--patches/futex-requeue-pi-fix.patch4
-rw-r--r--patches/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch4
-rw-r--r--patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch6
-rw-r--r--patches/futex-workaround-migrate_disable-enable-in-different.patch4
-rw-r--r--patches/genirq-disable-irqpoll-on-rt.patch4
-rw-r--r--patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch29
-rw-r--r--patches/genirq-force-threading.patch6
-rw-r--r--patches/genirq-update-irq_set_irqchip_state-documentation.patch2
-rw-r--r--patches/gpu_don_t_check_for_the_lock_owner.patch32
-rw-r--r--patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch2
-rw-r--r--patches/hotplug-light-get-online-cpus.patch83
-rw-r--r--patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch2
-rw-r--r--patches/hotplug-use-migrate-disable.patch10
-rw-r--r--patches/hrtimer-enfore-64byte-alignment.patch2
-rw-r--r--patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch47
-rw-r--r--patches/hrtimers-prepare-full-preemption.patch38
-rw-r--r--patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch4
-rw-r--r--patches/ide-use-nort-local-irq-variants.patch8
-rw-r--r--patches/idr-use-local-lock-for-protection.patch123
-rw-r--r--patches/introduce_migrate_disable_cpu_light.patch4
-rw-r--r--patches/iommu-amd--Use-WARN_ON_NORT.patch4
-rw-r--r--patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch10
-rw-r--r--patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch6
-rw-r--r--patches/ipc-sem-rework-semaphore-wakeups.patch69
-rw-r--r--patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch4
-rw-r--r--patches/irqwork-Move-irq-safe-work-to-irq-context.patch4
-rw-r--r--patches/irqwork-push_most_work_into_softirq_context.patch8
-rw-r--r--patches/jump-label-rt.patch2
-rw-r--r--patches/kconfig-disable-a-few-options-rt.patch2
-rw-r--r--patches/kernel-SRCU-provide-a-static-initializer.patch4
-rw-r--r--patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch12
-rw-r--r--patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch18
-rw-r--r--patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch33
-rw-r--r--patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch2
-rw-r--r--patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch4
-rw-r--r--patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch760
-rw-r--r--patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch4
-rw-r--r--patches/kgb-serial-hackaround.patch33
-rw-r--r--patches/latency-hist.patch87
-rw-r--r--patches/latencyhist-disable-jump-labels.patch2
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch4
-rw-r--r--patches/lockdep-Fix-per-cpu-static-objects.patch8
-rw-r--r--patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch24
-rw-r--r--patches/lockdep-no-softirq-accounting-on-rt.patch4
-rw-r--r--patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch72
-rw-r--r--patches/md-raid5-percpu-handling-rt-aware.patch12
-rw-r--r--patches/mips-disable-highmem-on-rt.patch2
-rw-r--r--patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch2
-rw-r--r--patches/mm-convert-swap-to-percpu-locked.patch41
-rw-r--r--patches/mm-disable-sloub-rt.patch4
-rw-r--r--patches/mm-enable-slub.patch86
-rw-r--r--patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch4
-rw-r--r--patches/mm-memcontrol-do_not_disable_irq.patch16
-rw-r--r--patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch2
-rw-r--r--patches/mm-page-alloc-use-local-lock-on-target-cpu.patch2
-rw-r--r--patches/mm-page_alloc-reduce-lock-sections-further.patch18
-rw-r--r--patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch133
-rw-r--r--patches/mm-perform-lru_add_drain_all-remotely.patch40
-rw-r--r--patches/mm-protect-activate-switch-mm.patch8
-rw-r--r--patches/mm-rt-kmap-atomic-scheduling.patch28
-rw-r--r--patches/mm-scatterlist-dont-disable-irqs-on-RT.patch28
-rw-r--r--patches/mm-vmalloc-use-get-cpu-light.patch10
-rw-r--r--patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch180
-rw-r--r--patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch16
-rw-r--r--patches/mmci-remove-bogus-irq-save.patch4
-rw-r--r--patches/mutex-no-spin-on-rt.patch4
-rw-r--r--patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch10
-rw-r--r--patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch43
-rw-r--r--patches/net-add-a-lock-around-icmp_sk.patch53
-rw-r--r--patches/net-add-back-the-missing-serialization-in-ip_send_un.patch19
-rw-r--r--patches/net-another-local-irq-disable-alloc-atomic-headache.patch10
-rw-r--r--patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch2
-rw-r--r--patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch20
-rw-r--r--patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch27
-rw-r--r--patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch2
-rw-r--r--patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch10
-rw-r--r--patches/net-make-devnet_rename_seq-a-mutex.patch12
-rw-r--r--patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch22
-rw-r--r--patches/net-prevent-abba-deadlock.patch2
-rw-r--r--patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch8
-rw-r--r--patches/net-use-cpu-chill.patch4
-rw-r--r--patches/net-wireless-warn-nort.patch2
-rw-r--r--patches/net_disable_NET_RX_BUSY_POLL.patch28
-rw-r--r--patches/oleg-signal-rt-fix.patch22
-rw-r--r--patches/panic-disable-random-on-rt.patch2
-rw-r--r--patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch18
-rw-r--r--patches/pci-access-use-__wake_up_all_locked.patch25
-rw-r--r--patches/percpu_ida-use-locklocks.patch18
-rw-r--r--patches/perf-make-swevent-hrtimer-irqsafe.patch2
-rw-r--r--patches/peter_zijlstra-frob-rcu.patch2
-rw-r--r--patches/peterz-srcu-crypto-chain.patch2
-rw-r--r--patches/pid.h-include-atomic.h.patch2
-rw-r--r--patches/pinctrl-qcom-Use-raw-spinlock-variants.patch252
-rw-r--r--patches/ping-sysrq.patch51
-rw-r--r--patches/posix-timers-no-broadcast.patch4
-rw-r--r--patches/posix-timers-thread-posix-cpu-timers-on-rt.patch213
-rw-r--r--patches/power-disable-highmem-on-rt.patch2
-rw-r--r--patches/powerpc-preempt-lazy-support.patch34
-rw-r--r--patches/preempt-lazy-support.patch156
-rw-r--r--patches/preempt-nort-rt-variants.patch4
-rw-r--r--patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch2
-rw-r--r--patches/printk-kill.patch43
-rw-r--r--patches/printk-rt-aware.patch49
-rw-r--r--patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch38
-rw-r--r--patches/radix-tree-use-local-locks.patch88
-rw-r--r--patches/random-avoid-preempt_disable-ed-section.patch67
-rw-r--r--patches/random-make-it-work-on-rt.patch14
-rw-r--r--patches/rbtree-include-rcu.h-because-we-use-it.patch13
-rw-r--r--patches/rcu-Eliminate-softirq-processing-from-rcutree.patch40
-rw-r--r--patches/rcu-disable-rcu-fast-no-hz-on-rt.patch2
-rw-r--r--patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch2
-rw-r--r--patches/rcu-make-RCU_BOOST-default-on-RT.patch2
-rw-r--r--patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch52
-rw-r--r--patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch60
-rw-r--r--patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch2
-rw-r--r--patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch2
-rw-r--r--patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch2
-rw-r--r--patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch18
-rw-r--r--patches/rt-add-rt-locks.patch281
-rw-r--r--patches/rt-drop_mutex_disable_on_not_debug.patch32
-rw-r--r--patches/rt-introduce-cpu-chill.patch4
-rw-r--r--patches/rt-locking-Reenable-migration-accross-schedule.patch8
-rw-r--r--patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch2
-rw-r--r--patches/rtmutex-Make-lock_killable-work.patch4
-rw-r--r--patches/rtmutex-Provide-locked-slowpath.patch25
-rw-r--r--patches/rtmutex-Provide-rt_mutex_lock_state.patch6
-rw-r--r--patches/rtmutex-add-a-first-shot-of-ww_mutex.patch124
-rw-r--r--patches/rtmutex-futex-prepare-rt.patch28
-rw-r--r--patches/rtmutex-lock-killable.patch2
-rw-r--r--patches/rtmutex-trylock-is-okay-on-RT.patch2
-rw-r--r--patches/rtmutex_dont_include_rcu.patch4
-rw-r--r--patches/rwsem-rt-Lift-single-reader-restriction.patch14
-rw-r--r--patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch2
-rw-r--r--patches/sched-Remove-TASK_ALL.patch12
-rw-r--r--patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch4
-rw-r--r--patches/sched-delay-put-task.patch29
-rw-r--r--patches/sched-disable-rt-group-sched-on-rt.patch2
-rw-r--r--patches/sched-disable-ttwu-queue.patch2
-rw-r--r--patches/sched-limit-nr-migrate.patch2
-rw-r--r--patches/sched-might-sleep-do-not-account-rcu-depth.patch6
-rw-r--r--patches/sched-mmdrop-delayed.patch28
-rw-r--r--patches/sched-rt-mutex-wakeup.patch31
-rw-r--r--patches/sched-ttwu-ensure-success-return-is-correct.patch2
-rw-r--r--patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch2
-rw-r--r--patches/scsi-fcoe-rt-aware.patch14
-rw-r--r--patches/seqlock-prevent-rt-starvation.patch27
-rw-r--r--patches/series109
-rw-r--r--patches/signal-fix-up-rcu-wreckage.patch4
-rw-r--r--patches/signal-revert-ptrace-preempt-magic.patch2
-rw-r--r--patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch53
-rw-r--r--patches/skbufhead-raw-lock.patch22
-rw-r--r--patches/slub-disable-SLUB_CPU_PARTIAL.patch2
-rw-r--r--patches/slub-enable-irqs-for-no-wait.patch8
-rw-r--r--patches/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch8
-rw-r--r--patches/softirq-disable-softirq-stacks-for-rt.patch6
-rw-r--r--patches/softirq-preempt-fix-3-re.patch26
-rw-r--r--patches/softirq-split-locks.patch44
-rw-r--r--patches/softirq-wake-the-timer-softirq-if-needed.patch20
-rw-r--r--patches/sparc64-use-generic-rwsem-spinlocks-rt.patch2
-rw-r--r--patches/suspend-prevernt-might-sleep-splats.patch18
-rw-r--r--patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch6
-rw-r--r--patches/thermal-Defer-thermal-wakups-to-threads.patch97
-rw-r--r--patches/timekeeping-split-jiffies-lock.patch18
-rw-r--r--patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch6
-rw-r--r--patches/timer-hrtimer-check-properly-for-a-running-timer.patch2
-rw-r--r--patches/timer-make-the-base-lock-raw.patch32
-rw-r--r--patches/timers-Don-t-wake-ktimersoftd-on-every-tick.patch228
-rw-r--r--patches/timers-prepare-for-full-preemption.patch34
-rw-r--r--patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch6
-rw-r--r--patches/tracing-account-for-preempt-off-in-preempt_schedule.patch2
-rw-r--r--patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch2
-rw-r--r--patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch5
-rw-r--r--patches/user-use-local-irq-nort.patch2
-rw-r--r--patches/wait.h-include-atomic.h.patch4
-rw-r--r--patches/work-queue-work-around-irqsafe-timer-optimization.patch2
-rw-r--r--patches/work-simple-Simple-work-queue-implemenation.patch4
-rw-r--r--patches/workqueue-distangle-from-rq-lock.patch49
-rw-r--r--patches/workqueue-prevent-deadlock-stall.patch14
-rw-r--r--patches/workqueue-use-locallock.patch42
-rw-r--r--patches/workqueue-use-rcu.patch50
-rw-r--r--patches/x86-UV-raw_spinlock-conversion.patch31
-rw-r--r--patches/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch43
-rw-r--r--patches/x86-crypto-reduce-preempt-disabled-regions.patch49
-rw-r--r--patches/x86-io-apic-migra-no-unmask.patch2
-rw-r--r--patches/x86-kvm-require-const-tsc-for-rt.patch2
-rw-r--r--patches/x86-mce-timer-hrtimer.patch130
-rw-r--r--patches/x86-mce-use-swait-queue-for-mce-wakeups.patch8
-rw-r--r--patches/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch38
-rw-r--r--patches/x86-preempt-lazy.patch38
-rw-r--r--patches/x86-use-gen-rwsem-spinlocks-rt.patch2
341 files changed, 9925 insertions, 3649 deletions
diff --git a/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
new file mode 100644
index 000000000000..3352c259f1f6
--- /dev/null
+++ b/patches/0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
@@ -0,0 +1,240 @@
+From 8f553c498e1772cccb39a114da4a498d22992758 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:12 +0200
+Subject: [PATCH 01/32] cpu/hotplug: Provide cpus_read|write_[un]lock()
+
+The counting 'rwsem' hackery of get|put_online_cpus() is going to be
+replaced by percpu rwsem.
+
+Rename the functions to make it clear that it's locking and not some
+refcount style interface. These new functions will be used for the
+preparatory patches which make the code ready for the percpu rwsem
+conversion.
+
+Rename all instances in the cpu hotplug code while at it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.080397752@linutronix.de
+---
+ include/linux/cpu.h | 32 ++++++++++++++++++--------------
+ kernel/cpu.c | 36 ++++++++++++++++++------------------
+ 2 files changed, 36 insertions(+), 32 deletions(-)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index f92081234afd..055876003914 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -99,26 +99,30 @@ static inline void cpu_maps_update_done(void)
+ extern struct bus_type cpu_subsys;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+-/* Stop CPUs going up and down. */
+-
+-extern void cpu_hotplug_begin(void);
+-extern void cpu_hotplug_done(void);
+-extern void get_online_cpus(void);
+-extern void put_online_cpus(void);
++extern void cpus_write_lock(void);
++extern void cpus_write_unlock(void);
++extern void cpus_read_lock(void);
++extern void cpus_read_unlock(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+ int cpu_down(unsigned int cpu);
+
+-#else /* CONFIG_HOTPLUG_CPU */
++#else /* CONFIG_HOTPLUG_CPU */
+
+-static inline void cpu_hotplug_begin(void) {}
+-static inline void cpu_hotplug_done(void) {}
+-#define get_online_cpus() do { } while (0)
+-#define put_online_cpus() do { } while (0)
+-#define cpu_hotplug_disable() do { } while (0)
+-#define cpu_hotplug_enable() do { } while (0)
+-#endif /* CONFIG_HOTPLUG_CPU */
++static inline void cpus_write_lock(void) { }
++static inline void cpus_write_unlock(void) { }
++static inline void cpus_read_lock(void) { }
++static inline void cpus_read_unlock(void) { }
++static inline void cpu_hotplug_disable(void) { }
++static inline void cpu_hotplug_enable(void) { }
++#endif /* !CONFIG_HOTPLUG_CPU */
++
++/* Wrappers which go away once all code is converted */
++static inline void cpu_hotplug_begin(void) { cpus_write_lock(); }
++static inline void cpu_hotplug_done(void) { cpus_write_unlock(); }
++static inline void get_online_cpus(void) { cpus_read_lock(); }
++static inline void put_online_cpus(void) { cpus_read_unlock(); }
+
+ #ifdef CONFIG_PM_SLEEP_SMP
+ extern int freeze_secondary_cpus(int primary);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 9ae6fbe5b5cf..d3221ae5b474 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -235,7 +235,7 @@ static struct {
+ #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+
+
+-void get_online_cpus(void)
++void cpus_read_lock(void)
+ {
+ might_sleep();
+ if (cpu_hotplug.active_writer == current)
+@@ -245,9 +245,9 @@ void get_online_cpus(void)
+ atomic_inc(&cpu_hotplug.refcount);
+ mutex_unlock(&cpu_hotplug.lock);
+ }
+-EXPORT_SYMBOL_GPL(get_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_lock);
+
+-void put_online_cpus(void)
++void cpus_read_unlock(void)
+ {
+ int refcount;
+
+@@ -264,7 +264,7 @@ void put_online_cpus(void)
+ cpuhp_lock_release();
+
+ }
+-EXPORT_SYMBOL_GPL(put_online_cpus);
++EXPORT_SYMBOL_GPL(cpus_read_unlock);
+
+ /*
+ * This ensures that the hotplug operation can begin only when the
+@@ -288,7 +288,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
+ * get_online_cpus() not an api which is called all that often.
+ *
+ */
+-void cpu_hotplug_begin(void)
++void cpus_write_lock(void)
+ {
+ DEFINE_WAIT(wait);
+
+@@ -306,7 +306,7 @@ void cpu_hotplug_begin(void)
+ finish_wait(&cpu_hotplug.wq, &wait);
+ }
+
+-void cpu_hotplug_done(void)
++void cpus_write_unlock(void)
+ {
+ cpu_hotplug.active_writer = NULL;
+ mutex_unlock(&cpu_hotplug.lock);
+@@ -773,7 +773,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ if (!cpu_present(cpu))
+ return -EINVAL;
+
+- cpu_hotplug_begin();
++ cpus_write_lock();
+
+ cpuhp_tasks_frozen = tasks_frozen;
+
+@@ -811,7 +811,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
+ }
+
+ out:
+- cpu_hotplug_done();
++ cpus_write_unlock();
+ return ret;
+ }
+
+@@ -893,7 +893,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ struct task_struct *idle;
+ int ret = 0;
+
+- cpu_hotplug_begin();
++ cpus_write_lock();
+
+ if (!cpu_present(cpu)) {
+ ret = -EINVAL;
+@@ -941,7 +941,7 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
+ target = min((int)target, CPUHP_BRINGUP_CPU);
+ ret = cpuhp_up_callbacks(cpu, st, target);
+ out:
+- cpu_hotplug_done();
++ cpus_write_unlock();
+ return ret;
+ }
+
+@@ -1424,7 +1424,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ if (sp->multi_instance == false)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !sp->startup.multi)
+@@ -1453,7 +1453,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ hlist_add_head(node, &sp->list);
+ unlock:
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+@@ -1486,7 +1486,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ if (cpuhp_cb_check(state) || !name)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+@@ -1522,7 +1522,7 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ }
+ out:
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ /*
+ * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+ * dynamically allocated state in case of success.
+@@ -1544,7 +1544,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ if (!sp->multi_instance)
+ return -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !cpuhp_get_teardown_cb(state))
+@@ -1565,7 +1565,7 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ remove:
+ hlist_del(node);
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+
+ return 0;
+ }
+@@ -1587,7 +1587,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+
+ BUG_ON(cpuhp_cb_check(state));
+
+- get_online_cpus();
++ cpus_read_lock();
+
+ mutex_lock(&cpuhp_state_mutex);
+ if (sp->multi_instance) {
+@@ -1615,7 +1615,7 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ remove:
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+ mutex_unlock(&cpuhp_state_mutex);
+- put_online_cpus();
++ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);
+
+--
+2.11.0
+
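For context (not part of the commit above): once this patch is applied, read-side callers simply replace get/put_online_cpus() with the renamed lock functions. The helper walk_online_cpus() below is a hypothetical sketch of that locking pattern, not code from the patch.

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/printk.h>

/* Hypothetical caller: hold the hotplug read lock while walking CPUs. */
static void walk_online_cpus(void)
{
        unsigned int cpu;

        cpus_read_lock();               /* formerly get_online_cpus() */
        for_each_online_cpu(cpu)
                pr_info("cpu%u is online\n", cpu);  /* CPUs cannot go away here */
        cpus_read_unlock();             /* formerly put_online_cpus() */
}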
diff --git a/patches/0001-futex-Avoid-freeing-an-active-timer.patch b/patches/0001-futex-Avoid-freeing-an-active-timer.patch
index ba12159f5aea..617dcc5378d1 100644
--- a/patches/0001-futex-Avoid-freeing-an-active-timer.patch
+++ b/patches/0001-futex-Avoid-freeing-an-active-timer.patch
@@ -1,8 +1,7 @@
+From 97181f9bd57405b879403763284537e27d46963d Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Mon, 10 Apr 2017 18:03:36 +0200
-Subject: [PATCH] futex: Avoid freeing an active timer
-
-Upstream commit 97181f9bd57405b879403763284537e27d46963d
+Subject: [PATCH 1/4] futex: Avoid freeing an active timer
Alexander reported a hrtimer debug_object splat:
@@ -38,7 +37,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2734,8 +2734,10 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2736,8 +2736,10 @@ static int futex_lock_pi(u32 __user *uad
out_put_key:
put_futex_key(&q.key);
out:
diff --git a/patches/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch b/patches/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
index 128cf8001839..90911b152235 100644
--- a/patches/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
+++ b/patches/0001-futex-Cleanup-variable-names-for-futex_top_waiter.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1120,14 +1120,14 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1122,14 +1122,14 @@ static int attach_to_pi_owner(u32 uval,
static int lookup_pi_state(u32 uval, struct futex_hash_bucket *hb,
union futex_key *key, struct futex_pi_state **ps)
{
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are the first waiter - try to look up the owner based on
-@@ -1174,7 +1174,7 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1176,7 +1176,7 @@ static int futex_lock_pi_atomic(u32 __us
struct task_struct *task, int set_waiters)
{
u32 uval, newval, vpid = task_pid_vnr(task);
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
/*
-@@ -1200,9 +1200,9 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1202,9 +1202,9 @@ static int futex_lock_pi_atomic(u32 __us
* Lookup existing state first. If it exists, try to attach to
* its pi_state.
*/
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No waiter and user TID is 0. We are here because the
-@@ -1292,11 +1292,11 @@ static void mark_wake_futex(struct wake_
+@@ -1294,11 +1294,11 @@ static void mark_wake_futex(struct wake_
q->lock_ptr = NULL;
}
@@ -77,9 +77,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- struct futex_pi_state *pi_state = this->pi_state;
+ struct futex_pi_state *pi_state = top_waiter->pi_state;
u32 uninitialized_var(curval), newval;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
bool deboost;
-@@ -1317,11 +1317,11 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1319,11 +1319,11 @@ static int wake_futex_pi(u32 __user *uad
/*
* It is possible that the next waiter (the one that brought
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We pass it to the next owner. The WAITERS bit is always
-@@ -2631,7 +2631,7 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2633,7 +2633,7 @@ static int futex_unlock_pi(u32 __user *u
u32 uninitialized_var(curval), uval, vpid = task_pid_vnr(current);
union futex_key key = FUTEX_KEY_INIT;
struct futex_hash_bucket *hb;
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
retry:
-@@ -2655,9 +2655,9 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2657,9 +2657,9 @@ static int futex_unlock_pi(u32 __user *u
* all and we at least want to know if user space fiddled
* with the futex value instead of blindly unlocking.
*/
diff --git a/patches/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch b/patches/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
new file mode 100644
index 000000000000..5a79929a7790
--- /dev/null
+++ b/patches/0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
@@ -0,0 +1,53 @@
+From 048c9b954e20396e0c45ee778466994d1be2e612 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:27 +0200
+Subject: [PATCH 01/13] ia64/topology: Remove cpus_allowed manipulation
+
+The CPU hotplug callback fiddles with the cpus_allowed pointer to pin the
+calling thread on the plugged CPU. That's already guaranteed by the hotplug
+core code.
+
+Remove it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: linux-ia64@vger.kernel.org
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.174518069@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/ia64/kernel/topology.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+--- a/arch/ia64/kernel/topology.c
++++ b/arch/ia64/kernel/topology.c
+@@ -355,18 +355,12 @@ static int cache_add_dev(unsigned int cp
+ unsigned long i, j;
+ struct cache_info *this_object;
+ int retval = 0;
+- cpumask_t oldmask;
+
+ if (all_cpu_cache_info[cpu].kobj.parent)
+ return 0;
+
+- oldmask = current->cpus_allowed;
+- retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
+- if (unlikely(retval))
+- return retval;
+
+ retval = cpu_cache_sysfs_init(cpu);
+- set_cpus_allowed_ptr(current, &oldmask);
+ if (unlikely(retval < 0))
+ return retval;
+
diff --git a/patches/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch b/patches/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
new file mode 100644
index 000000000000..25730fb718c4
--- /dev/null
+++ b/patches/0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
@@ -0,0 +1,74 @@
+From 8fb12156b8db61af3d49f3e5e104568494581d1f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:32 +0200
+Subject: [PATCH 01/17] init: Pin init task to the boot CPU, initially
+
+Some of the boot code in init_kernel_freeable() which runs before SMP
+bringup assumes (rightfully) that it runs on the boot CPU and therefore can
+use smp_processor_id() in preemptible context.
+
+That works so far because the smp_processor_id() check starts to be
+effective after smp bringup. That's just wrong. Starting with SMP bringup
+and the ability to move threads around, smp_processor_id() in preemptible
+context is broken.
+
+Aside of that it does not make sense to allow init to run on all CPUs
+before sched_smp_init() has been run.
+
+Pin the init to the boot CPU so the existing code can continue to use
+smp_processor_id() without triggering the checks when the enabling of those
+checks starts earlier.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184734.943149935@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ init/main.c | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -389,6 +389,7 @@ static __initdata DECLARE_COMPLETION(kth
+
+ static noinline void __ref rest_init(void)
+ {
++ struct task_struct *tsk;
+ int pid;
+
+ rcu_scheduler_starting();
+@@ -397,7 +398,17 @@ static noinline void __ref rest_init(voi
+ * the init task will end up wanting to create kthreads, which, if
+ * we schedule it before we create kthreadd, will OOPS.
+ */
+- kernel_thread(kernel_init, NULL, CLONE_FS);
++ pid = kernel_thread(kernel_init, NULL, CLONE_FS);
++ /*
++ * Pin init on the boot CPU. Task migration is not properly working
++ * until sched_init_smp() has been run. It will set the allowed
++ * CPUs for init to the non isolated CPUs.
++ */
++ rcu_read_lock();
++ tsk = find_task_by_pid_ns(pid, &init_pid_ns);
++ set_cpus_allowed_ptr(tsk, cpumask_of(smp_processor_id()));
++ rcu_read_unlock();
++
+ numa_default_policy();
+ pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+ rcu_read_lock();
+@@ -1011,10 +1022,6 @@ static noinline void __init kernel_init_
+ * init can allocate pages on any node
+ */
+ set_mems_allowed(node_states[N_MEMORY]);
+- /*
+- * init can run on any cpu.
+- */
+- set_cpus_allowed_ptr(current, cpu_all_mask);
+
+ cad_pid = task_pid(current);
+
diff --git a/patches/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch b/patches/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
index af8e91fd2de6..ede43101e953 100644
--- a/patches/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
+++ b/patches/0001-rtmutex-Deboost-before-waking-up-the-top-waiter.patch
@@ -1,8 +1,7 @@
+From 2a1c6029940675abb2217b590512dbf691867ec4 Mon Sep 17 00:00:00 2001
From: Xunlei Pang <xlpang@redhat.com>
Date: Thu, 23 Mar 2017 15:56:07 +0100
-Subject: [PATCH] rtmutex: Deboost before waking up the top waiter
-
-Upstream commit 2a1c6029940675abb2217b590512dbf691867ec4
+Subject: [PATCH 1/9] rtmutex: Deboost before waking up the top waiter
We should deboost before waking the high-priority task, such that we
don't run two tasks with the same "state" (priority, deadline,
@@ -44,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1458,10 +1458,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1460,10 +1460,7 @@ static int wake_futex_pi(u32 __user *uad
out_unlock:
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
@@ -58,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -371,24 +371,6 @@ static void __rt_mutex_adjust_prio(struc
+@@ -373,24 +373,6 @@ static void __rt_mutex_adjust_prio(struc
}
/*
@@ -83,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Deadlock detection is conditional:
*
* If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
-@@ -1049,6 +1031,7 @@ static void mark_wakeup_next_waiter(stru
+@@ -1051,6 +1033,7 @@ static void mark_wakeup_next_waiter(stru
* lock->wait_lock.
*/
rt_mutex_dequeue_pi(current, waiter);
@@ -91,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* As we are waking up the top waiter, and the waiter stays
-@@ -1391,6 +1374,16 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1393,6 +1376,16 @@ static bool __sched rt_mutex_slowunlock(
*/
mark_wakeup_next_waiter(wake_q, lock);
@@ -108,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
/* check PI boosting */
-@@ -1440,6 +1433,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1442,6 +1435,18 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
return slowfn(lock);
}
@@ -127,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
-@@ -1453,11 +1458,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1455,11 +1460,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
deboost = slowfn(lock, &wake_q);
@@ -140,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1570,6 +1571,13 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1572,6 +1573,13 @@ bool __sched __rt_mutex_futex_unlock(str
}
mark_wakeup_next_waiter(wake_q, lock);
@@ -154,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return true; /* deboost and wakeups */
}
-@@ -1582,10 +1590,7 @@ void __sched rt_mutex_futex_unlock(struc
+@@ -1584,10 +1592,7 @@ void __sched rt_mutex_futex_unlock(struc
deboost = __rt_mutex_futex_unlock(lock, &wake_q);
raw_spin_unlock_irq(&lock->wait_lock);
diff --git a/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch b/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
new file mode 100644
index 000000000000..4d1f4d4e226a
--- /dev/null
+++ b/patches/0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
@@ -0,0 +1,62 @@
+From 45aea321678856687927c53972321ebfab77759a Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Wed, 24 May 2017 08:52:02 +0200
+Subject: [PATCH] sched/clock: Fix early boot preempt assumption in
+ __set_sched_clock_stable()
+
+The more strict early boot preemption warnings found that
+__set_sched_clock_stable() was incorrectly assuming we'd still be
+running on a single CPU:
+
+ BUG: using smp_processor_id() in preemptible [00000000] code: swapper/0/1
+ caller is debug_smp_processor_id+0x1c/0x1e
+ CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.12.0-rc2-00108-g1c3c5ea #1
+ Call Trace:
+ dump_stack+0x110/0x192
+ check_preemption_disabled+0x10c/0x128
+ ? set_debug_rodata+0x25/0x25
+ debug_smp_processor_id+0x1c/0x1e
+ sched_clock_init_late+0x27/0x87
+ [...]
+
+Fix it by disabling IRQs.
+
+Reported-by: kernel test robot <xiaolong.ye@intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: lkp@01.org
+Cc: tipbuild@zytor.com
+Link: http://lkml.kernel.org/r/20170524065202.v25vyu7pvba5mhpd@hirez.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/sched/clock.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/kernel/sched/clock.c
++++ b/kernel/sched/clock.c
+@@ -126,12 +126,19 @@ int sched_clock_stable(void)
+
+ static void __set_sched_clock_stable(void)
+ {
+- struct sched_clock_data *scd = this_scd();
++ struct sched_clock_data *scd;
+
+ /*
++ * Since we're still unstable and the tick is already running, we have
++ * to disable IRQs in order to get a consistent scd->tick* reading.
++ */
++ local_irq_disable();
++ scd = this_scd();
++ /*
+ * Attempt to make the (initial) unstable->stable transition continuous.
+ */
+ __sched_clock_offset = (scd->tick_gtod + __gtod_offset) - (scd->tick_raw);
++ local_irq_enable();
+
+ printk(KERN_INFO "sched_clock: Marking stable (%lld, %lld)->(%lld, %lld)\n",
+ scd->tick_gtod, __gtod_offset,
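For context (not from the patch): the fix relies on the general rule that related per-CPU fields read from preemptible context need interrupts disabled if an interrupt (here: the tick) may update them. A generic, hypothetical sketch of that pattern with made-up names:

#include <linux/irqflags.h>
#include <linux/percpu.h>
#include <linux/types.h>

struct sample {
        u64 a;
        u64 b;
};

static DEFINE_PER_CPU(struct sample, sample_data);

/* Hypothetical: snapshot both fields consistently w.r.t. IRQ updaters. */
static struct sample snapshot_local_sample(void)
{
        struct sample s;
        unsigned long flags;

        local_irq_save(flags);
        s = *this_cpu_ptr(&sample_data);
        local_irq_restore(flags);

        return s;
}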
diff --git a/patches/0002-arm-Adjust-system_state-check.patch b/patches/0002-arm-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..89388ec06c6f
--- /dev/null
+++ b/patches/0002-arm-Adjust-system_state-check.patch
@@ -0,0 +1,37 @@
+From 5976a66913a8bf42465d96776fd37fb5631edc19 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:33 +0200
+Subject: [PATCH 02/17] arm: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in ipi_cpu_stop() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170516184735.020718977@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/arm/kernel/smp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -555,8 +555,7 @@ static DEFINE_RAW_SPINLOCK(stop_lock);
+ */
+ static void ipi_cpu_stop(unsigned int cpu)
+ {
+- if (system_state == SYSTEM_BOOTING ||
+- system_state == SYSTEM_RUNNING) {
++ if (system_state <= SYSTEM_RUNNING) {
+ raw_spin_lock(&stop_lock);
+ pr_crit("CPU%u: stopping\n", cpu);
+ dump_stack();
diff --git a/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch b/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
new file mode 100644
index 000000000000..8739b951c37d
--- /dev/null
+++ b/patches/0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
@@ -0,0 +1,46 @@
+From ade3f680a76b474d9f5375a9b1d100ee787bf469 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:13 +0200
+Subject: [PATCH 02/32] cpu/hotplug: Provide lockdep_assert_cpus_held()
+
+Provide a stub function which can be used in places where existing
+get_online_cpus() calls are moved to call sites.
+
+This stub is going to be filled by the final conversion of the hotplug
+locking mechanism to a percpu rwsem.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.161282442@linutronix.de
+---
+ include/linux/cpu.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 055876003914..af4d660798e5 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -103,6 +103,7 @@ extern void cpus_write_lock(void);
+ extern void cpus_write_unlock(void);
+ extern void cpus_read_lock(void);
+ extern void cpus_read_unlock(void);
++static inline void lockdep_assert_cpus_held(void) { }
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+@@ -114,6 +115,7 @@ static inline void cpus_write_lock(void) { }
+ static inline void cpus_write_unlock(void) { }
+ static inline void cpus_read_lock(void) { }
+ static inline void cpus_read_unlock(void) { }
++static inline void lockdep_assert_cpus_held(void) { }
+ static inline void cpu_hotplug_disable(void) { }
+ static inline void cpu_hotplug_enable(void) { }
+ #endif /* !CONFIG_HOTPLUG_CPU */
+--
+2.11.0
+
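For context (not part of the patch): the stub lets functions that require the hotplug lock document that requirement now; it becomes an enforced lockdep check once the percpu rwsem conversion is complete. A hypothetical *_cpuslocked helper, purely for illustration:

#include <linux/cpu.h>

/*
 * Hypothetical helper: the caller must already hold cpus_read_lock().
 * The assertion is a no-op until the lockdep-backed version lands.
 */
static void refresh_per_cpu_state_cpuslocked(void)
{
        lockdep_assert_cpus_held();
        /* ... update state that must not race with CPU hotplug ... */
}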
diff --git a/patches/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch b/patches/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
index 816047dfb27a..374ace45a5af 100644
--- a/patches/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
+++ b/patches/0002-futex-Fix-small-and-harmless-looking-inconsistencies.patch
@@ -1,8 +1,7 @@
+From 94ffac5d847cfd790bb37b7cef1cad803743985e Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Fri, 7 Apr 2017 09:04:07 +0200
-Subject: [PATCH] futex: Fix small (and harmless looking) inconsistencies
-
-Upstream commit 94ffac5d847cfd790bb37b7cef1cad803743985e
+Subject: [PATCH 2/4] futex: Fix small (and harmless looking) inconsistencies
During (post-commit) review Darren spotted a few minor things. One
(harmless AFAICT) type inconsistency and a comment that wasn't as
@@ -22,7 +21,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1023,7 +1023,8 @@ static int attach_to_pi_state(u32 __user
+@@ -1025,7 +1025,8 @@ static int attach_to_pi_state(u32 __user
struct futex_pi_state **ps)
{
pid_t pid = uval & FUTEX_TID_MASK;
@@ -32,7 +31,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
/*
* Userspace might have messed up non-PI and PI futexes [3]
-@@ -1439,6 +1440,11 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1441,6 +1442,11 @@ static int wake_futex_pi(u32 __user *uad
if (ret)
goto out_unlock;
@@ -44,7 +43,7 @@ Signed-off-by: Ingo Molnar <mingo@kernel.org>
raw_spin_lock(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
list_del_init(&pi_state->list);
-@@ -1450,9 +1456,6 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1452,9 +1458,6 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
diff --git a/patches/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch b/patches/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
index 29c184a5184d..8f8f323fb6ab 100644
--- a/patches/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
+++ b/patches/0002-futex-Use-smp_store_release-in-mark_wake_futex.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1288,8 +1288,7 @@ static void mark_wake_futex(struct wake_
+@@ -1290,8 +1290,7 @@ static void mark_wake_futex(struct wake_
* memory barrier is required here to prevent the following
* store to lock_ptr from getting ahead of the plist_del.
*/
diff --git a/patches/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch b/patches/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
index bb100a5d8afa..46d5775c9712 100644
--- a/patches/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
+++ b/patches/0002-sched-rtmutex-deadline-Fix-a-PI-crash-for-deadline-t.patch
@@ -1,8 +1,7 @@
+From e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22 Mon Sep 17 00:00:00 2001
From: Xunlei Pang <xlpang@redhat.com>
Date: Thu, 23 Mar 2017 15:56:08 +0100
-Subject: [PATCH] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
-
-Upstream commit e96a7705e7d3fef96aec9b590c63b2f6f7d2ba22
+Subject: [PATCH 2/9] sched/rtmutex/deadline: Fix a PI crash for deadline tasks
A crash happened while I was playing with deadline PI rtmutex.
@@ -60,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -164,6 +164,7 @@ extern struct task_group root_task_group
+@@ -181,6 +181,7 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
@@ -70,18 +69,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define INIT_RT_MUTEXES(tsk)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1723,6 +1723,8 @@ struct task_struct {
- /* PI waiters blocked on a rt_mutex held by this task */
- struct rb_root pi_waiters;
- struct rb_node *pi_waiters_leftmost;
+@@ -779,6 +779,8 @@ struct task_struct {
+ /* PI waiters blocked on a rt_mutex held by this task: */
+ struct rb_root pi_waiters;
+ struct rb_node *pi_waiters_leftmost;
+ /* Updated under owner's pi_lock and rq lock */
-+ struct task_struct *pi_top_task;
- /* Deadlock detection and priority inheritance handling */
- struct rt_mutex_waiter *pi_blocked_on;
++ struct task_struct *pi_top_task;
+ /* Deadlock detection and priority inheritance handling: */
+ struct rt_mutex_waiter *pi_blocked_on;
#endif
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
-@@ -19,6 +19,7 @@ static inline int rt_task(struct task_st
+@@ -21,6 +21,7 @@ static inline int rt_task(struct task_st
extern int rt_mutex_getprio(struct task_struct *p);
extern void rt_mutex_setprio(struct task_struct *p, int prio);
extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
@@ -91,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1417,6 +1417,7 @@ static void rt_mutex_init_task(struct ta
+@@ -1438,6 +1438,7 @@ static void rt_mutex_init_task(struct ta
#ifdef CONFIG_RT_MUTEXES
p->pi_waiters = RB_ROOT;
p->pi_waiters_leftmost = NULL;
@@ -101,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -321,6 +321,19 @@ rt_mutex_dequeue_pi(struct task_struct *
+@@ -323,6 +323,19 @@ rt_mutex_dequeue_pi(struct task_struct *
}
/*
@@ -121,7 +120,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Calculate task priority from the waiter tree priority
*
* Return task->normal_prio when the waiter tree is empty or when
-@@ -335,12 +348,12 @@ int rt_mutex_getprio(struct task_struct
+@@ -337,12 +350,12 @@ int rt_mutex_getprio(struct task_struct
task->normal_prio);
}
@@ -138,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -349,12 +362,12 @@ struct task_struct *rt_mutex_get_top_tas
+@@ -351,12 +364,12 @@ struct task_struct *rt_mutex_get_top_tas
*/
int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
{
@@ -157,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3669,6 +3669,8 @@ void rt_mutex_setprio(struct task_struct
+@@ -3712,6 +3712,8 @@ void rt_mutex_setprio(struct task_struct
goto out_unlock;
}
diff --git a/patches/0002-workqueue-Provide-work_on_cpu_safe.patch b/patches/0002-workqueue-Provide-work_on_cpu_safe.patch
new file mode 100644
index 000000000000..c134fbc223b5
--- /dev/null
+++ b/patches/0002-workqueue-Provide-work_on_cpu_safe.patch
@@ -0,0 +1,84 @@
+From 0e8d6a9336b487a1dd6f1991ff376e669d4c87c6 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:28 +0200
+Subject: [PATCH 02/13] workqueue: Provide work_on_cpu_safe()
+
+work_on_cpu() is not protected against CPU hotplug. For code which requires
+to be either executed on an online CPU or to fail if the CPU is not
+available the callsite would have to protect against CPU hotplug.
+
+Provide a function which does get/put_online_cpus() around the call to
+work_on_cpu() and fails the call with -ENODEV if the target CPU is not
+online.
+
+Preparatory patch to convert several racy task affinity manipulations.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.262610721@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ include/linux/workqueue.h | 5 +++++
+ kernel/workqueue.c | 23 +++++++++++++++++++++++
+ 2 files changed, 28 insertions(+)
+
+--- a/include/linux/workqueue.h
++++ b/include/linux/workqueue.h
+@@ -608,8 +608,13 @@ static inline long work_on_cpu(int cpu,
+ {
+ return fn(arg);
+ }
++static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++{
++ return fn(arg);
++}
+ #else
+ long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
++long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -4735,6 +4735,29 @@ long work_on_cpu(int cpu, long (*fn)(voi
+ return wfc.ret;
+ }
+ EXPORT_SYMBOL_GPL(work_on_cpu);
++
++/**
++ * work_on_cpu_safe - run a function in thread context on a particular cpu
++ * @cpu: the cpu to run on
++ * @fn: the function to run
++ * @arg: the function argument
++ *
++ * Disables CPU hotplug and calls work_on_cpu(). The caller must not hold
++ * any locks which would prevent @fn from completing.
++ *
++ * Return: The value @fn returns.
++ */
++long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
++{
++ long ret = -ENODEV;
++
++ get_online_cpus();
++ if (cpu_online(cpu))
++ ret = work_on_cpu(cpu, fn, arg);
++ put_online_cpus();
++ return ret;
++}
++EXPORT_SYMBOL_GPL(work_on_cpu_safe);
+ #endif /* CONFIG_SMP */
+
+ #ifdef CONFIG_FREEZER
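For context, a minimal caller sketch of the interface this patch introduces (not part of the quilt series; the helper names are made up for illustration). work_on_cpu_safe() either runs the callback in a kworker bound to the requested CPU or returns -ENODEV when that CPU is offline, so the call site no longer needs its own get/put_online_cpus():

    static long report_cpu(void *arg)
    {
            /* Runs on the CPU handed to work_on_cpu_safe(). */
            pr_info("callback running on CPU%u\n", smp_processor_id());
            return 0;
    }

    static long run_report_on(int cpu)
    {
            /* Fails with -ENODEV instead of silently running elsewhere. */
            return work_on_cpu_safe(cpu, report_cpu, NULL);
    }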
diff --git a/patches/0003-arm64-Adjust-system_state-check.patch b/patches/0003-arm64-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..72f3d175f194
--- /dev/null
+++ b/patches/0003-arm64-Adjust-system_state-check.patch
@@ -0,0 +1,38 @@
+From ef284f5ca5f102bf855e599305c0c16d6e844635 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:34 +0200
+Subject: [PATCH 03/17] arm64: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in smp_send_stop() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Link: http://lkml.kernel.org/r/20170516184735.112589728@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/arm64/kernel/smp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/smp.c
++++ b/arch/arm64/kernel/smp.c
+@@ -915,8 +915,7 @@ void smp_send_stop(void)
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+- if (system_state == SYSTEM_BOOTING ||
+- system_state == SYSTEM_RUNNING)
++ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+ smp_cross_call(&mask, IPI_CPU_STOP);
+ }
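The "system_state <= SYSTEM_RUNNING" form used by these conversions only works because of the ordering of the system_state enum; a sketch of the assumed layout after the series (the name of the new intermediate state, SYSTEM_SCHEDULING, is an assumption here, not something stated in this queue):

    /* Ordering assumed by the "system_state <= SYSTEM_RUNNING" checks. */
    enum system_states {
            SYSTEM_BOOTING,
            SYSTEM_SCHEDULING,      /* new intermediate state (assumed name) */
            SYSTEM_RUNNING,
            SYSTEM_HALT,
            SYSTEM_POWER_OFF,
            SYSTEM_RESTART,
    };

    /* "booting or running" collapses into one ordered comparison: */
    if (system_state <= SYSTEM_RUNNING)
            pr_crit("SMP: stopping secondary CPUs\n");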
diff --git a/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch b/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
new file mode 100644
index 000000000000..6b98814eb2f1
--- /dev/null
+++ b/patches/0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
@@ -0,0 +1,218 @@
+From 71def423fe3da0d40ad3427a4cd5f9edc53bff67 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:14 +0200
+Subject: [PATCH 03/32] cpu/hotplug: Provide
+ cpuhp_setup/remove_state[_nocalls]_cpuslocked()
+
+Some call sites of cpuhp_setup/remove_state[_nocalls]() are within a
+cpus_read locked region.
+
+cpuhp_setup/remove_state[_nocalls]() call cpus_read_lock() as well, which
+is possible in the current implementation but prevents converting the
+hotplug locking to a percpu rwsem.
+
+Provide locked versions of the interfaces to avoid nested calls to
+cpus_read_lock().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.239600868@linutronix.de
+---
+ include/linux/cpuhotplug.h | 29 ++++++++++++++++++++++++++++
+ kernel/cpu.c | 47 +++++++++++++++++++++++++++++++++++-----------
+ 2 files changed, 65 insertions(+), 11 deletions(-)
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 0f2a80377520..4fac564dde70 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -153,6 +153,11 @@ int __cpuhp_setup_state(enum cpuhp_state state, const char *name, bool invoke,
+ int (*startup)(unsigned int cpu),
+ int (*teardown)(unsigned int cpu), bool multi_instance);
+
++int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state, const char *name,
++ bool invoke,
++ int (*startup)(unsigned int cpu),
++ int (*teardown)(unsigned int cpu),
++ bool multi_instance);
+ /**
+ * cpuhp_setup_state - Setup hotplug state callbacks with calling the callbacks
+ * @state: The state for which the calls are installed
+@@ -171,6 +176,15 @@ static inline int cpuhp_setup_state(enum cpuhp_state state,
+ return __cpuhp_setup_state(state, name, true, startup, teardown, false);
+ }
+
++static inline int cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
++ const char *name,
++ int (*startup)(unsigned int cpu),
++ int (*teardown)(unsigned int cpu))
++{
++ return __cpuhp_setup_state_cpuslocked(state, name, true, startup,
++ teardown, false);
++}
++
+ /**
+ * cpuhp_setup_state_nocalls - Setup hotplug state callbacks without calling the
+ * callbacks
+@@ -191,6 +205,15 @@ static inline int cpuhp_setup_state_nocalls(enum cpuhp_state state,
+ false);
+ }
+
++static inline int cpuhp_setup_state_nocalls_cpuslocked(enum cpuhp_state state,
++ const char *name,
++ int (*startup)(unsigned int cpu),
++ int (*teardown)(unsigned int cpu))
++{
++ return __cpuhp_setup_state_cpuslocked(state, name, false, startup,
++ teardown, false);
++}
++
+ /**
+ * cpuhp_setup_state_multi - Add callbacks for multi state
+ * @state: The state for which the calls are installed
+@@ -250,6 +273,7 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
+ }
+
+ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
++void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
+
+ /**
+ * cpuhp_remove_state - Remove hotplug state callbacks and invoke the teardown
+@@ -273,6 +297,11 @@ static inline void cpuhp_remove_state_nocalls(enum cpuhp_state state)
+ __cpuhp_remove_state(state, false);
+ }
+
++static inline void cpuhp_remove_state_nocalls_cpuslocked(enum cpuhp_state state)
++{
++ __cpuhp_remove_state_cpuslocked(state, false);
++}
++
+ /**
+ * cpuhp_remove_multi_state - Remove hotplug multi state callback
+ * @state: The state for which the calls are removed
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index d3221ae5b474..dc27c5a28153 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1459,7 +1459,7 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+
+ /**
+- * __cpuhp_setup_state - Setup the callbacks for an hotplug machine state
++ * __cpuhp_setup_state_cpuslocked - Setup the callbacks for an hotplug machine state
+ * @state: The state to setup
+ * @invoke: If true, the startup function is invoked for cpus where
+ * cpu state >= @state
+@@ -1468,25 +1468,27 @@ EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
+ * @multi_instance: State is set up for multiple instances which get
+ * added afterwards.
+ *
++ * The caller needs to hold cpus read locked while calling this function.
+ * Returns:
+ * On success:
+ * Positive state number if @state is CPUHP_AP_ONLINE_DYN
+ * 0 for all other states
+ * On failure: proper (negative) error code
+ */
+-int __cpuhp_setup_state(enum cpuhp_state state,
+- const char *name, bool invoke,
+- int (*startup)(unsigned int cpu),
+- int (*teardown)(unsigned int cpu),
+- bool multi_instance)
++int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
++ const char *name, bool invoke,
++ int (*startup)(unsigned int cpu),
++ int (*teardown)(unsigned int cpu),
++ bool multi_instance)
+ {
+ int cpu, ret = 0;
+ bool dynstate;
+
++ lockdep_assert_cpus_held();
++
+ if (cpuhp_cb_check(state) || !name)
+ return -EINVAL;
+
+- cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ ret = cpuhp_store_callbacks(state, name, startup, teardown,
+@@ -1522,7 +1524,6 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ }
+ out:
+ mutex_unlock(&cpuhp_state_mutex);
+- cpus_read_unlock();
+ /*
+ * If the requested state is CPUHP_AP_ONLINE_DYN, return the
+ * dynamically allocated state in case of success.
+@@ -1531,6 +1532,22 @@ int __cpuhp_setup_state(enum cpuhp_state state,
+ return state;
+ return ret;
+ }
++EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
++
++int __cpuhp_setup_state(enum cpuhp_state state,
++ const char *name, bool invoke,
++ int (*startup)(unsigned int cpu),
++ int (*teardown)(unsigned int cpu),
++ bool multi_instance)
++{
++ int ret;
++
++ cpus_read_lock();
++ ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
++ teardown, multi_instance);
++ cpus_read_unlock();
++ return ret;
++}
+ EXPORT_SYMBOL(__cpuhp_setup_state);
+
+ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+@@ -1572,22 +1589,23 @@ int __cpuhp_state_remove_instance(enum cpuhp_state state,
+ EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
+
+ /**
+- * __cpuhp_remove_state - Remove the callbacks for an hotplug machine state
++ * __cpuhp_remove_state_cpuslocked - Remove the callbacks for an hotplug machine state
+ * @state: The state to remove
+ * @invoke: If true, the teardown function is invoked for cpus where
+ * cpu state >= @state
+ *
++ * The caller needs to hold cpus read locked while calling this function.
+ * The teardown callback is currently not allowed to fail. Think
+ * about module removal!
+ */
+-void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
++void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
+ {
+ struct cpuhp_step *sp = cpuhp_get_step(state);
+ int cpu;
+
+ BUG_ON(cpuhp_cb_check(state));
+
+- cpus_read_lock();
++ lockdep_assert_cpus_held();
+
+ mutex_lock(&cpuhp_state_mutex);
+ if (sp->multi_instance) {
+@@ -1615,6 +1633,13 @@ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
+ remove:
+ cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
+ mutex_unlock(&cpuhp_state_mutex);
++}
++EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
++
++void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
++{
++ cpus_read_lock();
++ __cpuhp_remove_state_cpuslocked(state, invoke);
+ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL(__cpuhp_remove_state);
+--
+2.11.0
+
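A minimal usage sketch of the new _cpuslocked() setup variant, called from code that already holds the hotplug lock (illustrative only; the driver, callback names and error handling are hypothetical, and CPUHP_AP_ONLINE_DYN is used so the state number is allocated dynamically):

    static int mydrv_cpu_online(unsigned int cpu)
    {
            /* per-CPU bring-up work */
            return 0;
    }

    static int mydrv_cpu_offline(unsigned int cpu)
    {
            /* per-CPU teardown work */
            return 0;
    }

    static int mydrv_init(void)
    {
            int ret;

            cpus_read_lock();
            /* ... other work that needs a stable set of online CPUs ... */

            /*
             * Does not take the hotplug lock again; lockdep_assert_cpus_held()
             * verifies that the caller really holds it.
             */
            ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                                       "mydrv:online",
                                                       mydrv_cpu_online,
                                                       mydrv_cpu_offline);
            cpus_read_unlock();

            return ret < 0 ? ret : 0;
    }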
diff --git a/patches/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch b/patches/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
index 31e15d85c04b..311c2d0ddc7b 100644
--- a/patches/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
+++ b/patches/0003-futex-Clarify-mark_wake_futex-memory-barrier-usage.patch
@@ -1,8 +1,7 @@
+From 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd Mon Sep 17 00:00:00 2001
From: "Darren Hart (VMware)" <dvhart@infradead.org>
Date: Fri, 14 Apr 2017 15:31:38 -0700
-Subject: [PATCH] futex: Clarify mark_wake_futex memory barrier usage
-
-Upstream commit 38fcd06e9b7f6855db1f3ebac5e18b8fdb467ffd
+Subject: [PATCH 3/4] futex: Clarify mark_wake_futex memory barrier usage
Clarify the scenario described in mark_wake_futex requiring the
smp_store_release(). Update the comment to explicitly refer to the
@@ -19,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1378,10 +1378,11 @@ static void mark_wake_futex(struct wake_
+@@ -1380,10 +1380,11 @@ static void mark_wake_futex(struct wake_
wake_q_add(wake_q, p);
__unqueue_futex(q);
/*
diff --git a/patches/0003-futex-Remove-rt_mutex_deadlock_account_.patch b/patches/0003-futex-Remove-rt_mutex_deadlock_account_.patch
index 630982fb1310..41e427d6ac53 100644
--- a/patches/0003-futex-Remove-rt_mutex_deadlock_account_.patch
+++ b/patches/0003-futex-Remove-rt_mutex_deadlock_account_.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
-@@ -173,12 +173,3 @@ void debug_rt_mutex_init(struct rt_mutex
+@@ -174,12 +174,3 @@ void debug_rt_mutex_init(struct rt_mutex
lock->name = name;
}
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -936,8 +936,6 @@ static int try_to_take_rt_mutex(struct r
+@@ -938,8 +938,6 @@ static int try_to_take_rt_mutex(struct r
*/
rt_mutex_set_owner(lock, task);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
}
-@@ -1340,8 +1338,6 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1342,8 +1340,6 @@ static bool __sched rt_mutex_slowunlock(
debug_rt_mutex_unlock(lock);
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We must be careful here if the fast path is enabled. If we
* have no waiters queued we cannot set owner to NULL here
-@@ -1407,11 +1403,10 @@ rt_mutex_fastlock(struct rt_mutex *lock,
+@@ -1409,11 +1405,10 @@ rt_mutex_fastlock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk))
{
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int
-@@ -1423,21 +1418,19 @@ rt_mutex_timed_fastlock(struct rt_mutex
+@@ -1425,21 +1420,19 @@ rt_mutex_timed_fastlock(struct rt_mutex
enum rtmutex_chainwalk chwalk))
{
if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
@@ -114,10 +114,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return slowfn(lock);
}
-@@ -1447,19 +1440,18 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1449,19 +1442,18 @@ rt_mutex_fastunlock(struct rt_mutex *loc
struct wake_q_head *wqh))
{
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
+ bool deboost;
- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) {
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1570,10 +1562,9 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
+@@ -1572,10 +1564,9 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
struct wake_q_head *wqh)
{
@@ -155,7 +155,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rt_mutex_slowunlock(lock, wqh);
}
-@@ -1631,7 +1622,6 @@ void rt_mutex_init_proxy_locked(struct r
+@@ -1637,7 +1628,6 @@ void rt_mutex_init_proxy_locked(struct r
__rt_mutex_init(lock, NULL);
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1647,7 +1637,6 @@ void rt_mutex_proxy_unlock(struct rt_mut
+@@ -1657,7 +1647,6 @@ void rt_mutex_proxy_unlock(struct rt_mut
{
debug_rt_mutex_proxy_unlock(lock);
rt_mutex_set_owner(lock, NULL);
diff --git a/patches/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch b/patches/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..80d6d4dcd823
--- /dev/null
+++ b/patches/0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,129 @@
+From 67cb85fdcee7fbc61c09c00360d1a4ae37641db4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:29 +0200
+Subject: [PATCH 03/13] ia64/salinfo: Replace racy task affinity logic
+
+Some of the file operations in /proc/sal require to run code on the
+requested cpu. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and reset it to the original
+affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe() which guarantees to run the code on
+the requested CPU or to fail in case the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: linux-ia64@vger.kernel.org
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.341863457@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/ia64/kernel/salinfo.c | 31 ++++++++++++-------------------
+ 1 file changed, 12 insertions(+), 19 deletions(-)
+
+--- a/arch/ia64/kernel/salinfo.c
++++ b/arch/ia64/kernel/salinfo.c
+@@ -179,14 +179,14 @@ struct salinfo_platform_oemdata_parms {
+ const u8 *efi_guid;
+ u8 **oemdata;
+ u64 *oemdata_size;
+- int ret;
+ };
+
+-static void
++static long
+ salinfo_platform_oemdata_cpu(void *context)
+ {
+ struct salinfo_platform_oemdata_parms *parms = context;
+- parms->ret = salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
++
++ return salinfo_platform_oemdata(parms->efi_guid, parms->oemdata, parms->oemdata_size);
+ }
+
+ static void
+@@ -380,16 +380,7 @@ salinfo_log_release(struct inode *inode,
+ return 0;
+ }
+
+-static void
+-call_on_cpu(int cpu, void (*fn)(void *), void *arg)
+-{
+- cpumask_t save_cpus_allowed = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+- (*fn)(arg);
+- set_cpus_allowed_ptr(current, &save_cpus_allowed);
+-}
+-
+-static void
++static long
+ salinfo_log_read_cpu(void *context)
+ {
+ struct salinfo_data *data = context;
+@@ -399,6 +390,7 @@ salinfo_log_read_cpu(void *context)
+ /* Clear corrected errors as they are read from SAL */
+ if (rh->severity == sal_log_severity_corrected)
+ ia64_sal_clear_state_info(data->type);
++ return 0;
+ }
+
+ static void
+@@ -430,7 +422,7 @@ salinfo_log_new_read(int cpu, struct sal
+ spin_unlock_irqrestore(&data_saved_lock, flags);
+
+ if (!data->saved_num)
+- call_on_cpu(cpu, salinfo_log_read_cpu, data);
++ work_on_cpu_safe(cpu, salinfo_log_read_cpu, data);
+ if (!data->log_size) {
+ data->state = STATE_NO_DATA;
+ cpumask_clear_cpu(cpu, &data->cpu_event);
+@@ -459,11 +451,13 @@ salinfo_log_read(struct file *file, char
+ return simple_read_from_buffer(buffer, count, ppos, buf, bufsize);
+ }
+
+-static void
++static long
+ salinfo_log_clear_cpu(void *context)
+ {
+ struct salinfo_data *data = context;
++
+ ia64_sal_clear_state_info(data->type);
++ return 0;
+ }
+
+ static int
+@@ -486,7 +480,7 @@ salinfo_log_clear(struct salinfo_data *d
+ rh = (sal_log_record_header_t *)(data->log_buffer);
+ /* Corrected errors have already been cleared from SAL */
+ if (rh->severity != sal_log_severity_corrected)
+- call_on_cpu(cpu, salinfo_log_clear_cpu, data);
++ work_on_cpu_safe(cpu, salinfo_log_clear_cpu, data);
+ /* clearing a record may make a new record visible */
+ salinfo_log_new_read(cpu, data);
+ if (data->state == STATE_LOG_RECORD) {
+@@ -531,9 +525,8 @@ salinfo_log_write(struct file *file, con
+ .oemdata = &data->oemdata,
+ .oemdata_size = &data->oemdata_size
+ };
+- call_on_cpu(cpu, salinfo_platform_oemdata_cpu, &parms);
+- if (parms.ret)
+- count = parms.ret;
++ count = work_on_cpu_safe(cpu, salinfo_platform_oemdata_cpu,
++ &parms);
+ } else
+ data->oemdata_size = 0;
+ } else
diff --git a/patches/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch b/patches/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
index 35405b0e351a..ed71fb48348c 100644
--- a/patches/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
+++ b/patches/0003-sched-deadline-rtmutex-Dont-miss-the-dl_runtime-dl_p.patch
@@ -1,10 +1,9 @@
+From 85e2d4f992868ad78dc8bb2c077b652fcfb3661a Mon Sep 17 00:00:00 2001
From: Xunlei Pang <xlpang@redhat.com>
Date: Thu, 23 Mar 2017 15:56:09 +0100
-Subject: [PATCH] sched/deadline/rtmutex: Dont miss the
+Subject: [PATCH 3/9] sched/deadline/rtmutex: Dont miss the
dl_runtime/dl_period update
-Upstream commit 85e2d4f992868ad78dc8bb2c077b652fcfb3661a
-
Currently dl tasks will actually return at the very beginning
of rt_mutex_adjust_prio_chain() in !detect_deadlock cases:
@@ -42,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -603,7 +603,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -605,7 +605,7 @@ static int rt_mutex_adjust_prio_chain(st
* enabled we continue, but stop the requeueing in the chain
* walk.
*/
diff --git a/patches/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch b/patches/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
index 6d2ab127ca28..a47fbc0f9b56 100644
--- a/patches/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
+++ b/patches/0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
@@ -1,8 +1,7 @@
+From 59cd42c29618c45cd3c56da43402b14f611888dd Mon Sep 17 00:00:00 2001
From: "Darren Hart (VMware)" <dvhart@infradead.org>
Date: Fri, 14 Apr 2017 15:46:08 -0700
-Subject: [PATCH] MAINTAINERS: Add FUTEX SUBSYSTEM
-
-Upstream commit 59cd42c29618c45cd3c56da43402b14f611888dd
+Subject: [PATCH 4/4] MAINTAINERS: Add FUTEX SUBSYSTEM
Add a MAINTAINERS block for the FUTEX SUBSYSTEM which includes the core
kernel code, include headers, testing code, and Documentation. Excludes
@@ -23,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/MAINTAINERS
+++ b/MAINTAINERS
-@@ -5196,6 +5196,23 @@ F: fs/fuse/
+@@ -5420,6 +5420,23 @@ F: fs/fuse/
F: include/uapi/linux/fuse.h
F: Documentation/filesystems/fuse.txt
diff --git a/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch b/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
new file mode 100644
index 000000000000..1cb080124b7d
--- /dev/null
+++ b/patches/0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
@@ -0,0 +1,96 @@
+From 9805c6733349ea3ccd22cf75b8ebaabb5290e310 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:15 +0200
+Subject: [PATCH 04/32] cpu/hotplug: Add
+ __cpuhp_state_add_instance_cpuslocked()
+
+Add cpuslocked() variants for the multi instance registration so this can
+be called from a cpus_read_lock() protected region.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.321782217@linutronix.de
+---
+ include/linux/cpuhotplug.h | 9 +++++++++
+ kernel/cpu.c | 18 +++++++++++++++---
+ 2 files changed, 24 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
+index 4fac564dde70..df3d2719a796 100644
+--- a/include/linux/cpuhotplug.h
++++ b/include/linux/cpuhotplug.h
+@@ -240,6 +240,8 @@ static inline int cpuhp_setup_state_multi(enum cpuhp_state state,
+
+ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ bool invoke);
++int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
++ struct hlist_node *node, bool invoke);
+
+ /**
+ * cpuhp_state_add_instance - Add an instance for a state and invoke startup
+@@ -272,6 +274,13 @@ static inline int cpuhp_state_add_instance_nocalls(enum cpuhp_state state,
+ return __cpuhp_state_add_instance(state, node, false);
+ }
+
++static inline int
++cpuhp_state_add_instance_nocalls_cpuslocked(enum cpuhp_state state,
++ struct hlist_node *node)
++{
++ return __cpuhp_state_add_instance_cpuslocked(state, node, false);
++}
++
+ void __cpuhp_remove_state(enum cpuhp_state state, bool invoke);
+ void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke);
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index dc27c5a28153..e4389ac55b65 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -1413,18 +1413,20 @@ static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
+ }
+ }
+
+-int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+- bool invoke)
++int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
++ struct hlist_node *node,
++ bool invoke)
+ {
+ struct cpuhp_step *sp;
+ int cpu;
+ int ret;
+
++ lockdep_assert_cpus_held();
++
+ sp = cpuhp_get_step(state);
+ if (sp->multi_instance == false)
+ return -EINVAL;
+
+- cpus_read_lock();
+ mutex_lock(&cpuhp_state_mutex);
+
+ if (!invoke || !sp->startup.multi)
+@@ -1453,6 +1455,16 @@ int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
+ hlist_add_head(node, &sp->list);
+ unlock:
+ mutex_unlock(&cpuhp_state_mutex);
++ return ret;
++}
++
++int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
++ bool invoke)
++{
++ int ret;
++
++ cpus_read_lock();
++ ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
+ cpus_read_unlock();
+ return ret;
+ }
+--
+2.11.0
+
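Similarly, for the multi-instance case added here, a caller inside a cpus_read_lock() protected region can register an instance without re-taking the lock. A sketch under the assumption that a multi-instance state was set up earlier with cpuhp_setup_state_multi() (the device structure, variable and function names are hypothetical):

    struct mydev {
            struct hlist_node cpuhp_node;   /* instance hook for the multi state */
            /* ... device specific fields ... */
    };

    /* State number returned by an earlier cpuhp_setup_state_multi() call. */
    static enum cpuhp_state mydrv_hp_state;

    static int mydev_register(struct mydev *dev)
    {
            /* Caller is expected to hold cpus_read_lock() already. */
            lockdep_assert_cpus_held();

            return cpuhp_state_add_instance_nocalls_cpuslocked(mydrv_hp_state,
                                                               &dev->cpuhp_node);
    }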
diff --git a/patches/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch b/patches/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
index 5f39524b167b..66306249c23b 100644
--- a/patches/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
+++ b/patches/0004-futex-rt_mutex-Provide-futex-specific-rt_mutex-API.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -914,7 +914,7 @@ void exit_pi_state_list(struct task_stru
+@@ -916,7 +916,7 @@ void exit_pi_state_list(struct task_stru
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock(&hb->lock);
-@@ -1362,20 +1362,18 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1364,20 +1364,18 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -2251,7 +2249,7 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2253,7 +2251,7 @@ static int fixup_owner(u32 __user *uaddr
* task acquired the rt_mutex after we removed ourself from the
* rt_mutex waiters list.
*/
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
locked = 1;
goto out;
}
-@@ -2566,7 +2564,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2568,7 +2566,7 @@ static int futex_lock_pi(u32 __user *uad
if (!trylock) {
ret = rt_mutex_timed_futex_lock(&q.pi_state->pi_mutex, to);
} else {
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Fixup the trylock return value: */
ret = ret ? 0 : -EWOULDBLOCK;
}
-@@ -2589,7 +2587,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2591,7 +2589,7 @@ static int futex_lock_pi(u32 __user *uad
* it and return the fault to userspace.
*/
if (ret && (rt_mutex_owner(&q.pi_state->pi_mutex) == current))
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Unqueue and drop the lock */
unqueue_me_pi(&q);
-@@ -2896,7 +2894,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2898,7 +2896,7 @@ static int futex_wait_requeue_pi(u32 __u
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current)
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
-@@ -2936,7 +2934,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2938,7 +2936,7 @@ static int futex_wait_requeue_pi(u32 __u
* userspace.
*/
if (ret && rt_mutex_owner(pi_mutex) == current)
@@ -122,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unqueue_me_pi(&q);
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1486,15 +1486,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
+@@ -1488,15 +1488,23 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_interrup
/*
* Futex variant with full deadlock detection.
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1553,19 +1561,38 @@ void __sched rt_mutex_unlock(struct rt_m
+@@ -1555,19 +1563,38 @@ void __sched rt_mutex_unlock(struct rt_m
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
/**
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
- if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
- return false;
-+ WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_q);
+ bool deboost;
- return rt_mutex_slowunlock(lock, wqh);
diff --git a/patches/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch b/patches/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..41a9a46f629d
--- /dev/null
+++ b/patches/0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,76 @@
+From 9feb42ac88b516e378b9782e82b651ca5bed95c4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 6 Apr 2017 14:56:18 +0200
+Subject: [PATCH 04/13] ia64/sn/hwperf: Replace racy task affinity logic
+
+sn_hwperf_op_cpu() which is invoked from an ioctl requires to run code on
+the requested cpu. This is achieved by temporarily setting the affinity of
+the calling user space thread to the requested CPU and reset it to the
+original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe() which guarantees to run the code on
+the requested CPU or to fail in case the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: linux-ia64@vger.kernel.org
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704122251450.2548@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/ia64/sn/kernel/sn2/sn_hwperf.c | 17 +++++++++--------
+ 1 file changed, 9 insertions(+), 8 deletions(-)
+
+--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
++++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+@@ -598,12 +598,17 @@ static void sn_hwperf_call_sal(void *inf
+ op_info->ret = r;
+ }
+
++static long sn_hwperf_call_sal_work(void *info)
++{
++ sn_hwperf_call_sal(info);
++ return 0;
++}
++
+ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
+ {
+ u32 cpu;
+ u32 use_ipi;
+ int r = 0;
+- cpumask_t save_allowed;
+
+ cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32;
+ use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK;
+@@ -629,13 +634,9 @@ static int sn_hwperf_op_cpu(struct sn_hw
+ /* use an interprocessor interrupt to call SAL */
+ smp_call_function_single(cpu, sn_hwperf_call_sal,
+ op_info, 1);
+- }
+- else {
+- /* migrate the task before calling SAL */
+- save_allowed = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+- sn_hwperf_call_sal(op_info);
+- set_cpus_allowed_ptr(current, &save_allowed);
++ } else {
++ /* Call on the target CPU */
++ work_on_cpu_safe(cpu, sn_hwperf_call_sal_work, op_info);
+ }
+ }
+ r = op_info->ret;
diff --git a/patches/0004-rtmutex-Clean-up.patch b/patches/0004-rtmutex-Clean-up.patch
index 0b03e873a043..b3f244c9879e 100644
--- a/patches/0004-rtmutex-Clean-up.patch
+++ b/patches/0004-rtmutex-Clean-up.patch
@@ -1,8 +1,7 @@
+From aa2bfe55366552cb7e93e8709d66e698d79ccc47 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 23 Mar 2017 15:56:10 +0100
-Subject: [PATCH] rtmutex: Clean up
-
-Upstream commit aa2bfe55366552cb7e93e8709d66e698d79ccc47
+Subject: [PATCH 4/9] rtmutex: Clean up
Previous patches changed the meaning of the return value of
rt_mutex_slowunlock(); update comments and code to reflect this.
@@ -25,16 +24,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1392,7 +1392,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1394,7 +1394,7 @@ static int wake_futex_pi(u32 __user *uad
{
u32 uninitialized_var(curval), newval;
struct task_struct *new_owner;
- bool deboost = false;
+ bool postunlock = false;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
int ret = 0;
-@@ -1453,12 +1453,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1455,12 +1455,13 @@ static int wake_futex_pi(u32 __user *uad
/*
* We've updated the uservalue, this unlock cannot fail.
*/
@@ -52,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1328,7 +1328,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1330,7 +1330,8 @@ static inline int rt_mutex_slowtrylock(s
/*
* Slow path to release a rt-mutex.
@@ -62,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
struct wake_q_head *wake_q)
-@@ -1399,8 +1400,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1401,8 +1402,7 @@ static bool __sched rt_mutex_slowunlock(
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
@@ -72,7 +71,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1447,15 +1447,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1449,15 +1449,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
}
/*
@@ -91,10 +90,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static inline void
-@@ -1464,14 +1463,12 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1466,14 +1465,12 @@ rt_mutex_fastunlock(struct rt_mutex *loc
struct wake_q_head *wqh))
{
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
- bool deboost;
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
@@ -108,7 +107,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1591,19 +1588,20 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1593,19 +1590,20 @@ bool __sched __rt_mutex_futex_unlock(str
*/
preempt_disable();
@@ -118,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
- bool deboost;
+ bool postunlock;
diff --git a/patches/0004-x86-smp-Adjust-system_state-check.patch b/patches/0004-x86-smp-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..eadb43fddb24
--- /dev/null
+++ b/patches/0004-x86-smp-Adjust-system_state-check.patch
@@ -0,0 +1,34 @@
+From 719b3680d1f789c1e3054e3fcb26bfff07c3c623 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:35 +0200
+Subject: [PATCH 04/17] x86/smp: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in announce_cpu() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20170516184735.191715856@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/x86/kernel/smpboot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -863,7 +863,7 @@ static void announce_cpu(int cpu, int ap
+ if (cpu == 1)
+ printk(KERN_INFO "x86: Booting SMP configuration:\n");
+
+- if (system_state == SYSTEM_BOOTING) {
++ if (system_state < SYSTEM_RUNNING) {
+ if (node != current_node) {
+ if (current_node > (-1))
+ pr_cont("\n");
diff --git a/patches/0005-futex-Change-locking-rules.patch b/patches/0005-futex-Change-locking-rules.patch
index a6a3f0ad08fe..2c9de0cb97da 100644
--- a/patches/0005-futex-Change-locking-rules.patch
+++ b/patches/0005-futex-Change-locking-rules.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -971,6 +971,39 @@ void exit_pi_state_list(struct task_stru
+@@ -973,6 +973,39 @@ void exit_pi_state_list(struct task_stru
*
* [10] There is no transient state which leaves owner and user space
* TID out of sync.
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
/*
-@@ -978,10 +1011,12 @@ void exit_pi_state_list(struct task_stru
+@@ -980,10 +1013,12 @@ void exit_pi_state_list(struct task_stru
* the pi_state against the user space value. If correct, attach to
* it.
*/
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Userspace might have messed up non-PI and PI futexes [3]
-@@ -989,9 +1024,34 @@ static int attach_to_pi_state(u32 uval,
+@@ -991,9 +1026,34 @@ static int attach_to_pi_state(u32 uval,
if (unlikely(!pi_state))
return -EINVAL;
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Handle the owner died case:
*/
if (uval & FUTEX_OWNER_DIED) {
-@@ -1006,11 +1066,11 @@ static int attach_to_pi_state(u32 uval,
+@@ -1008,11 +1068,11 @@ static int attach_to_pi_state(u32 uval,
* is not 0. Inconsistent state. [5]
*/
if (pid)
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1022,14 +1082,14 @@ static int attach_to_pi_state(u32 uval,
+@@ -1024,14 +1084,14 @@ static int attach_to_pi_state(u32 uval,
* Take a ref on the state and return success. [6]
*/
if (!pid)
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1038,11 +1098,29 @@ static int attach_to_pi_state(u32 uval,
+@@ -1040,11 +1100,29 @@ static int attach_to_pi_state(u32 uval,
* user space TID. [9/10]
*/
if (pid != task_pid_vnr(pi_state->owner))
@@ -189,7 +189,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1093,6 +1171,9 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1095,6 +1173,9 @@ static int attach_to_pi_owner(u32 uval,
/*
* No existing pi state. First waiter. [2]
@@ -199,7 +199,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
pi_state = alloc_pi_state();
-@@ -1117,7 +1198,8 @@ static int attach_to_pi_owner(u32 uval,
+@@ -1119,7 +1200,8 @@ static int attach_to_pi_owner(u32 uval,
return 0;
}
@@ -209,7 +209,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
union futex_key *key, struct futex_pi_state **ps)
{
struct futex_q *top_waiter = futex_top_waiter(hb, key);
-@@ -1127,7 +1209,7 @@ static int lookup_pi_state(u32 uval, str
+@@ -1129,7 +1211,7 @@ static int lookup_pi_state(u32 uval, str
* attach to the pi_state when the validation succeeds.
*/
if (top_waiter)
@@ -218,7 +218,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We are the first waiter - try to look up the owner based on
-@@ -1146,7 +1228,7 @@ static int lock_pi_update_atomic(u32 __u
+@@ -1148,7 +1230,7 @@ static int lock_pi_update_atomic(u32 __u
if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)))
return -EFAULT;
@@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return curval != uval ? -EAGAIN : 0;
}
-@@ -1202,7 +1284,7 @@ static int futex_lock_pi_atomic(u32 __us
+@@ -1204,7 +1286,7 @@ static int futex_lock_pi_atomic(u32 __us
*/
top_waiter = futex_top_waiter(hb, key);
if (top_waiter)
@@ -236,7 +236,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No waiter and user TID is 0. We are here because the
-@@ -1334,6 +1416,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1336,6 +1418,7 @@ static int wake_futex_pi(u32 __user *uad
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) {
ret = -EFAULT;
@@ -244,7 +244,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else if (curval != uval) {
/*
* If a unconditional UNLOCK_PI operation (user space did not
-@@ -1346,6 +1429,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1348,6 +1431,7 @@ static int wake_futex_pi(u32 __user *uad
else
ret = -EINVAL;
}
@@ -252,7 +252,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret) {
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
return ret;
-@@ -1821,7 +1905,7 @@ static int futex_requeue(u32 __user *uad
+@@ -1823,7 +1907,7 @@ static int futex_requeue(u32 __user *uad
* If that call succeeds then we have pi_state and an
* initial refcount on it.
*/
@@ -261,7 +261,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
switch (ret) {
-@@ -2120,10 +2204,13 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2122,10 +2206,13 @@ static int fixup_pi_state_owner(u32 __us
{
u32 newtid = task_pid_vnr(newowner) | FUTEX_WAITERS;
struct futex_pi_state *pi_state = q->pi_state;
@@ -276,7 +276,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Owner died? */
if (!pi_state->owner)
newtid |= FUTEX_OWNER_DIED;
-@@ -2139,11 +2226,10 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2141,11 +2228,10 @@ static int fixup_pi_state_owner(u32 __us
* because we can fault here. Imagine swapped out pages or a fork
* that marked all the anonymous memory readonly for cow.
*
@@ -292,7 +292,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
retry:
if (get_futex_value_locked(&uval, uaddr))
-@@ -2164,47 +2250,60 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2166,47 +2252,60 @@ static int fixup_pi_state_owner(u32 __us
* itself.
*/
if (pi_state->owner != NULL) {
diff --git a/patches/0005-metag-Adjust-system_state-check.patch b/patches/0005-metag-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..61fd1559307c
--- /dev/null
+++ b/patches/0005-metag-Adjust-system_state-check.patch
@@ -0,0 +1,36 @@
+From dcd2e4734b428709984e2fa35ebbd6cccc246d47 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:36 +0200
+Subject: [PATCH 05/17] metag: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in stop_this_cpu() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: James Hogan <james.hogan@imgtec.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.283420315@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/metag/kernel/smp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/arch/metag/kernel/smp.c
++++ b/arch/metag/kernel/smp.c
+@@ -567,8 +567,7 @@ static void stop_this_cpu(void *data)
+ {
+ unsigned int cpu = smp_processor_id();
+
+- if (system_state == SYSTEM_BOOTING ||
+- system_state == SYSTEM_RUNNING) {
++ if (system_state <= SYSTEM_RUNNING) {
+ spin_lock(&stop_lock);
+ pr_crit("CPU%u: stopping\n", cpu);
+ dump_stack();
diff --git a/patches/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch b/patches/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
new file mode 100644
index 000000000000..a6d685b80eff
--- /dev/null
+++ b/patches/0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
@@ -0,0 +1,89 @@
+From 6d11b87d55eb75007a3721c2de5938f5bbf607fb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:31 +0200
+Subject: [PATCH 05/13] powerpc/smp: Replace open coded task affinity logic
+
+Init task invokes smp_ops->setup_cpu() from smp_cpus_done(). Init task can
+run on any online CPU at this point, but the setup_cpu() callback requires
+to be invoked on the boot CPU. This is achieved by temporarily setting the
+affinity of the calling user space thread to the requested CPU and reset it
+to the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+That's actually not a problem in this context as neither CPU hotplug nor
+affinity settings can happen, but the access to task_struct::cpus_allowed
+is about to restricted.
+
+Replace it with a call to work_on_cpu_safe() which achieves the same result.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.518053336@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/powerpc/kernel/smp.c | 26 +++++++++++---------------
+ 1 file changed, 11 insertions(+), 15 deletions(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -787,24 +787,21 @@ static struct sched_domain_topology_leve
+ { NULL, },
+ };
+
+-void __init smp_cpus_done(unsigned int max_cpus)
++static __init long smp_setup_cpu_workfn(void *data __always_unused)
+ {
+- cpumask_var_t old_mask;
++ smp_ops->setup_cpu(boot_cpuid);
++ return 0;
++}
+
+- /* We want the setup_cpu() here to be called from CPU 0, but our
+- * init thread may have been "borrowed" by another CPU in the meantime
+- * se we pin us down to CPU 0 for a short while
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++ /*
++ * We want the setup_cpu() here to be called on the boot CPU, but
++ * init might run on any CPU, so make sure it's invoked on the boot
++ * CPU.
+ */
+- alloc_cpumask_var(&old_mask, GFP_NOWAIT);
+- cpumask_copy(old_mask, &current->cpus_allowed);
+- set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid));
+-
+ if (smp_ops && smp_ops->setup_cpu)
+- smp_ops->setup_cpu(boot_cpuid);
+-
+- set_cpus_allowed_ptr(current, old_mask);
+-
+- free_cpumask_var(old_mask);
++ work_on_cpu_safe(boot_cpuid, smp_setup_cpu_workfn, NULL);
+
+ if (smp_ops && smp_ops->bringup_done)
+ smp_ops->bringup_done();
+@@ -812,7 +809,6 @@ void __init smp_cpus_done(unsigned int m
+ dump_numa_cpu_topology();
+
+ set_sched_topology(powerpc_topology);
+-
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
diff --git a/patches/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch b/patches/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
index aa609e94c800..03f34c2b6a2f 100644
--- a/patches/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
+++ b/patches/0005-sched-rtmutex-Refactor-rt_mutex_setprio.patch
@@ -1,8 +1,7 @@
+From acd58620e415aee4a43a808d7d2fd87259ee0001 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 23 Mar 2017 15:56:11 +0100
-Subject: [PATCH] sched/rtmutex: Refactor rt_mutex_setprio()
-
-Upstream commit acd58620e415aee4a43a808d7d2fd87259ee0001
+Subject: [PATCH 5/9] sched/rtmutex: Refactor rt_mutex_setprio()
With the introduction of SCHED_DEADLINE the whole notion that priority
is a single number is gone, therefore the @prio argument to
@@ -35,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
-@@ -16,28 +16,20 @@ static inline int rt_task(struct task_st
+@@ -18,28 +18,20 @@ static inline int rt_task(struct task_st
}
#ifdef CONFIG_RT_MUTEXES
@@ -74,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -320,67 +320,16 @@ rt_mutex_dequeue_pi(struct task_struct *
+@@ -322,67 +322,16 @@ rt_mutex_dequeue_pi(struct task_struct *
RB_CLEAR_NODE(&waiter->pi_tree_entry);
}
@@ -148,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -740,7 +689,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -742,7 +691,7 @@ static int rt_mutex_adjust_prio_chain(st
*/
rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
rt_mutex_enqueue_pi(task, waiter);
@@ -157,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (prerequeue_top_waiter == waiter) {
/*
-@@ -756,7 +705,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -758,7 +707,7 @@ static int rt_mutex_adjust_prio_chain(st
rt_mutex_dequeue_pi(task, waiter);
waiter = rt_mutex_top_waiter(lock);
rt_mutex_enqueue_pi(task, waiter);
@@ -166,7 +165,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* Nothing changed. No need to do any priority
-@@ -964,7 +913,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -966,7 +915,7 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -175,7 +174,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
-@@ -986,7 +935,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -988,7 +937,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_dequeue_pi(owner, top_waiter);
rt_mutex_enqueue_pi(owner, waiter);
@@ -184,7 +183,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (owner->pi_blocked_on)
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
-@@ -1038,13 +987,14 @@ static void mark_wakeup_next_waiter(stru
+@@ -1040,13 +989,14 @@ static void mark_wakeup_next_waiter(stru
waiter = rt_mutex_top_waiter(lock);
/*
@@ -204,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* As we are waking up the top waiter, and the waiter stays
-@@ -1056,9 +1006,19 @@ static void mark_wakeup_next_waiter(stru
+@@ -1058,9 +1008,19 @@ static void mark_wakeup_next_waiter(stru
*/
lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
@@ -226,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1093,7 +1053,7 @@ static void remove_waiter(struct rt_mute
+@@ -1095,7 +1055,7 @@ static void remove_waiter(struct rt_mute
if (rt_mutex_has_waiters(lock))
rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));
@@ -235,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Store the lock on which owner is blocked or NULL */
next_lock = task_blocked_on_lock(owner);
-@@ -1132,8 +1092,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1134,8 +1094,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -245,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&task->pi_lock, flags);
return;
}
-@@ -1387,17 +1346,6 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1389,17 +1348,6 @@ static bool __sched rt_mutex_slowunlock(
* Queue the next waiter for wakeup once we release the wait_lock.
*/
mark_wakeup_next_waiter(wake_q, lock);
@@ -265,7 +264,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return true; /* call rt_mutex_postunlock() */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3629,10 +3629,25 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3671,10 +3671,25 @@ EXPORT_SYMBOL(default_wake_function);
#ifdef CONFIG_RT_MUTEXES
@@ -293,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* This function changes the 'effective' priority of a task. It does
* not touch ->normal_prio like __setscheduler().
-@@ -3640,16 +3655,40 @@ EXPORT_SYMBOL(default_wake_function);
+@@ -3682,17 +3697,41 @@ EXPORT_SYMBOL(default_wake_function);
* Used by the rt_mutex code to implement priority inheritance
* logic. Call site only calls if the priority of the task changed.
*/
@@ -317,6 +316,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ return;
rq = __task_rq_lock(p, &rf);
+ update_rq_clock(rq);
+ /*
+ * Set under pi_lock && rq->lock, such that the value can be used under
+ * either lock.
@@ -337,7 +337,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Idle task boosting is a nono in general. There is one
-@@ -3669,9 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+@@ -3712,9 +3751,7 @@ void rt_mutex_setprio(struct task_struct
goto out_unlock;
}
@@ -348,7 +348,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldprio = p->prio;
if (oldprio == prio)
-@@ -3695,7 +3732,6 @@ void rt_mutex_setprio(struct task_struct
+@@ -3738,7 +3775,6 @@ void rt_mutex_setprio(struct task_struct
* running task
*/
if (dl_prio(prio)) {
@@ -356,7 +356,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!dl_prio(p->normal_prio) ||
(pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
p->dl.dl_boosted = 1;
-@@ -3732,6 +3768,11 @@ void rt_mutex_setprio(struct task_struct
+@@ -3776,6 +3812,11 @@ void rt_mutex_setprio(struct task_struct
balance_callback(rq);
preempt_enable();
}
@@ -368,7 +368,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
void set_user_nice(struct task_struct *p, long nice)
-@@ -3976,10 +4017,9 @@ static void __setscheduler(struct rq *rq
+@@ -4022,10 +4063,9 @@ static void __setscheduler(struct rq *rq
* Keep a potential priority boosting if called from
* sched_setscheduler().
*/
@@ -381,7 +381,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (dl_prio(p->prio))
p->sched_class = &dl_sched_class;
-@@ -4266,7 +4306,7 @@ static int __sched_setscheduler(struct t
+@@ -4312,7 +4352,7 @@ static int __sched_setscheduler(struct t
* the runqueue. This will be done when the task deboost
* itself.
*/
diff --git a/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch b/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch
new file mode 100644
index 000000000000..8e5d24bebe43
--- /dev/null
+++ b/patches/0005-stop_machine-Provide-stop_machine_cpuslocked.patch
@@ -0,0 +1,116 @@
+From fe5595c074005bd94f0c7d1644175941149f6768 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:16 +0200
+Subject: [PATCH 05/32] stop_machine: Provide stop_machine_cpuslocked()
+
+Some call sites of stop_machine() are within a get_online_cpus() protected
+region.
+
+stop_machine() calls get_online_cpus() as well, which is possible in the
+current implementation but prevents converting the hotplug locking to a
+percpu rwsem.
+
+Provide stop_machine_cpuslocked() to avoid nested calls to get_online_cpus().
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.400700852@linutronix.de
+---
+ include/linux/stop_machine.h | 26 +++++++++++++++++++++++---
+ kernel/stop_machine.c | 11 +++++++----
+ 2 files changed, 30 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
+index 3cc9632dcc2a..3d60275e3ba9 100644
+--- a/include/linux/stop_machine.h
++++ b/include/linux/stop_machine.h
+@@ -116,15 +116,29 @@ static inline int try_stop_cpus(const struct cpumask *cpumask,
+ * @fn() runs.
+ *
+ * This can be thought of as a very heavy write lock, equivalent to
+- * grabbing every spinlock in the kernel. */
++ * grabbing every spinlock in the kernel.
++ *
++ * Protects against CPU hotplug.
++ */
+ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
+
++/**
++ * stop_machine_cpuslocked: freeze the machine on all CPUs and run this function
++ * @fn: the function to run
++ * @data: the data ptr for the @fn()
++ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
++ *
++ * Same as above. Must be called from within a cpus_read_lock() protected
++ * region. Avoids nested calls to cpus_read_lock().
++ */
++int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);
++
+ int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus);
+ #else /* CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
+-static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+- const struct cpumask *cpus)
++static inline int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
++ const struct cpumask *cpus)
+ {
+ unsigned long flags;
+ int ret;
+@@ -134,6 +148,12 @@ static inline int stop_machine(cpu_stop_fn_t fn, void *data,
+ return ret;
+ }
+
++static inline int stop_machine(cpu_stop_fn_t fn, void *data,
++ const struct cpumask *cpus)
++{
++ return stop_machine_cpuslocked(fn, data, cpus);
++}
++
+ static inline int stop_machine_from_inactive_cpu(cpu_stop_fn_t fn, void *data,
+ const struct cpumask *cpus)
+ {
+diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
+index 1eb82661ecdb..b7591261652d 100644
+--- a/kernel/stop_machine.c
++++ b/kernel/stop_machine.c
+@@ -552,7 +552,8 @@ static int __init cpu_stop_init(void)
+ }
+ early_initcall(cpu_stop_init);
+
+-static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
++int stop_machine_cpuslocked(cpu_stop_fn_t fn, void *data,
++ const struct cpumask *cpus)
+ {
+ struct multi_stop_data msdata = {
+ .fn = fn,
+@@ -561,6 +562,8 @@ static int __stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cp
+ .active_cpus = cpus,
+ };
+
++ lockdep_assert_cpus_held();
++
+ if (!stop_machine_initialized) {
+ /*
+ * Handle the case where stop_machine() is called
+@@ -590,9 +593,9 @@ int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus)
+ int ret;
+
+ /* No CPUs can come up or down during this. */
+- get_online_cpus();
+- ret = __stop_machine(fn, data, cpus);
+- put_online_cpus();
++ cpus_read_lock();
++ ret = stop_machine_cpuslocked(fn, data, cpus);
++ cpus_read_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(stop_machine);
+--
+2.11.0
+
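As a rough illustration of the calling convention this patch introduces (a sketch, not part of the queue; the callback and wrapper names are hypothetical): a caller that already holds the hotplug lock uses the _cpuslocked() variant instead of plain stop_machine(), so cpus_read_lock() is not taken twice.

#include <linux/cpu.h>
#include <linux/stop_machine.h>

/* Runs on one CPU while all other CPUs spin with interrupts disabled. */
static int apply_update(void *data)
{
        return 0;
}

static int example_update(void)
{
        int ret;

        cpus_read_lock();
        /* ... other work that must exclude CPU hotplug ... */
        ret = stop_machine_cpuslocked(apply_update, NULL, NULL);
        cpus_read_unlock();
        return ret;
}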
diff --git a/patches/0006-futex-Cleanup-refcounting.patch b/patches/0006-futex-Cleanup-refcounting.patch
index e1e7b05733c8..566a1356f606 100644
--- a/patches/0006-futex-Cleanup-refcounting.patch
+++ b/patches/0006-futex-Cleanup-refcounting.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -800,7 +800,7 @@ static int refill_pi_state_cache(void)
+@@ -802,7 +802,7 @@ static int refill_pi_state_cache(void)
return 0;
}
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct futex_pi_state *pi_state = current->pi_state_cache;
-@@ -810,6 +810,11 @@ static struct futex_pi_state * alloc_pi_
+@@ -812,6 +812,11 @@ static struct futex_pi_state * alloc_pi_
return pi_state;
}
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Drops a reference to the pi_state object and frees or caches it
* when the last reference is gone.
-@@ -854,7 +859,7 @@ static void put_pi_state(struct futex_pi
+@@ -856,7 +861,7 @@ static void put_pi_state(struct futex_pi
* Look up the task based on what TID userspace gave us.
* We dont trust it.
*/
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *p;
-@@ -1101,7 +1106,7 @@ static int attach_to_pi_state(u32 __user
+@@ -1103,7 +1108,7 @@ static int attach_to_pi_state(u32 __user
goto out_einval;
out_attach:
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
*ps = pi_state;
return 0;
-@@ -1988,7 +1993,7 @@ static int futex_requeue(u32 __user *uad
+@@ -1990,7 +1995,7 @@ static int futex_requeue(u32 __user *uad
* refcount on the pi_state and store the pointer in
* the futex_q object of the waiter.
*/
diff --git a/patches/0006-padata-Make-padata_alloc-static.patch b/patches/0006-padata-Make-padata_alloc-static.patch
new file mode 100644
index 000000000000..2ffdd6f8f55e
--- /dev/null
+++ b/patches/0006-padata-Make-padata_alloc-static.patch
@@ -0,0 +1,95 @@
+From 9596695ee1e7eedd743c43811fe68299eb005b5c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:17 +0200
+Subject: [PATCH 06/32] padata: Make padata_alloc() static
+
+No users outside of padata.c
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/20170524081547.491457256@linutronix.de
+---
+ include/linux/padata.h | 3 ---
+ kernel/padata.c | 32 ++++++++++++++++----------------
+ 2 files changed, 16 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/padata.h b/include/linux/padata.h
+index 0f9e567d5e15..2f9c1f93b1ce 100644
+--- a/include/linux/padata.h
++++ b/include/linux/padata.h
+@@ -166,9 +166,6 @@ struct padata_instance {
+
+ extern struct padata_instance *padata_alloc_possible(
+ struct workqueue_struct *wq);
+-extern struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+- const struct cpumask *pcpumask,
+- const struct cpumask *cbcpumask);
+ extern void padata_free(struct padata_instance *pinst);
+ extern int padata_do_parallel(struct padata_instance *pinst,
+ struct padata_priv *padata, int cb_cpu);
+diff --git a/kernel/padata.c b/kernel/padata.c
+index ac8f1e524836..0c708f648853 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -934,19 +934,6 @@ static struct kobj_type padata_attr_type = {
+ };
+
+ /**
+- * padata_alloc_possible - Allocate and initialize padata instance.
+- * Use the cpu_possible_mask for serial and
+- * parallel workers.
+- *
+- * @wq: workqueue to use for the allocated padata instance
+- */
+-struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
+-{
+- return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
+-}
+-EXPORT_SYMBOL(padata_alloc_possible);
+-
+-/**
+ * padata_alloc - allocate and initialize a padata instance and specify
+ * cpumasks for serial and parallel workers.
+ *
+@@ -954,9 +941,9 @@ EXPORT_SYMBOL(padata_alloc_possible);
+ * @pcpumask: cpumask that will be used for padata parallelization
+ * @cbcpumask: cpumask that will be used for padata serialization
+ */
+-struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+- const struct cpumask *pcpumask,
+- const struct cpumask *cbcpumask)
++static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
++ const struct cpumask *pcpumask,
++ const struct cpumask *cbcpumask)
+ {
+ struct padata_instance *pinst;
+ struct parallel_data *pd = NULL;
+@@ -1011,6 +998,19 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ }
+
+ /**
++ * padata_alloc_possible - Allocate and initialize padata instance.
++ * Use the cpu_possible_mask for serial and
++ * parallel workers.
++ *
++ * @wq: workqueue to use for the allocated padata instance
++ */
++struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
++{
++ return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
++}
++EXPORT_SYMBOL(padata_alloc_possible);
++
++/**
+ * padata_free - free a padata instance
+ *
+ * @padata_inst: padata instance to free
+--
+2.11.0
+
diff --git a/patches/0006-powerpc-Adjust-system_state-check.patch b/patches/0006-powerpc-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..9e668c663c3a
--- /dev/null
+++ b/patches/0006-powerpc-Adjust-system_state-check.patch
@@ -0,0 +1,39 @@
+From a8fcfc1917681ba1ccc23a429543a67aad8bfd00 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:37 +0200
+Subject: [PATCH 06/17] powerpc: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in smp_generic_cpu_bootable() to handle the
+extra states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: http://lkml.kernel.org/r/20170516184735.359536998@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ arch/powerpc/kernel/smp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/kernel/smp.c
++++ b/arch/powerpc/kernel/smp.c
+@@ -98,7 +98,7 @@ int smp_generic_cpu_bootable(unsigned in
+ /* Special case - we inhibit secondary thread startup
+ * during boot if the user requests it.
+ */
+- if (system_state == SYSTEM_BOOTING && cpu_has_feature(CPU_FTR_SMT)) {
++ if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) {
+ if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0)
+ return 0;
+ if (smt_enabled_at_boot
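A minimal sketch of the check pattern used here (illustrative only): once extra states exist between SYSTEM_BOOTING and SYSTEM_RUNNING, "still booting?" tests compare against SYSTEM_RUNNING instead of testing for SYSTEM_BOOTING alone.

#include <linux/kernel.h>

static bool still_booting(void)
{
        /* True for SYSTEM_BOOTING and any new intermediate state. */
        return system_state < SYSTEM_RUNNING;
}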
diff --git a/patches/0006-sched-tracing-Update-trace_sched_pi_setprio.patch b/patches/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
index bb65607617a1..8a6ea453a154 100644
--- a/patches/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
+++ b/patches/0006-sched-tracing-Update-trace_sched_pi_setprio.patch
@@ -1,8 +1,7 @@
+From b91473ff6e979c0028f02f90e40c844959c736d8 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 23 Mar 2017 15:56:12 +0100
-Subject: [PATCH] sched,tracing: Update trace_sched_pi_setprio()
-
-Upstream commit b91473ff6e979c0028f02f90e40c844959c736d8
+Subject: [PATCH 6/9] sched,tracing: Update trace_sched_pi_setprio()
Pass the PI donor task, instead of a numerical priority.
@@ -97,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3708,7 +3708,7 @@ void rt_mutex_setprio(struct task_struct
+@@ -3751,7 +3751,7 @@ void rt_mutex_setprio(struct task_struct
goto out_unlock;
}
diff --git a/patches/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch b/patches/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..9e3bde0280f6
--- /dev/null
+++ b/patches/0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,118 @@
+From ea875ec94eafb858990f3fe9528501f983105653 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 13 Apr 2017 10:17:07 +0200
+Subject: [PATCH 06/13] sparc/sysfs: Replace racy task affinity logic
+
+The mmustat_enable sysfs file accessor functions must run code on the
+target CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the original
+affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread resulting in
+code executing on the wrong CPU and overwriting the new affinity setting.
+
+Replace it by using work_on_cpu() which guarantees to run the code on the
+requested CPU.
+
+Protection against CPU hotplug is not required as the open sysfs file
+already prevents the removal from the CPU offline callback. Using the
+hotplug protected version would actually be wrong because it would deadlock
+against a CPU hotplug operation of the CPU associated with the sysfs file in
+progress.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: David S. Miller <davem@davemloft.net>
+Cc: fenghua.yu@intel.com
+Cc: tony.luck@intel.com
+Cc: herbert@gondor.apana.org.au
+Cc: rjw@rjwysocki.net
+Cc: peterz@infradead.org
+Cc: benh@kernel.crashing.org
+Cc: bigeasy@linutronix.de
+Cc: jiangshanlai@gmail.com
+Cc: sparclinux@vger.kernel.org
+Cc: viresh.kumar@linaro.org
+Cc: mpe@ellerman.id.au
+Cc: tj@kernel.org
+Cc: lenb@kernel.org
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131001270.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ arch/sparc/kernel/sysfs.c | 39 +++++++++++----------------------------
+ 1 file changed, 11 insertions(+), 28 deletions(-)
+
+--- a/arch/sparc/kernel/sysfs.c
++++ b/arch/sparc/kernel/sysfs.c
+@@ -98,27 +98,7 @@ static struct attribute_group mmu_stat_g
+ .name = "mmu_stats",
+ };
+
+-/* XXX convert to rusty's on_one_cpu */
+-static unsigned long run_on_cpu(unsigned long cpu,
+- unsigned long (*func)(unsigned long),
+- unsigned long arg)
+-{
+- cpumask_t old_affinity;
+- unsigned long ret;
+-
+- cpumask_copy(&old_affinity, &current->cpus_allowed);
+- /* should return -EINVAL to userspace */
+- if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+- return 0;
+-
+- ret = func(arg);
+-
+- set_cpus_allowed_ptr(current, &old_affinity);
+-
+- return ret;
+-}
+-
+-static unsigned long read_mmustat_enable(unsigned long junk)
++static long read_mmustat_enable(void *data __maybe_unused)
+ {
+ unsigned long ra = 0;
+
+@@ -127,11 +107,11 @@ static unsigned long read_mmustat_enable
+ return ra != 0;
+ }
+
+-static unsigned long write_mmustat_enable(unsigned long val)
++static long write_mmustat_enable(void *data)
+ {
+- unsigned long ra, orig_ra;
++ unsigned long ra, orig_ra, *val = data;
+
+- if (val)
++ if (*val)
+ ra = __pa(&per_cpu(mmu_stats, smp_processor_id()));
+ else
+ ra = 0UL;
+@@ -142,7 +122,8 @@ static unsigned long write_mmustat_enabl
+ static ssize_t show_mmustat_enable(struct device *s,
+ struct device_attribute *attr, char *buf)
+ {
+- unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0);
++ long val = work_on_cpu(s->id, read_mmustat_enable, NULL);
++
+ return sprintf(buf, "%lx\n", val);
+ }
+
+@@ -150,13 +131,15 @@ static ssize_t store_mmustat_enable(stru
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+ {
+- unsigned long val, err;
+- int ret = sscanf(buf, "%lu", &val);
++ unsigned long val;
++ long err;
++ int ret;
+
++ ret = sscanf(buf, "%lu", &val);
+ if (ret != 1)
+ return -EINVAL;
+
+- err = run_on_cpu(s->id, write_mmustat_enable, val);
++ err = work_on_cpu(s->id, write_mmustat_enable, &val);
+ if (err)
+ return -EIO;
+
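For reference, a minimal sketch of the work_on_cpu() pattern the patch switches to (hypothetical function names, not part of the queue): the function is executed by a kworker bound to the requested CPU, so the caller's affinity is never touched.

#include <linux/smp.h>
#include <linux/workqueue.h>

static long read_on_cpu(void *unused)
{
        /* Executes in a kworker that is bound to the requested CPU. */
        return smp_processor_id();
}

static long example_read(unsigned int cpu)
{
        /* Caller keeps @cpu online, e.g. via the open sysfs file. */
        return work_on_cpu(cpu, read_on_cpu, NULL);
}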
diff --git a/patches/0007-ACPI-Adjust-system_state-check.patch b/patches/0007-ACPI-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..6859f34757a3
--- /dev/null
+++ b/patches/0007-ACPI-Adjust-system_state-check.patch
@@ -0,0 +1,38 @@
+From 9762b33dc31c67e34b36ba4e787e64084b3136ff Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:38 +0200
+Subject: [PATCH 07/17] ACPI: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Make the decision whether a pci root is hotplugged depend on SYSTEM_RUNNING
+instead of !SYSTEM_BOOTING. It makes no sense to cover states greater than
+SYSTEM_RUNNING as there are no hotplug events on reboot and poweroff.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Len Brown <lenb@kernel.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
+Link: http://lkml.kernel.org/r/20170516184735.446455652@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ drivers/acpi/pci_root.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/acpi/pci_root.c
++++ b/drivers/acpi/pci_root.c
+@@ -523,7 +523,7 @@ static int acpi_pci_root_add(struct acpi
+ struct acpi_pci_root *root;
+ acpi_handle handle = device->handle;
+ int no_aspm = 0;
+- bool hotadd = system_state != SYSTEM_BOOTING;
++ bool hotadd = system_state == SYSTEM_RUNNING;
+
+ root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
+ if (!root)
diff --git a/patches/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch b/patches/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
new file mode 100644
index 000000000000..0e155d91d008
--- /dev/null
+++ b/patches/0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
@@ -0,0 +1,45 @@
+From a5cbdf693a60d5b86d4d21dfedd90f17754eb273 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:33 +0200
+Subject: [PATCH 07/13] ACPI/processor: Fix error handling in
+ __acpi_processor_start()
+
+When acpi_install_notify_handler() fails, the cooling device stays
+registered, the sysfs files created via acpi_pss_perf_init() are
+leaked and the function returns success.
+
+Undo acpi_pss_perf_init() and return a proper error code.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: linux-acpi@vger.kernel.org
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.695499645@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/acpi/processor_driver.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -251,6 +251,9 @@ static int __acpi_processor_start(struct
+ if (ACPI_SUCCESS(status))
+ return 0;
+
++ result = -ENODEV;
++ acpi_pss_perf_exit(pr, device);
++
+ err_power_exit:
+ acpi_processor_power_exit(pr);
+ return result;
diff --git a/patches/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch b/patches/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
index c07c8076e29b..9d764e763197 100644
--- a/patches/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
+++ b/patches/0007-futex-Rework-inconsistent-rt_mutex-futex_q-state.patch
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1402,12 +1402,19 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1404,12 +1404,19 @@ static int wake_futex_pi(u32 __user *uad
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
/*
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We pass it to the next owner. The WAITERS bit is always
-@@ -2330,7 +2337,6 @@ static long futex_wait_restart(struct re
+@@ -2332,7 +2339,6 @@ static long futex_wait_restart(struct re
*/
static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked)
{
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
if (locked) {
-@@ -2344,43 +2350,15 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2346,43 +2352,15 @@ static int fixup_owner(u32 __user *uaddr
}
/*
diff --git a/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch b/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
new file mode 100644
index 000000000000..a0b5d93465a6
--- /dev/null
+++ b/patches/0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
@@ -0,0 +1,95 @@
+From c5a81c8ff816d89941fe86961b286765d6ca2f5f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:18 +0200
+Subject: [PATCH 07/32] padata: Avoid nested calls to cpus_read_lock() in
+ pcrypt_init_padata()
+
+pcrypt_init_padata()
+ cpus_read_lock()
+ padata_alloc_possible()
+ padata_alloc()
+ cpus_read_lock()
+
+The nested call to cpus_read_lock() works with the current implementation,
+but prevents the conversion to a percpu rwsem.
+
+The other caller of padata_alloc_possible() is pcrypt_init_padata(), which
+calls it from a cpus_read_lock() protected region as well.
+
+Remove the cpus_read_lock() call in padata_alloc() and document the
+calling convention.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Steffen Klassert <steffen.klassert@secunet.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-crypto@vger.kernel.org
+Link: http://lkml.kernel.org/r/20170524081547.571278910@linutronix.de
+---
+ kernel/padata.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/kernel/padata.c b/kernel/padata.c
+index 0c708f648853..868f947166d7 100644
+--- a/kernel/padata.c
++++ b/kernel/padata.c
+@@ -940,6 +940,8 @@ static struct kobj_type padata_attr_type = {
+ * @wq: workqueue to use for the allocated padata instance
+ * @pcpumask: cpumask that will be used for padata parallelization
+ * @cbcpumask: cpumask that will be used for padata serialization
++ *
++ * Must be called from a cpus_read_lock() protected region
+ */
+ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ const struct cpumask *pcpumask,
+@@ -952,7 +954,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ if (!pinst)
+ goto err;
+
+- get_online_cpus();
+ if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
+ goto err_free_inst;
+ if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
+@@ -976,14 +977,12 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+
+ pinst->flags = 0;
+
+- put_online_cpus();
+-
+ BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
+ kobject_init(&pinst->kobj, &padata_attr_type);
+ mutex_init(&pinst->lock);
+
+ #ifdef CONFIG_HOTPLUG_CPU
+- cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
++ cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
+ #endif
+ return pinst;
+
+@@ -992,7 +991,6 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ free_cpumask_var(pinst->cpumask.cbcpu);
+ err_free_inst:
+ kfree(pinst);
+- put_online_cpus();
+ err:
+ return NULL;
+ }
+@@ -1003,9 +1001,12 @@ static struct padata_instance *padata_alloc(struct workqueue_struct *wq,
+ * parallel workers.
+ *
+ * @wq: workqueue to use for the allocated padata instance
++ *
++ * Must be called from a cpus_read_lock() protected region
+ */
+ struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
+ {
++ lockdep_assert_cpus_held();
+ return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
+ }
+ EXPORT_SYMBOL(padata_alloc_possible);
+--
+2.11.0
+
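A short sketch of the calling convention documented above (illustrative, with hypothetical names): the inner function asserts that the hotplug lock is already held instead of taking it again, and the outer caller provides the cpus_read_lock() protected region.

#include <linux/cpu.h>

static int setup_instance(void)
{
        lockdep_assert_cpus_held();     /* caller holds cpus_read_lock() */
        /* ... per-CPU setup that must not race with hotplug ... */
        return 0;
}

static int register_instance(void)
{
        int ret;

        cpus_read_lock();
        ret = setup_instance();
        cpus_read_unlock();
        return ret;
}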
diff --git a/patches/0007-rtmutex-Fix-PI-chain-order-integrity.patch b/patches/0007-rtmutex-Fix-PI-chain-order-integrity.patch
index 0f3bd10d747c..4514f5ee65df 100644
--- a/patches/0007-rtmutex-Fix-PI-chain-order-integrity.patch
+++ b/patches/0007-rtmutex-Fix-PI-chain-order-integrity.patch
@@ -1,8 +1,7 @@
+From e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 23 Mar 2017 15:56:13 +0100
-Subject: [PATCH] rtmutex: Fix PI chain order integrity
-
-Upstream commit e0aad5b44ff5d28ac1d6ae70cdf84ca228e889dc
+Subject: [PATCH 7/9] rtmutex: Fix PI chain order integrity
rt_mutex_waiter::prio is a copy of task_struct::prio which is updated
during the PI chain walk, such that the PI chain order isn't messed up
@@ -37,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -236,8 +236,7 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+@@ -238,8 +238,7 @@ rt_mutex_waiter_less(struct rt_mutex_wai
* then right waiter has a dl_prio() too.
*/
if (dl_prio(left->prio))
@@ -47,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -648,7 +647,26 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -650,7 +649,26 @@ static int rt_mutex_adjust_prio_chain(st
/* [7] Requeue the waiter in the lock waiter tree. */
rt_mutex_dequeue(lock, waiter);
@@ -74,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_enqueue(lock, waiter);
/* [8] Release the task */
-@@ -775,6 +793,8 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -777,6 +795,8 @@ static int rt_mutex_adjust_prio_chain(st
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
{
@@ -83,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Before testing whether we can acquire @lock, we set the
* RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
-@@ -900,6 +920,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -902,6 +922,8 @@ static int task_blocks_on_rt_mutex(struc
struct rt_mutex *next_lock;
int chain_walk = 0, res;
@@ -92,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Early deadlock detection. We really don't want the task to
* enqueue on itself just to untangle the mess later. It's not
-@@ -917,6 +939,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -919,6 +941,7 @@ static int task_blocks_on_rt_mutex(struc
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
@@ -100,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get the top priority waiter on the lock */
if (rt_mutex_has_waiters(lock))
-@@ -1034,6 +1057,8 @@ static void remove_waiter(struct rt_mute
+@@ -1036,6 +1059,8 @@ static void remove_waiter(struct rt_mute
struct task_struct *owner = rt_mutex_owner(lock);
struct rt_mutex *next_lock;
@@ -111,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
current->pi_blocked_on = NULL;
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -33,6 +33,7 @@ struct rt_mutex_waiter {
+@@ -34,6 +34,7 @@ struct rt_mutex_waiter {
struct rt_mutex *deadlock_lock;
#endif
int prio;
diff --git a/patches/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch b/patches/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..e793161acc51
--- /dev/null
+++ b/patches/0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,193 @@
+From 8153f9ac43897f9f4786b30badc134fcc1a4fb11 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:34 +0200
+Subject: [PATCH 08/13] ACPI/processor: Replace racy task affinity logic
+
+acpi_processor_get_throttling() needs to invoke the getter function on
+the target CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and resetting it to the original
+affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+acpi_processor_get_throttling() is invoked in two ways:
+
+1) The CPU online callback, which is already running on the target CPU and
+ obviously protected against hotplug and not affected by affinity
+ settings.
+
+2) The ACPI driver probe function, which is not protected against hotplug
+ during modprobe.
+
+Switch it over to work_on_cpu() and protect the probe function against CPU
+hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: linux-acpi@vger.kernel.org
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.785920903@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/acpi/processor_driver.c | 7 +++-
+ drivers/acpi/processor_throttling.c | 62 ++++++++++++++++++++----------------
+ 2 files changed, 42 insertions(+), 27 deletions(-)
+
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -262,11 +262,16 @@ static int __acpi_processor_start(struct
+ static int acpi_processor_start(struct device *dev)
+ {
+ struct acpi_device *device = ACPI_COMPANION(dev);
++ int ret;
+
+ if (!device)
+ return -ENODEV;
+
+- return __acpi_processor_start(device);
++ /* Protect against concurrent CPU hotplug operations */
++ get_online_cpus();
++ ret = __acpi_processor_start(device);
++ put_online_cpus();
++ return ret;
+ }
+
+ static int acpi_processor_stop(struct device *dev)
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -62,8 +62,8 @@ struct acpi_processor_throttling_arg {
+ #define THROTTLING_POSTCHANGE (2)
+
+ static int acpi_processor_get_throttling(struct acpi_processor *pr);
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+- int state, bool force);
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force, bool direct);
+
+ static int acpi_processor_update_tsd_coord(void)
+ {
+@@ -891,7 +891,8 @@ static int acpi_processor_get_throttling
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+ "Invalid throttling state, reset\n"));
+ state = 0;
+- ret = acpi_processor_set_throttling(pr, state, true);
++ ret = __acpi_processor_set_throttling(pr, state, true,
++ true);
+ if (ret)
+ return ret;
+ }
+@@ -901,36 +902,31 @@ static int acpi_processor_get_throttling
+ return 0;
+ }
+
+-static int acpi_processor_get_throttling(struct acpi_processor *pr)
++static long __acpi_processor_get_throttling(void *data)
+ {
+- cpumask_var_t saved_mask;
+- int ret;
++ struct acpi_processor *pr = data;
++
++ return pr->throttling.acpi_processor_get_throttling(pr);
++}
+
++static int acpi_processor_get_throttling(struct acpi_processor *pr)
++{
+ if (!pr)
+ return -EINVAL;
+
+ if (!pr->flags.throttling)
+ return -ENODEV;
+
+- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
+- return -ENOMEM;
+-
+ /*
+- * Migrate task to the cpu pointed by pr.
++ * This is either called from the CPU hotplug callback of
++ * processor_driver or via the ACPI probe function. In the latter
++ * case the CPU is not guaranteed to be online. Both call sites are
++ * protected against CPU hotplug.
+ */
+- cpumask_copy(saved_mask, &current->cpus_allowed);
+- /* FIXME: use work_on_cpu() */
+- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
+- /* Can't migrate to the target pr->id CPU. Exit */
+- free_cpumask_var(saved_mask);
++ if (!cpu_online(pr->id))
+ return -ENODEV;
+- }
+- ret = pr->throttling.acpi_processor_get_throttling(pr);
+- /* restore the previous state */
+- set_cpus_allowed_ptr(current, saved_mask);
+- free_cpumask_var(saved_mask);
+
+- return ret;
++ return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
+ }
+
+ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+@@ -1080,8 +1076,15 @@ static long acpi_processor_throttling_fn
+ arg->target_state, arg->force);
+ }
+
+-int acpi_processor_set_throttling(struct acpi_processor *pr,
+- int state, bool force)
++static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
++{
++ if (direct)
++ return fn(arg);
++ return work_on_cpu(cpu, fn, arg);
++}
++
++static int __acpi_processor_set_throttling(struct acpi_processor *pr,
++ int state, bool force, bool direct)
+ {
+ int ret = 0;
+ unsigned int i;
+@@ -1130,7 +1133,8 @@ int acpi_processor_set_throttling(struct
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
++ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn, &arg,
++ direct);
+ } else {
+ /*
+ * When the T-state coordination is SW_ALL or HW_ALL,
+@@ -1163,8 +1167,8 @@ int acpi_processor_set_throttling(struct
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+- ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+- &arg);
++ ret = call_on_cpu(pr->id, acpi_processor_throttling_fn,
++ &arg, direct);
+ }
+ }
+ /*
+@@ -1182,6 +1186,12 @@ int acpi_processor_set_throttling(struct
+ return ret;
+ }
+
++int acpi_processor_set_throttling(struct acpi_processor *pr, int state,
++ bool force)
++{
++ return __acpi_processor_set_throttling(pr, state, force, false);
++}
++
+ int acpi_processor_get_throttling_info(struct acpi_processor *pr)
+ {
+ int result = 0;
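The direct/queued split used above, reduced to a sketch (hypothetical callback name): paths that already run on the target CPU, such as the CPU online callback, call the function directly, while everything else is pushed to the target CPU via work_on_cpu().

#include <linux/workqueue.h>

static long throttle_fn(void *arg)
{
        /* Must execute on the CPU the request is aimed at. */
        return 0;
}

static int throttle_call_on_cpu(int cpu, void *arg, bool direct)
{
        if (direct)             /* already on @cpu, e.g. hotplug callback */
                return throttle_fn(arg);
        return work_on_cpu(cpu, throttle_fn, arg);
}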
diff --git a/patches/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch b/patches/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
index 53d4c2257a8a..aba723367584 100644
--- a/patches/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
+++ b/patches/0008-futex-Pull-rt_mutex_futex_unlock-out-from-under-hb-l.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -919,10 +919,12 @@ void exit_pi_state_list(struct task_stru
+@@ -921,10 +921,12 @@ void exit_pi_state_list(struct task_stru
pi_state->owner = NULL;
raw_spin_unlock_irq(&curr->pi_lock);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_lock_irq(&curr->pi_lock);
}
raw_spin_unlock_irq(&curr->pi_lock);
-@@ -1035,6 +1037,11 @@ static int attach_to_pi_state(u32 __user
+@@ -1037,6 +1039,11 @@ static int attach_to_pi_state(u32 __user
* has dropped the hb->lock in between queue_me() and unqueue_me_pi(),
* which in turn means that futex_lock_pi() still has a reference on
* our pi_state.
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
WARN_ON(!atomic_read(&pi_state->refcount));
-@@ -1378,48 +1385,40 @@ static void mark_wake_futex(struct wake_
+@@ -1380,48 +1387,40 @@ static void mark_wake_futex(struct wake_
smp_store_release(&q->lock_ptr, NULL);
}
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
u32 uninitialized_var(curval), newval;
+ struct task_struct *new_owner;
+ bool deboost = false;
- WAKE_Q(wake_q);
+ DEFINE_WAKE_Q(wake_q);
- bool deboost;
int ret = 0;
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);
-@@ -1442,10 +1441,8 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1444,10 +1443,8 @@ static int wake_futex_pi(u32 __user *uad
ret = -EINVAL;
}
@@ -156,7 +156,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_lock(&pi_state->owner->pi_lock);
WARN_ON(list_empty(&pi_state->list));
-@@ -1463,15 +1460,15 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1465,15 +1462,15 @@ static int wake_futex_pi(u32 __user *uad
*/
deboost = __rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
@@ -174,7 +174,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2230,7 +2227,8 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2232,7 +2229,8 @@ static int fixup_pi_state_owner(u32 __us
/*
* We are here either because we stole the rtmutex from the
* previous highest priority waiter or we are the highest priority
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to replace the newowner TID in the user space variable.
* This must be atomic as we have to preserve the owner died bit here.
*
-@@ -2247,7 +2245,7 @@ static int fixup_pi_state_owner(u32 __us
+@@ -2249,7 +2247,7 @@ static int fixup_pi_state_owner(u32 __us
if (get_futex_value_locked(&uval, uaddr))
goto handle_fault;
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
newval = (uval & FUTEX_OWNER_DIED) | newtid;
if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))
-@@ -2343,6 +2341,10 @@ static int fixup_owner(u32 __user *uaddr
+@@ -2345,6 +2343,10 @@ static int fixup_owner(u32 __user *uaddr
/*
* Got the lock. We might not be the anticipated owner if we
* did a lock-steal - fix up the PI-state in that case:
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (q->pi_state->owner != current)
ret = fixup_pi_state_owner(uaddr, q, current);
-@@ -2582,6 +2584,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2584,6 +2586,7 @@ static int futex_lock_pi(u32 __user *uad
ktime_t *time, int trylock)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -212,7 +212,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2668,12 +2671,19 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2670,12 +2673,19 @@ static int futex_lock_pi(u32 __user *uad
* If fixup_owner() faulted and was unable to handle the fault, unlock
* it and return the fault to userspace.
*/
@@ -234,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out_put_key;
out_unlock_put_key:
-@@ -2736,10 +2746,36 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2738,10 +2748,36 @@ static int futex_unlock_pi(u32 __user *u
*/
top_waiter = futex_top_waiter(hb, &key);
if (top_waiter) {
@@ -274,7 +274,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (!ret)
goto out_putkey;
-@@ -2754,7 +2790,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2756,7 +2792,6 @@ static int futex_unlock_pi(u32 __user *u
* setting the FUTEX_WAITERS bit. Try again.
*/
if (ret == -EAGAIN) {
@@ -282,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
put_futex_key(&key);
goto retry;
}
-@@ -2762,7 +2797,7 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2764,7 +2799,7 @@ static int futex_unlock_pi(u32 __user *u
* wake_futex_pi has detected invalid state. Tell user
* space.
*/
@@ -291,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2772,8 +2807,10 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2774,8 +2809,10 @@ static int futex_unlock_pi(u32 __user *u
* preserve the WAITERS bit not the OWNER_DIED one. We are the
* owner.
*/
@@ -303,7 +303,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If uval has changed, let user space handle it.
-@@ -2787,7 +2824,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2789,7 +2826,6 @@ static int futex_unlock_pi(u32 __user *u
return ret;
pi_faulted:
@@ -311,7 +311,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
put_futex_key(&key);
ret = fault_in_user_writeable(uaddr);
-@@ -2891,6 +2927,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2893,6 +2929,7 @@ static int futex_wait_requeue_pi(u32 __u
u32 __user *uaddr2)
{
struct hrtimer_sleeper timeout, *to = NULL;
@@ -319,7 +319,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex_waiter rt_waiter;
struct futex_hash_bucket *hb;
union futex_key key2 = FUTEX_KEY_INIT;
-@@ -2975,8 +3012,10 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2977,8 +3014,10 @@ static int futex_wait_requeue_pi(u32 __u
if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current);
@@ -332,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Drop the reference to the pi state which
* the requeue_pi() code acquired for us.
-@@ -3015,13 +3054,20 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3017,13 +3056,20 @@ static int futex_wait_requeue_pi(u32 __u
* the fault, unlock the rt_mutex and return the fault to
* userspace.
*/
diff --git a/patches/0008-mm-Adjust-system_state-check.patch b/patches/0008-mm-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..bc80d72e2112
--- /dev/null
+++ b/patches/0008-mm-Adjust-system_state-check.patch
@@ -0,0 +1,42 @@
+From 8cdde385c7a33afbe13fd71351da0968540fa566 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:39 +0200
+Subject: [PATCH 08/17] mm: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+get_nid_for_pfn() checks for system_state == BOOTING to decide whether to
+use early_pfn_to_nid() when CONFIG_DEFERRED_STRUCT_PAGE_INIT=y.
+
+That check is dubious, because the switch to state RUNNING happens way after
+page_alloc_init_late() has been invoked.
+
+Change the check to less than RUNNING state so it covers the new
+intermediate states as well.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.528279534@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ drivers/base/node.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/base/node.c
++++ b/drivers/base/node.c
+@@ -377,7 +377,7 @@ static int __ref get_nid_for_pfn(unsigne
+ if (!pfn_valid_within(pfn))
+ return -1;
+ #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+- if (system_state == SYSTEM_BOOTING)
++ if (system_state < SYSTEM_RUNNING)
+ return early_pfn_to_nid(pfn);
+ #endif
+ page = pfn_to_page(pfn);
diff --git a/patches/0008-rtmutex-Fix-more-prio-comparisons.patch b/patches/0008-rtmutex-Fix-more-prio-comparisons.patch
index b3567f0ca4b5..d8e690b4742b 100644
--- a/patches/0008-rtmutex-Fix-more-prio-comparisons.patch
+++ b/patches/0008-rtmutex-Fix-more-prio-comparisons.patch
@@ -1,8 +1,7 @@
+From 19830e55247cddb3f46f1bf60b8e245593491bea Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 23 Mar 2017 15:56:14 +0100
-Subject: [PATCH] rtmutex: Fix more prio comparisons
-
-Upstream commit 19830e55247cddb3f46f1bf60b8e245593491bea
+Subject: [PATCH 8/9] rtmutex: Fix more prio comparisons
There was a pure ->prio comparison left in try_to_wake_rt_mutex(),
convert it to use rt_mutex_waiter_less(), noting that greater-or-equal
@@ -32,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -222,6 +222,12 @@ static inline bool unlock_rt_mutex_safe(
+@@ -224,6 +224,12 @@ static inline bool unlock_rt_mutex_safe(
}
#endif
@@ -45,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
struct rt_mutex_waiter *right)
-@@ -241,6 +247,25 @@ rt_mutex_waiter_less(struct rt_mutex_wai
+@@ -243,6 +249,25 @@ rt_mutex_waiter_less(struct rt_mutex_wai
return 0;
}
@@ -71,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
-@@ -551,7 +576,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -553,7 +578,7 @@ static int rt_mutex_adjust_prio_chain(st
* enabled we continue, but stop the requeueing in the chain
* walk.
*/
@@ -80,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!detect_deadlock)
goto out_unlock_pi;
else
-@@ -854,7 +879,8 @@ static int try_to_take_rt_mutex(struct r
+@@ -856,7 +881,8 @@ static int try_to_take_rt_mutex(struct r
* the top waiter priority (kernel view),
* @task lost.
*/
@@ -90,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
/*
-@@ -1117,7 +1143,7 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1119,7 +1145,7 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
diff --git a/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch b/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
new file mode 100644
index 000000000000..60c81a794fda
--- /dev/null
+++ b/patches/0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
@@ -0,0 +1,44 @@
+From 547efeadd42a3c75e41e33c0637cba100fc18289 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:19 +0200
+Subject: [PATCH 08/32] x86/mtrr: Remove get_online_cpus() from
+ mtrr_save_state()
+
+mtrr_save_state() is invoked from native_cpu_up() which is in the context
+of a CPU hotplug operation and therefore calling get_online_cpus() is
+pointless.
+
+While this works in the current get_online_cpus() implementation, it
+prevents converting the hotplug locking to percpu rwsems.
+
+Remove it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.651378834@linutronix.de
+---
+ arch/x86/kernel/cpu/mtrr/main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
+index 2bce84d91c2b..c5bb63be4ba1 100644
+--- a/arch/x86/kernel/cpu/mtrr/main.c
++++ b/arch/x86/kernel/cpu/mtrr/main.c
+@@ -807,10 +807,8 @@ void mtrr_save_state(void)
+ if (!mtrr_enabled())
+ return;
+
+- get_online_cpus();
+ first_cpu = cpumask_first(cpu_online_mask);
+ smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
+- put_online_cpus();
+ }
+
+ void set_mtrr_aps_delayed_init(void)
+--
+2.11.0
+
diff --git a/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch b/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
new file mode 100644
index 000000000000..cdda01224aec
--- /dev/null
+++ b/patches/0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
@@ -0,0 +1,108 @@
+From a92551e41d5a7b563ae440496bc5ca19d205231d Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:20 +0200
+Subject: [PATCH 09/32] cpufreq: Use cpuhp_setup_state_nocalls_cpuslocked()
+
+cpufreq holds get_online_cpus() while invoking cpuhp_setup_state_nocalls()
+to make subsys_interface_register() and the registration of hotplug calls
+atomic versus cpu hotplug.
+
+cpuhp_setup_state_nocalls() invokes get_online_cpus() as well. This is
+correct, but prevents the conversion of the hotplug locking to a percpu
+rwsem.
+
+Use cpuhp_setup/remove_state_nocalls_cpuslocked() to avoid the nested
+call. Convert *_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: linux-pm@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081547.731628408@linutronix.de
+---
+ drivers/cpufreq/cpufreq.c | 21 +++++++++++----------
+ 1 file changed, 11 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
+index 0e3f6496524d..6001369f9aeb 100644
+--- a/drivers/cpufreq/cpufreq.c
++++ b/drivers/cpufreq/cpufreq.c
+@@ -887,7 +887,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ struct freq_attr *fattr = to_attr(attr);
+ ssize_t ret = -EINVAL;
+
+- get_online_cpus();
++ cpus_read_lock();
+
+ if (cpu_online(policy->cpu)) {
+ down_write(&policy->rwsem);
+@@ -895,7 +895,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
+ up_write(&policy->rwsem);
+ }
+
+- put_online_cpus();
++ cpus_read_unlock();
+
+ return ret;
+ }
+@@ -2441,7 +2441,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ pr_debug("trying to register driver %s\n", driver_data->name);
+
+ /* Protect against concurrent CPU online/offline. */
+- get_online_cpus();
++ cpus_read_lock();
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+ if (cpufreq_driver) {
+@@ -2473,9 +2473,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ goto err_if_unreg;
+ }
+
+- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "cpufreq:online",
+- cpuhp_cpufreq_online,
+- cpuhp_cpufreq_offline);
++ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
++ "cpufreq:online",
++ cpuhp_cpufreq_online,
++ cpuhp_cpufreq_offline);
+ if (ret < 0)
+ goto err_if_unreg;
+ hp_online = ret;
+@@ -2493,7 +2494,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
+ cpufreq_driver = NULL;
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+ out:
+- put_online_cpus();
++ cpus_read_unlock();
+ return ret;
+ }
+ EXPORT_SYMBOL_GPL(cpufreq_register_driver);
+@@ -2516,17 +2517,17 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
+ pr_debug("unregistering driver %s\n", driver->name);
+
+ /* Protect against concurrent cpu hotplug */
+- get_online_cpus();
++ cpus_read_lock();
+ subsys_interface_unregister(&cpufreq_interface);
+ remove_boost_sysfs_file();
+- cpuhp_remove_state_nocalls(hp_online);
++ cpuhp_remove_state_nocalls_cpuslocked(hp_online);
+
+ write_lock_irqsave(&cpufreq_driver_lock, flags);
+
+ cpufreq_driver = NULL;
+
+ write_unlock_irqrestore(&cpufreq_driver_lock, flags);
+- put_online_cpus();
++ cpus_read_unlock();
+
+ return 0;
+ }
+--
+2.11.0
+
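As an illustration of the registration pattern after this change (sketch only, driver names hypothetical): the dynamic hotplug state is set up with the _cpuslocked() variant because the caller already holds the hotplug lock for the rest of its registration work.

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static enum cpuhp_state mydrv_hp_state;

static int mydrv_online(unsigned int cpu)  { return 0; }
static int mydrv_offline(unsigned int cpu) { return 0; }

static int mydrv_register(void)
{
        int ret;

        cpus_read_lock();
        /* ... other registration that must be atomic vs. hotplug ... */
        ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
                                                   "mydrv:online",
                                                   mydrv_online,
                                                   mydrv_offline);
        if (ret >= 0) {
                mydrv_hp_state = ret;   /* dynamic state for later removal */
                ret = 0;
        }
        cpus_read_unlock();
        return ret;
}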
diff --git a/patches/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch b/patches/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..c396649cc1b0
--- /dev/null
+++ b/patches/0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,209 @@
+From 38f05ed04beb276f780fcd2b5c0b78c76d0b3c0c Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:55:03 +0200
+Subject: [PATCH 09/13] cpufreq/ia64: Replace racy task affinity logic
+
+The get() and target() callbacks must run on the affected cpu. This is
+achieved by temporarily setting the affinity of the calling thread to the
+requested CPU and resetting it to the original affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread resulting in
+code executing on the wrong CPU and overwriting the new affinity setting.
+
+Replace it with work_on_cpu(). All call paths which invoke the callbacks are
+already protected against CPU hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: linux-pm@vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704122231100.2548@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/cpufreq/ia64-acpi-cpufreq.c | 92 +++++++++++++++---------------------
+ 1 file changed, 39 insertions(+), 53 deletions(-)
+
+--- a/drivers/cpufreq/ia64-acpi-cpufreq.c
++++ b/drivers/cpufreq/ia64-acpi-cpufreq.c
+@@ -34,6 +34,11 @@ struct cpufreq_acpi_io {
+ unsigned int resume;
+ };
+
++struct cpufreq_acpi_req {
++ unsigned int cpu;
++ unsigned int state;
++};
++
+ static struct cpufreq_acpi_io *acpi_io_data[NR_CPUS];
+
+ static struct cpufreq_driver acpi_cpufreq_driver;
+@@ -83,8 +88,7 @@ processor_get_pstate (
+ static unsigned
+ extract_clock (
+ struct cpufreq_acpi_io *data,
+- unsigned value,
+- unsigned int cpu)
++ unsigned value)
+ {
+ unsigned long i;
+
+@@ -98,60 +102,43 @@ extract_clock (
+ }
+
+
+-static unsigned int
++static long
+ processor_get_freq (
+- struct cpufreq_acpi_io *data,
+- unsigned int cpu)
++ void *arg)
+ {
+- int ret = 0;
+- u32 value = 0;
+- cpumask_t saved_mask;
+- unsigned long clock_freq;
++ struct cpufreq_acpi_req *req = arg;
++ unsigned int cpu = req->cpu;
++ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
++ u32 value;
++ int ret;
+
+ pr_debug("processor_get_freq\n");
+-
+- saved_mask = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+ if (smp_processor_id() != cpu)
+- goto migrate_end;
++ return -EAGAIN;
+
+ /* processor_get_pstate gets the instantaneous frequency */
+ ret = processor_get_pstate(&value);
+-
+ if (ret) {
+- set_cpus_allowed_ptr(current, &saved_mask);
+ pr_warn("get performance failed with error %d\n", ret);
+- ret = 0;
+- goto migrate_end;
++ return ret;
+ }
+- clock_freq = extract_clock(data, value, cpu);
+- ret = (clock_freq*1000);
+-
+-migrate_end:
+- set_cpus_allowed_ptr(current, &saved_mask);
+- return ret;
++ return 1000 * extract_clock(data, value);
+ }
+
+
+-static int
++static long
+ processor_set_freq (
+- struct cpufreq_acpi_io *data,
+- struct cpufreq_policy *policy,
+- int state)
++ void *arg)
+ {
+- int ret = 0;
+- u32 value = 0;
+- cpumask_t saved_mask;
+- int retval;
++ struct cpufreq_acpi_req *req = arg;
++ unsigned int cpu = req->cpu;
++ struct cpufreq_acpi_io *data = acpi_io_data[cpu];
++ int ret, state = req->state;
++ u32 value;
+
+ pr_debug("processor_set_freq\n");
+-
+- saved_mask = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(policy->cpu));
+- if (smp_processor_id() != policy->cpu) {
+- retval = -EAGAIN;
+- goto migrate_end;
+- }
++ if (smp_processor_id() != cpu)
++ return -EAGAIN;
+
+ if (state == data->acpi_data.state) {
+ if (unlikely(data->resume)) {
+@@ -159,8 +146,7 @@ processor_set_freq (
+ data->resume = 0;
+ } else {
+ pr_debug("Already at target state (P%d)\n", state);
+- retval = 0;
+- goto migrate_end;
++ return 0;
+ }
+ }
+
+@@ -171,7 +157,6 @@ processor_set_freq (
+ * First we write the target state's 'control' value to the
+ * control_register.
+ */
+-
+ value = (u32) data->acpi_data.states[state].control;
+
+ pr_debug("Transitioning to state: 0x%08x\n", value);
+@@ -179,17 +164,11 @@ processor_set_freq (
+ ret = processor_set_pstate(value);
+ if (ret) {
+ pr_warn("Transition failed with error %d\n", ret);
+- retval = -ENODEV;
+- goto migrate_end;
++ return -ENODEV;
+ }
+
+ data->acpi_data.state = state;
+-
+- retval = 0;
+-
+-migrate_end:
+- set_cpus_allowed_ptr(current, &saved_mask);
+- return (retval);
++ return 0;
+ }
+
+
+@@ -197,11 +176,13 @@ static unsigned int
+ acpi_cpufreq_get (
+ unsigned int cpu)
+ {
+- struct cpufreq_acpi_io *data = acpi_io_data[cpu];
++ struct cpufreq_acpi_req req;
++ long ret;
+
+- pr_debug("acpi_cpufreq_get\n");
++ req.cpu = cpu;
++ ret = work_on_cpu(cpu, processor_get_freq, &req);
+
+- return processor_get_freq(data, cpu);
++ return ret > 0 ? (unsigned int) ret : 0;
+ }
+
+
+@@ -210,7 +191,12 @@ acpi_cpufreq_target (
+ struct cpufreq_policy *policy,
+ unsigned int index)
+ {
+- return processor_set_freq(acpi_io_data[policy->cpu], policy, index);
++ struct cpufreq_acpi_req req;
++
++ req.cpu = policy->cpu;
++ req.state = index;
++
++ return work_on_cpu(req.cpu, processor_set_freq, &req);
+ }
+
+ static int
diff --git a/patches/0009-cpufreq-pasemi-Adjust-system_state-check.patch b/patches/0009-cpufreq-pasemi-Adjust-system_state-check.patch
new file mode 100644
index 000000000000..8085857d07be
--- /dev/null
+++ b/patches/0009-cpufreq-pasemi-Adjust-system_state-check.patch
@@ -0,0 +1,38 @@
+From d04e31a23c3c828456cb5613f391ce4ac4e5765f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:40 +0200
+Subject: [PATCH 09/17] cpufreq/pasemi: Adjust system_state check
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in pas_cpufreq_cpu_exit() to handle the extra
+states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: http://lkml.kernel.org/r/20170516184735.620023128@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ drivers/cpufreq/pasemi-cpufreq.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/cpufreq/pasemi-cpufreq.c
++++ b/drivers/cpufreq/pasemi-cpufreq.c
+@@ -226,7 +226,7 @@ static int pas_cpufreq_cpu_exit(struct c
+ * We don't support CPU hotplug. Don't unmap after the system
+ * has already made it to a running state.
+ */
+- if (system_state != SYSTEM_BOOTING)
++ if (system_state >= SYSTEM_RUNNING)
+ return 0;
+
+ if (sdcasr_mapbase)
diff --git a/patches/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch b/patches/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
index 10b1039f290e..052a7c41ac43 100644
--- a/patches/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
+++ b/patches/0009-futex-rt_mutex-Introduce-rt_mutex_init_waiter.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2954,10 +2954,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2956,10 +2956,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(ret != 0))
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1151,6 +1151,14 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1153,6 +1153,14 @@ void rt_mutex_adjust_pi(struct task_stru
next_lock, NULL, task);
}
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
* @lock: the rt_mutex to take
-@@ -1233,9 +1241,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1235,9 +1243,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
diff --git a/patches/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch b/patches/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
index a676922cc7ee..bd28dcbafb94 100644
--- a/patches/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
+++ b/patches/0009-rtmutex-Plug-preempt-count-leak-in-rt_mutex_futex_un.patch
@@ -1,10 +1,9 @@
+From def34eaae5ce04b324e48e1bfac873091d945213 Mon Sep 17 00:00:00 2001
From: Mike Galbraith <efault@gmx.de>
Date: Wed, 5 Apr 2017 10:08:27 +0200
-Subject: [PATCH] rtmutex: Plug preempt count leak in
+Subject: [PATCH 9/9] rtmutex: Plug preempt count leak in
rt_mutex_futex_unlock()
-Upstream commit def34eaae5ce04b324e48e1bfac873091d945213
-
mark_wakeup_next_waiter() already disables preemption, doing so again
leaves us with an unpaired preempt_disable().
@@ -21,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1579,13 +1579,13 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1581,13 +1581,13 @@ bool __sched __rt_mutex_futex_unlock(str
return false; /* done */
}
diff --git a/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch b/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
new file mode 100644
index 000000000000..04af41d371ff
--- /dev/null
+++ b/patches/0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
@@ -0,0 +1,73 @@
+From 419af25fa4d0974fd758a668c08c369c19392a47 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:21 +0200
+Subject: [PATCH 10/32] KVM/PPC/Book3S HV: Use
+ cpuhp_setup_state_nocalls_cpuslocked()
+
+kvmppc_alloc_host_rm_ops() holds get_online_cpus() while invoking
+cpuhp_setup_state_nocalls().
+
+cpuhp_setup_state_nocalls() invokes get_online_cpus() as well. This is
+correct, but prevents the conversion of the hotplug locking to a percpu
+rwsem.
+
+Use cpuhp_setup_state_nocalls_cpuslocked() to avoid the nested
+call. Convert *_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: kvm@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: kvm-ppc@vger.kernel.org
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: linuxppc-dev@lists.ozlabs.org
+Cc: Alexander Graf <agraf@suse.com>
+Link: http://lkml.kernel.org/r/20170524081547.809616236@linutronix.de
+---
+ arch/powerpc/kvm/book3s_hv.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
+index 42b7a4fd57d9..48a6bd160011 100644
+--- a/arch/powerpc/kvm/book3s_hv.c
++++ b/arch/powerpc/kvm/book3s_hv.c
+@@ -3317,7 +3317,7 @@ void kvmppc_alloc_host_rm_ops(void)
+ return;
+ }
+
+- get_online_cpus();
++ cpus_read_lock();
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
+ if (!cpu_online(cpu))
+@@ -3339,17 +3339,17 @@ void kvmppc_alloc_host_rm_ops(void)
+ l_ops = (unsigned long) ops;
+
+ if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
+- put_online_cpus();
++ cpus_read_unlock();
+ kfree(ops->rm_core);
+ kfree(ops);
+ return;
+ }
+
+- cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
+- "ppc/kvm_book3s:prepare",
+- kvmppc_set_host_core,
+- kvmppc_clear_host_core);
+- put_online_cpus();
++ cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
++ "ppc/kvm_book3s:prepare",
++ kvmppc_set_host_core,
++ kvmppc_clear_host_core);
++ cpus_read_unlock();
+ }
+
+ void kvmppc_free_host_rm_ops(void)
+--
+2.11.0
+
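The pattern above recurs throughout this queue: code that already holds the hotplug lock via cpus_read_lock() must call the *_cpuslocked() variants of the cpuhp setup functions so the lock is not acquired recursively once it becomes a percpu rwsem. A condensed sketch of the convention, with placeholder callback and state names (not taken from any patch in this series):

#include <linux/cpu.h>
#include <linux/cpuhotplug.h>

static int example_online_cpu(unsigned int cpu)  { return 0; }
static int example_offline_cpu(unsigned int cpu) { return 0; }

static int example_init(void)
{
	int ret;

	cpus_read_lock();		/* replaces get_online_cpus() */

	/* ... per-CPU setup that needs a stable online mask ... */

	/* Caller already holds the hotplug lock: use the _cpuslocked variant. */
	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
						   "example/driver:online",
						   example_online_cpu,
						   example_offline_cpu);

	cpus_read_unlock();		/* replaces put_online_cpus() */
	return ret < 0 ? ret : 0;
}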
diff --git a/patches/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch b/patches/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..55de7d033a2a
--- /dev/null
+++ b/patches/0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,120 @@
+From 205dcc1ecbc566cbc20acf246e68de3b080b3ecf Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:36 +0200
+Subject: [PATCH 10/13] cpufreq/sh: Replace racy task affinity logic
+
+The target() callback must run on the affected cpu. This is achieved by
+temporarily setting the affinity of the calling thread to the requested CPU
+and resetting it to the original affinity afterwards.
+
+That's racy vs. concurrent affinity settings for that thread resulting in
+code executing on the wrong CPU.
+
+Replace it by work_on_cpu(). All call paths which invoke the callbacks are
+already protected against CPU hotplug.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: linux-pm@vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201042.958216363@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/cpufreq/sh-cpufreq.c | 45 +++++++++++++++++++++++++------------------
+ 1 file changed, 27 insertions(+), 18 deletions(-)
+
+--- a/drivers/cpufreq/sh-cpufreq.c
++++ b/drivers/cpufreq/sh-cpufreq.c
+@@ -30,54 +30,63 @@
+
+ static DEFINE_PER_CPU(struct clk, sh_cpuclk);
+
++struct cpufreq_target {
++ struct cpufreq_policy *policy;
++ unsigned int freq;
++};
++
+ static unsigned int sh_cpufreq_get(unsigned int cpu)
+ {
+ return (clk_get_rate(&per_cpu(sh_cpuclk, cpu)) + 500) / 1000;
+ }
+
+-/*
+- * Here we notify other drivers of the proposed change and the final change.
+- */
+-static int sh_cpufreq_target(struct cpufreq_policy *policy,
+- unsigned int target_freq,
+- unsigned int relation)
++static long __sh_cpufreq_target(void *arg)
+ {
+- unsigned int cpu = policy->cpu;
++ struct cpufreq_target *target = arg;
++ struct cpufreq_policy *policy = target->policy;
++ int cpu = policy->cpu;
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, cpu);
+- cpumask_t cpus_allowed;
+ struct cpufreq_freqs freqs;
+ struct device *dev;
+ long freq;
+
+- cpus_allowed = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+- BUG_ON(smp_processor_id() != cpu);
++ if (smp_processor_id() != cpu)
++ return -ENODEV;
+
+ dev = get_cpu_device(cpu);
+
+ /* Convert target_freq from kHz to Hz */
+- freq = clk_round_rate(cpuclk, target_freq * 1000);
++ freq = clk_round_rate(cpuclk, target->freq * 1000);
+
+ if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
+ return -EINVAL;
+
+- dev_dbg(dev, "requested frequency %u Hz\n", target_freq * 1000);
++ dev_dbg(dev, "requested frequency %u Hz\n", target->freq * 1000);
+
+ freqs.old = sh_cpufreq_get(cpu);
+ freqs.new = (freq + 500) / 1000;
+ freqs.flags = 0;
+
+- cpufreq_freq_transition_begin(policy, &freqs);
+- set_cpus_allowed_ptr(current, &cpus_allowed);
++ cpufreq_freq_transition_begin(target->policy, &freqs);
+ clk_set_rate(cpuclk, freq);
+- cpufreq_freq_transition_end(policy, &freqs, 0);
++ cpufreq_freq_transition_end(target->policy, &freqs, 0);
+
+ dev_dbg(dev, "set frequency %lu Hz\n", freq);
+-
+ return 0;
+ }
+
++/*
++ * Here we notify other drivers of the proposed change and the final change.
++ */
++static int sh_cpufreq_target(struct cpufreq_policy *policy,
++ unsigned int target_freq,
++ unsigned int relation)
++{
++ struct cpufreq_target data = { .policy = policy, .freq = target_freq };
++
++ return work_on_cpu(policy->cpu, __sh_cpufreq_target, &data);
++}
++
+ static int sh_cpufreq_verify(struct cpufreq_policy *policy)
+ {
+ struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
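The cpufreq conversions in this queue share the same shape: pack the request into a small on-stack struct, run the work on the target CPU with work_on_cpu(), and let the worker return -EAGAIN if it somehow ended up elsewhere. A minimal sketch of that shape, with invented names rather than the code of any particular driver:

#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct freq_request {			/* hypothetical request descriptor */
	unsigned int cpu;
	unsigned int state;
};

static long set_freq_on_cpu(void *arg)
{
	struct freq_request *req = arg;

	/* work_on_cpu() queued this on the workqueue bound to req->cpu. */
	if (smp_processor_id() != req->cpu)
		return -EAGAIN;

	/* ... program the hardware for req->state ... */
	return 0;
}

static int set_freq(unsigned int cpu, unsigned int state)
{
	struct freq_request req = { .cpu = cpu, .state = state };

	/* Sleeps until set_freq_on_cpu() has run on 'cpu'; returns its result. */
	return work_on_cpu(cpu, set_freq_on_cpu, &req);
}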
diff --git a/patches/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch b/patches/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
index 10af5d18f2ea..4b00ec745cd1 100644
--- a/patches/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
+++ b/patches/0010-futex-rt_mutex-Restructure-rt_mutex_finish_proxy_loc.patch
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -3030,10 +3030,13 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3032,10 +3032,13 @@ static int futex_wait_requeue_pi(u32 __u
*/
WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* haven't already.
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1743,21 +1743,23 @@ struct task_struct *rt_mutex_next_owner(
+@@ -1753,21 +1753,23 @@ struct task_struct *rt_mutex_next_owner(
}
/**
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
-@@ -1770,9 +1772,6 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1780,9 +1782,6 @@ int rt_mutex_finish_proxy_lock(struct rt
/* sleep on the mutex */
ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -1783,3 +1782,42 @@ int rt_mutex_finish_proxy_lock(struct rt
+@@ -1793,3 +1792,42 @@ int rt_mutex_finish_proxy_lock(struct rt
return ret;
}
diff --git a/patches/0010-iommu-vt-d-Adjust-system_state-checks.patch b/patches/0010-iommu-vt-d-Adjust-system_state-checks.patch
new file mode 100644
index 000000000000..8b5535e280e4
--- /dev/null
+++ b/patches/0010-iommu-vt-d-Adjust-system_state-checks.patch
@@ -0,0 +1,47 @@
+From b608fe356fe8328665445a26ec75dfac918c8c5d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:41 +0200
+Subject: [PATCH 10/17] iommu/vt-d: Adjust system_state checks
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state checks in dmar_parse_one_atsr() and
+dmar_iommu_notify_scope_dev() to handle the extra states.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Joerg Roedel <joro@8bytes.org>
+Cc: David Woodhouse <dwmw2@infradead.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: iommu@lists.linux-foundation.org
+Link: http://lkml.kernel.org/r/20170516184735.712365947@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ drivers/iommu/intel-iommu.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/iommu/intel-iommu.c
++++ b/drivers/iommu/intel-iommu.c
+@@ -4310,7 +4310,7 @@ int dmar_parse_one_atsr(struct acpi_dmar
+ struct acpi_dmar_atsr *atsr;
+ struct dmar_atsr_unit *atsru;
+
+- if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
++ if (system_state >= SYSTEM_RUNNING && !intel_iommu_enabled)
+ return 0;
+
+ atsr = container_of(hdr, struct acpi_dmar_atsr, header);
+@@ -4560,7 +4560,7 @@ int dmar_iommu_notify_scope_dev(struct d
+ struct acpi_dmar_atsr *atsr;
+ struct acpi_dmar_reserved_memory *rmrr;
+
+- if (!intel_iommu_enabled && system_state != SYSTEM_BOOTING)
++ if (!intel_iommu_enabled && system_state >= SYSTEM_RUNNING)
+ return 0;
+
+ list_for_each_entry(rmrru, &dmar_rmrr_units, list) {
diff --git a/patches/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch b/patches/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..ede702b81c21
--- /dev/null
+++ b/patches/0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,124 @@
+From 9fe24c4e92d3963d92d7d383e28ed098bd5689d8 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 12 Apr 2017 22:07:37 +0200
+Subject: [PATCH 11/13] cpufreq/sparc-us3: Replace racy task affinity logic
+
+The access to the safari config register in the CPU frequency functions
+must be executed on the target CPU. This is achieved by temporarily setting
+the affinity of the calling user space thread to the requested CPU and
+reset it to the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by a straightforward smp function call.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: linux-pm@vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170412201043.047558840@linutronix.de
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/cpufreq/sparc-us3-cpufreq.c | 46 ++++++++++++------------------------
+ 1 file changed, 16 insertions(+), 30 deletions(-)
+
+--- a/drivers/cpufreq/sparc-us3-cpufreq.c
++++ b/drivers/cpufreq/sparc-us3-cpufreq.c
+@@ -35,22 +35,28 @@ static struct us3_freq_percpu_info *us3_
+ #define SAFARI_CFG_DIV_32 0x0000000080000000UL
+ #define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL
+
+-static unsigned long read_safari_cfg(void)
++static void read_safari_cfg(void *arg)
+ {
+- unsigned long ret;
++ unsigned long ret, *val = arg;
+
+ __asm__ __volatile__("ldxa [%%g0] %1, %0"
+ : "=&r" (ret)
+ : "i" (ASI_SAFARI_CONFIG));
+- return ret;
++ *val = ret;
+ }
+
+-static void write_safari_cfg(unsigned long val)
++static void update_safari_cfg(void *arg)
+ {
++ unsigned long reg, *new_bits = arg;
++
++ read_safari_cfg(&reg);
++ reg &= ~SAFARI_CFG_DIV_MASK;
++ reg |= *new_bits;
++
+ __asm__ __volatile__("stxa %0, [%%g0] %1\n\t"
+ "membar #Sync"
+ : /* no outputs */
+- : "r" (val), "i" (ASI_SAFARI_CONFIG)
++ : "r" (reg), "i" (ASI_SAFARI_CONFIG)
+ : "memory");
+ }
+
+@@ -78,29 +84,17 @@ static unsigned long get_current_freq(un
+
+ static unsigned int us3_freq_get(unsigned int cpu)
+ {
+- cpumask_t cpus_allowed;
+ unsigned long reg;
+- unsigned int ret;
+-
+- cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+- reg = read_safari_cfg();
+- ret = get_current_freq(cpu, reg);
+
+- set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+- return ret;
++ if (smp_call_function_single(cpu, read_safari_cfg, &reg, 1))
++ return 0;
++ return get_current_freq(cpu, reg);
+ }
+
+ static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index)
+ {
+ unsigned int cpu = policy->cpu;
+- unsigned long new_bits, new_freq, reg;
+- cpumask_t cpus_allowed;
+-
+- cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
++ unsigned long new_bits, new_freq;
+
+ new_freq = sparc64_get_clock_tick(cpu) / 1000;
+ switch (index) {
+@@ -121,15 +115,7 @@ static int us3_freq_target(struct cpufre
+ BUG();
+ }
+
+- reg = read_safari_cfg();
+-
+- reg &= ~SAFARI_CFG_DIV_MASK;
+- reg |= new_bits;
+- write_safari_cfg(reg);
+-
+- set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+- return 0;
++ return smp_call_function_single(cpu, update_safari_cfg, &new_bits, 1);
+ }
+
+ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
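Where the per-CPU work is a short register access that must not sleep, the series uses smp_call_function_single() instead of work_on_cpu(), as in the sparc-us3 conversion above. A rough sketch of that variant (function names invented for illustration):

#include <linux/smp.h>

static void read_reg_on_cpu(void *arg)
{
	unsigned long *val = arg;

	/* Runs in IPI context on the target CPU; must not sleep. */
	*val = 0;	/* ... read the per-CPU register into *val ... */
}

static unsigned long read_reg(unsigned int cpu)
{
	unsigned long val;

	/* wait=1: return only after the remote CPU has run the function. */
	if (smp_call_function_single(cpu, read_reg_on_cpu, &val, 1))
		return 0;	/* CPU was offline */
	return val;
}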
diff --git a/patches/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch b/patches/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
index 84017cee0304..42c581b2187d 100644
--- a/patches/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
+++ b/patches/0011-futex-Rework-futex_lock_pi-to-use-rt_mutex_-_proxy_l.patch
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2097,20 +2097,7 @@ queue_unlock(struct futex_hash_bucket *h
+@@ -2099,20 +2099,7 @@ queue_unlock(struct futex_hash_bucket *h
hb_waiters_dec(hb);
}
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int prio;
-@@ -2127,6 +2114,24 @@ static inline void queue_me(struct futex
+@@ -2129,6 +2116,24 @@ static inline void queue_me(struct futex
plist_node_init(&q->list, prio);
plist_add(&q->list, &hb->chain);
q->task = current;
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock(&hb->lock);
}
-@@ -2585,6 +2590,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2587,6 +2592,7 @@ static int futex_lock_pi(u32 __user *uad
{
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct futex_hash_bucket *hb;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -2637,25 +2643,52 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2639,25 +2645,52 @@ static int futex_lock_pi(u32 __user *uad
}
}
@@ -207,7 +207,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1491,19 +1491,6 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1493,19 +1493,6 @@ int __sched rt_mutex_lock_interruptible(
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
/*
@@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Futex variant, must not use fastpath.
*/
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
-@@ -1772,12 +1759,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1782,12 +1769,6 @@ int rt_mutex_wait_proxy_lock(struct rt_m
/* sleep on the mutex */
ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
@@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
-@@ -1817,6 +1798,13 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1827,6 +1808,13 @@ bool rt_mutex_cleanup_proxy_lock(struct
fixup_rt_mutex_waiters(lock);
cleanup = true;
}
diff --git a/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch b/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
new file mode 100644
index 000000000000..9e3f51dfc438
--- /dev/null
+++ b/patches/0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
@@ -0,0 +1,88 @@
+From e560c89c8ac0baadf0da351f602c599016568fc7 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:22 +0200
+Subject: [PATCH 11/32] hwtracing/coresight-etm3x: Use
+ cpuhp_setup_state_nocalls_cpuslocked()
+
+etm_probe() holds get_online_cpus() while invoking
+cpuhp_setup_state_nocalls().
+
+cpuhp_setup_state_nocalls() invokes get_online_cpus() as well. This is
+correct, but prevents the conversion of the hotplug locking to a percpu
+rwsem.
+
+Use cpuhp_setup_state_nocalls_cpuslocked() to avoid the nested
+call. Convert *_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170524081547.889092478@linutronix.de
+---
+ drivers/hwtracing/coresight/coresight-etm3x.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c
+index a51b6b64ecdf..93ee8fc539be 100644
+--- a/drivers/hwtracing/coresight/coresight-etm3x.c
++++ b/drivers/hwtracing/coresight/coresight-etm3x.c
+@@ -587,7 +587,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
+ * after cpu online mask indicates the cpu is offline but before the
+ * DYING hotplug callback is serviced by the ETM driver.
+ */
+- get_online_cpus();
++ cpus_read_lock();
+ spin_lock(&drvdata->spinlock);
+
+ /*
+@@ -597,7 +597,7 @@ static void etm_disable_sysfs(struct coresight_device *csdev)
+ smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
+
+ spin_unlock(&drvdata->spinlock);
+- put_online_cpus();
++ cpus_read_unlock();
+
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
+ }
+@@ -795,7 +795,7 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+
+ drvdata->cpu = pdata ? pdata->cpu : 0;
+
+- get_online_cpus();
++ cpus_read_lock();
+ etmdrvdata[drvdata->cpu] = drvdata;
+
+ if (smp_call_function_single(drvdata->cpu,
+@@ -803,17 +803,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id)
+ dev_err(dev, "ETM arch init failed\n");
+
+ if (!etm_count++) {
+- cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+- "arm/coresight:starting",
+- etm_starting_cpu, etm_dying_cpu);
+- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+- "arm/coresight:online",
+- etm_online_cpu, NULL);
++ cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
++ "arm/coresight:starting",
++ etm_starting_cpu, etm_dying_cpu);
++ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
++ "arm/coresight:online",
++ etm_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
+- put_online_cpus();
++ cpus_read_unlock();
+
+ if (etm_arch_supported(drvdata->arch) == false) {
+ ret = -EINVAL;
+--
+2.11.0
+
diff --git a/patches/0012-async-Adjust-system_state-checks.patch b/patches/0012-async-Adjust-system_state-checks.patch
new file mode 100644
index 000000000000..7de0d1fc371f
--- /dev/null
+++ b/patches/0012-async-Adjust-system_state-checks.patch
@@ -0,0 +1,61 @@
+From b4def42724594cd399cfee365221f5b38639711d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:43 +0200
+Subject: [PATCH 12/17] async: Adjust system_state checks
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in async_run_entry_fn() and
+async_synchronize_cookie_domain() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Arjan van de Ven <arjan@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184735.865155020@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/async.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/kernel/async.c
++++ b/kernel/async.c
+@@ -114,14 +114,14 @@ static void async_run_entry_fn(struct wo
+ ktime_t uninitialized_var(calltime), delta, rettime;
+
+ /* 1) run (and print duration) */
+- if (initcall_debug && system_state == SYSTEM_BOOTING) {
++ if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ pr_debug("calling %lli_%pF @ %i\n",
+ (long long)entry->cookie,
+ entry->func, task_pid_nr(current));
+ calltime = ktime_get();
+ }
+ entry->func(entry->data, entry->cookie);
+- if (initcall_debug && system_state == SYSTEM_BOOTING) {
++ if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ pr_debug("initcall %lli_%pF returned 0 after %lld usecs\n",
+@@ -284,14 +284,14 @@ void async_synchronize_cookie_domain(asy
+ {
+ ktime_t uninitialized_var(starttime), delta, endtime;
+
+- if (initcall_debug && system_state == SYSTEM_BOOTING) {
++ if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ pr_debug("async_waiting @ %i\n", task_pid_nr(current));
+ starttime = ktime_get();
+ }
+
+ wait_event(async_done, lowest_in_progress(domain) >= cookie);
+
+- if (initcall_debug && system_state == SYSTEM_BOOTING) {
++ if (initcall_debug && system_state < SYSTEM_RUNNING) {
+ endtime = ktime_get();
+ delta = ktime_sub(endtime, starttime);
+
diff --git a/patches/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch b/patches/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..4f8155a4ea8a
--- /dev/null
+++ b/patches/0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,129 @@
+From 12699ac53a2e5fbd1fd7c164b11685d55c8aa28b Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 13 Apr 2017 10:22:43 +0200
+Subject: [PATCH 12/13] cpufreq/sparc-us2e: Replace racy task affinity logic
+
+The access to the HBIRD_ESTAR_MODE register in the cpu frequency control
+functions must happen on the target CPU. This is achieved by temporarily
+setting the affinity of the calling user space thread to the requested CPU
+and reset it to the original affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by a straightforward smp function call.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: Herbert Xu <herbert@gondor.apana.org.au>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: linux-pm@vger.kernel.org
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131020280.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/cpufreq/sparc-us2e-cpufreq.c | 45 ++++++++++++++++-------------------
+ 1 file changed, 21 insertions(+), 24 deletions(-)
+
+--- a/drivers/cpufreq/sparc-us2e-cpufreq.c
++++ b/drivers/cpufreq/sparc-us2e-cpufreq.c
+@@ -118,10 +118,6 @@ static void us2e_transition(unsigned lon
+ unsigned long clock_tick,
+ unsigned long old_divisor, unsigned long divisor)
+ {
+- unsigned long flags;
+-
+- local_irq_save(flags);
+-
+ estar &= ~ESTAR_MODE_DIV_MASK;
+
+ /* This is based upon the state transition diagram in the IIe manual. */
+@@ -152,8 +148,6 @@ static void us2e_transition(unsigned lon
+ } else {
+ BUG();
+ }
+-
+- local_irq_restore(flags);
+ }
+
+ static unsigned long index_to_estar_mode(unsigned int index)
+@@ -229,48 +223,51 @@ static unsigned long estar_to_divisor(un
+ return ret;
+ }
+
++static void __us2e_freq_get(void *arg)
++{
++ unsigned long *estar = arg;
++
++ *estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
++}
++
+ static unsigned int us2e_freq_get(unsigned int cpu)
+ {
+- cpumask_t cpus_allowed;
+ unsigned long clock_tick, estar;
+
+- cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+ clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+- estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+-
+- set_cpus_allowed_ptr(current, &cpus_allowed);
++ if (smp_call_function_single(cpu, __us2e_freq_get, &estar, 1))
++ return 0;
+
+ return clock_tick / estar_to_divisor(estar);
+ }
+
+-static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
++static void __us2e_freq_target(void *arg)
+ {
+- unsigned int cpu = policy->cpu;
++ unsigned int cpu = smp_processor_id();
++ unsigned int *index = arg;
+ unsigned long new_bits, new_freq;
+ unsigned long clock_tick, divisor, old_divisor, estar;
+- cpumask_t cpus_allowed;
+-
+- cpumask_copy(&cpus_allowed, &current->cpus_allowed);
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+
+ new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
+- new_bits = index_to_estar_mode(index);
+- divisor = index_to_divisor(index);
++ new_bits = index_to_estar_mode(*index);
++ divisor = index_to_divisor(*index);
+ new_freq /= divisor;
+
+ estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
+
+ old_divisor = estar_to_divisor(estar);
+
+- if (old_divisor != divisor)
++ if (old_divisor != divisor) {
+ us2e_transition(estar, new_bits, clock_tick * 1000,
+ old_divisor, divisor);
++ }
++}
+
+- set_cpus_allowed_ptr(current, &cpus_allowed);
++static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index)
++{
++ unsigned int cpu = policy->cpu;
+
+- return 0;
++ return smp_call_function_single(cpu, __us2e_freq_target, &index, 1);
+ }
+
+ static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy)
diff --git a/patches/0012-futex-Futex_unlock_pi-determinism.patch b/patches/0012-futex-Futex_unlock_pi-determinism.patch
index 9cd5ce650ff6..1715c5c79704 100644
--- a/patches/0012-futex-Futex_unlock_pi-determinism.patch
+++ b/patches/0012-futex-Futex_unlock_pi-determinism.patch
@@ -35,8 +35,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1396,15 +1396,10 @@ static int wake_futex_pi(u32 __user *uad
- WAKE_Q(wake_q);
+@@ -1398,15 +1398,10 @@ static int wake_futex_pi(u32 __user *uad
+ DEFINE_WAKE_Q(wake_q);
int ret = 0;
- raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* When this happens, give up our locks and try again, giving
* the futex_lock_pi() instance time to complete, either by
-@@ -2792,15 +2787,18 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2794,15 +2789,18 @@ static int futex_unlock_pi(u32 __user *u
if (pi_state->owner != current)
goto out_unlock;
diff --git a/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch b/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
new file mode 100644
index 000000000000..f4055c995111
--- /dev/null
+++ b/patches/0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
@@ -0,0 +1,89 @@
+From e9f5d63f84febb7e9dfe4e0dc696adf88053fbf2 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:23 +0200
+Subject: [PATCH 12/32] hwtracing/coresight-etm4x: Use
+ cpuhp_setup_state_nocalls_cpuslocked()
+
+etm4_probe() holds get_online_cpus() while invoking
+cpuhp_setup_state_nocalls().
+
+cpuhp_setup_state_nocalls() invokes get_online_cpus() as well. This is
+correct, but prevents the conversion of the hotplug locking to a percpu
+rwsem.
+
+Use cpuhp_setup_state_nocalls_cpuslocked() to avoid the nested
+call. Convert *_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170524081547.983493849@linutronix.de
+---
+ drivers/hwtracing/coresight/coresight-etm4x.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
+index d1340fb4e457..532adc9dd32a 100644
+--- a/drivers/hwtracing/coresight/coresight-etm4x.c
++++ b/drivers/hwtracing/coresight/coresight-etm4x.c
+@@ -371,7 +371,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
+ * after cpu online mask indicates the cpu is offline but before the
+ * DYING hotplug callback is serviced by the ETM driver.
+ */
+- get_online_cpus();
++ cpus_read_lock();
+ spin_lock(&drvdata->spinlock);
+
+ /*
+@@ -381,7 +381,7 @@ static void etm4_disable_sysfs(struct coresight_device *csdev)
+ smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
+
+ spin_unlock(&drvdata->spinlock);
+- put_online_cpus();
++ cpus_read_unlock();
+
+ dev_info(drvdata->dev, "ETM tracing disabled\n");
+ }
+@@ -982,7 +982,7 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+
+ drvdata->cpu = pdata ? pdata->cpu : 0;
+
+- get_online_cpus();
++ cpus_read_lock();
+ etmdrvdata[drvdata->cpu] = drvdata;
+
+ if (smp_call_function_single(drvdata->cpu,
+@@ -990,18 +990,18 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
+ dev_err(dev, "ETM arch init failed\n");
+
+ if (!etm4_count++) {
+- cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING,
+- "arm/coresight4:starting",
+- etm4_starting_cpu, etm4_dying_cpu);
+- ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+- "arm/coresight4:online",
+- etm4_online_cpu, NULL);
++ cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ARM_CORESIGHT_STARTING,
++ "arm/coresight4:starting",
++ etm4_starting_cpu, etm4_dying_cpu);
++ ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
++ "arm/coresight4:online",
++ etm4_online_cpu, NULL);
+ if (ret < 0)
+ goto err_arch_supported;
+ hp_online = ret;
+ }
+
+- put_online_cpus();
++ cpus_read_unlock();
+
+ if (etm4_arch_supported(drvdata->arch) == false) {
+ ret = -EINVAL;
+--
+2.11.0
+
diff --git a/patches/0013-crypto-N2-Replace-racy-task-affinity-logic.patch b/patches/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
new file mode 100644
index 000000000000..4cbb7f554e29
--- /dev/null
+++ b/patches/0013-crypto-N2-Replace-racy-task-affinity-logic.patch
@@ -0,0 +1,95 @@
+From 73810a069120aa831debb4d967310ab900f628ad Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Thu, 13 Apr 2017 10:20:23 +0200
+Subject: [PATCH 13/13] crypto: N2 - Replace racy task affinity logic
+
+spu_queue_register() needs to invoke setup functions on a particular
+CPU. This is achieved by temporarily setting the affinity of the
+calling user space thread to the requested CPU and reset it to the original
+affinity afterwards.
+
+That's racy vs. CPU hotplug and concurrent affinity settings for that
+thread resulting in code executing on the wrong CPU and overwriting the
+new affinity setting.
+
+Replace it by using work_on_cpu_safe() which guarantees to run the code on
+the requested CPU or to fail in case the CPU is offline.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Herbert Xu <herbert@gondor.apana.org.au>
+Acked-by: "David S. Miller" <davem@davemloft.net>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Tony Luck <tony.luck@intel.com>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Lai Jiangshan <jiangshanlai@gmail.com>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: linux-crypto@vger.kernel.org
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1704131019420.2408@nanos
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+---
+ drivers/crypto/n2_core.c | 31 ++++++++++++++++---------------
+ 1 file changed, 16 insertions(+), 15 deletions(-)
+
+--- a/drivers/crypto/n2_core.c
++++ b/drivers/crypto/n2_core.c
+@@ -65,6 +65,11 @@ struct spu_queue {
+ struct list_head list;
+ };
+
++struct spu_qreg {
++ struct spu_queue *queue;
++ unsigned long type;
++};
++
+ static struct spu_queue **cpu_to_cwq;
+ static struct spu_queue **cpu_to_mau;
+
+@@ -1631,31 +1636,27 @@ static void queue_cache_destroy(void)
+ kmem_cache_destroy(queue_cache[HV_NCS_QTYPE_CWQ - 1]);
+ }
+
+-static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
++static long spu_queue_register_workfn(void *arg)
+ {
+- cpumask_var_t old_allowed;
++ struct spu_qreg *qr = arg;
++ struct spu_queue *p = qr->queue;
++ unsigned long q_type = qr->type;
+ unsigned long hv_ret;
+
+- if (cpumask_empty(&p->sharing))
+- return -EINVAL;
+-
+- if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+- return -ENOMEM;
+-
+- cpumask_copy(old_allowed, &current->cpus_allowed);
+-
+- set_cpus_allowed_ptr(current, &p->sharing);
+-
+ hv_ret = sun4v_ncs_qconf(q_type, __pa(p->q),
+ CWQ_NUM_ENTRIES, &p->qhandle);
+ if (!hv_ret)
+ sun4v_ncs_sethead_marker(p->qhandle, 0);
+
+- set_cpus_allowed_ptr(current, old_allowed);
++ return hv_ret ? -EINVAL : 0;
++}
+
+- free_cpumask_var(old_allowed);
++static int spu_queue_register(struct spu_queue *p, unsigned long q_type)
++{
++ int cpu = cpumask_any_and(&p->sharing, cpu_online_mask);
++ struct spu_qreg qr = { .queue = p, .type = q_type };
+
+- return (hv_ret ? -EINVAL : 0);
++ return work_on_cpu_safe(cpu, spu_queue_register_workfn, &qr);
+ }
+
+ static int spu_queue_setup(struct spu_queue *p)
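work_on_cpu_safe(), used above, is the variant added earlier in this queue that additionally pins CPU hotplug while the work runs, so the chosen CPU cannot go away underneath the worker. A hedged usage sketch (identifiers are placeholders, not the n2_core code):

#include <linux/cpumask.h>
#include <linux/workqueue.h>

static long register_queue_workfn(void *arg)
{
	/* Executes on the CPU picked below; return 0 or a negative errno. */
	return 0;
}

static int register_queue(const struct cpumask *sharing)
{
	/* Pick any currently online CPU from the sharing mask. */
	int cpu = cpumask_any_and(sharing, cpu_online_mask);

	/*
	 * Unlike the old set_cpus_allowed_ptr() dance, this either runs the
	 * function on 'cpu' or fails if that CPU is offline.
	 */
	return work_on_cpu_safe(cpu, register_queue_workfn, NULL);
}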
diff --git a/patches/0013-extable-Adjust-system_state-checks.patch b/patches/0013-extable-Adjust-system_state-checks.patch
new file mode 100644
index 000000000000..b11d7de46e05
--- /dev/null
+++ b/patches/0013-extable-Adjust-system_state-checks.patch
@@ -0,0 +1,36 @@
+From 0594729c24d846889408a07057b5cc9e8d931419 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:44 +0200
+Subject: [PATCH 13/17] extable: Adjust system_state checks
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in core_kernel_text() to handle the extra
+states, i.e. to cover init text up to the point where the system switches
+to state RUNNING.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20170516184735.949992741@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/extable.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/extable.c
++++ b/kernel/extable.c
+@@ -75,7 +75,7 @@ int core_kernel_text(unsigned long addr)
+ addr < (unsigned long)_etext)
+ return 1;
+
+- if (system_state == SYSTEM_BOOTING &&
++ if (system_state < SYSTEM_RUNNING &&
+ init_kernel_text(addr))
+ return 1;
+ return 0;
diff --git a/patches/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch b/patches/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
index 2128174f26cd..f6e22ae19ffd 100644
--- a/patches/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
+++ b/patches/0013-futex-Drop-hb-lock-before-enqueueing-on-the-rtmutex.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2652,20 +2652,33 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2654,20 +2654,33 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(to))
hrtimer_start_expires(&to->timer, HRTIMER_MODE_ABS);
-@@ -2678,6 +2691,9 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2680,6 +2693,9 @@ static int futex_lock_pi(u32 __user *uad
* first acquire the hb->lock before removing the lock from the
* rt_mutex waitqueue, such that we can keep the hb and rt_mutex
* wait lists consistent.
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (ret && !rt_mutex_cleanup_proxy_lock(&q.pi_state->pi_mutex, &rt_waiter))
ret = 0;
-@@ -2789,10 +2805,6 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2791,10 +2807,6 @@ static int futex_unlock_pi(u32 __user *u
get_pi_state(pi_state);
/*
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* wake_futex_pi() must observe a state consistent with what we
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1659,31 +1659,14 @@ void rt_mutex_proxy_unlock(struct rt_mut
+@@ -1669,31 +1669,14 @@ void rt_mutex_proxy_unlock(struct rt_mut
rt_mutex_set_owner(lock, NULL);
}
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* We enforce deadlock detection for futexes */
ret = task_blocks_on_rt_mutex(lock, waiter, task,
-@@ -1702,12 +1685,36 @@ int rt_mutex_start_proxy_lock(struct rt_
+@@ -1712,12 +1695,36 @@ int rt_mutex_start_proxy_lock(struct rt_
if (unlikely(ret))
remove_waiter(lock, waiter);
diff --git a/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch b/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
new file mode 100644
index 000000000000..883a08ddb171
--- /dev/null
+++ b/patches/0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
@@ -0,0 +1,64 @@
+From 04b247c2ebdd6ba1c46c7c22546229a89760b43a Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:24 +0200
+Subject: [PATCH 13/32] perf/x86/intel/cqm: Use cpuhp_setup_state_cpuslocked()
+
+intel_cqm_init() holds get_online_cpus() while registering the hotplug
+callbacks.
+
+cpuhp_setup_state() invokes get_online_cpus() as well. This is correct, but
+prevents the conversion of the hotplug locking to a percpu rwsem.
+
+Use cpuhp_setup_state_cpuslocked() to avoid the nested call. Convert
+*_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Fenghua Yu <fenghua.yu@intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081548.075604046@linutronix.de
+---
+ arch/x86/events/intel/cqm.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
+index 8c00dc09a5d2..2521f771f2f5 100644
+--- a/arch/x86/events/intel/cqm.c
++++ b/arch/x86/events/intel/cqm.c
+@@ -1682,7 +1682,7 @@ static int __init intel_cqm_init(void)
+ *
+ * Also, check that the scales match on all cpus.
+ */
+- get_online_cpus();
++ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+@@ -1746,14 +1746,14 @@ static int __init intel_cqm_init(void)
+ * Setup the hot cpu notifier once we are sure cqm
+ * is enabled to avoid notifier leak.
+ */
+- cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING,
+- "perf/x86/cqm:starting",
+- intel_cqm_cpu_starting, NULL);
+- cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "perf/x86/cqm:online",
+- NULL, intel_cqm_cpu_exit);
+-
++ cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_STARTING,
++ "perf/x86/cqm:starting",
++ intel_cqm_cpu_starting, NULL);
++ cpuhp_setup_state_cpuslocked(CPUHP_AP_PERF_X86_CQM_ONLINE,
++ "perf/x86/cqm:online",
++ NULL, intel_cqm_cpu_exit);
+ out:
+- put_online_cpus();
++ cpus_read_unlock();
+
+ if (ret) {
+ kfree(str);
+--
+2.11.0
+
diff --git a/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch b/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
new file mode 100644
index 000000000000..2f0309d5b976
--- /dev/null
+++ b/patches/0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
@@ -0,0 +1,74 @@
+From fe2a5cd8aa038e2b02fda983afc2083e94c04b4f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:25 +0200
+Subject: [PATCH 14/32] ARM/hw_breakpoint: Use cpuhp_setup_state_cpuslocked()
+
+arch_hw_breakpoint_init() holds get_online_cpus() while registering the
+hotplug callbacks.
+
+cpuhp_setup_state() invokes get_online_cpus() as well. This is correct, but
+prevents the conversion of the hotplug locking to a percpu rwsem.
+
+Use cpuhp_setup_state_cpuslocked() to avoid the nested call. Convert
+*_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170524081548.170940729@linutronix.de
+---
+ arch/arm/kernel/hw_breakpoint.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
+index be3b3fbd382f..63cb4c7c6593 100644
+--- a/arch/arm/kernel/hw_breakpoint.c
++++ b/arch/arm/kernel/hw_breakpoint.c
+@@ -1090,7 +1090,7 @@ static int __init arch_hw_breakpoint_init(void)
+ * driven low on this core and there isn't an architected way to
+ * determine that.
+ */
+- get_online_cpus();
++ cpus_read_lock();
+ register_undef_hook(&debug_reg_hook);
+
+ /*
+@@ -1098,15 +1098,16 @@ static int __init arch_hw_breakpoint_init(void)
+ * assume that a halting debugger will leave the world in a nice state
+ * for us.
+ */
+- ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "arm/hw_breakpoint:online",
+- dbg_reset_online, NULL);
++ ret = cpuhp_setup_state_cpuslocked(CPUHP_AP_ONLINE_DYN,
++ "arm/hw_breakpoint:online",
++ dbg_reset_online, NULL);
+ unregister_undef_hook(&debug_reg_hook);
+ if (WARN_ON(ret < 0) || !cpumask_empty(&debug_err_mask)) {
+ core_num_brps = 0;
+ core_num_wrps = 0;
+ if (ret > 0)
+ cpuhp_remove_state_nocalls(ret);
+- put_online_cpus();
++ cpus_read_unlock();
+ return 0;
+ }
+
+@@ -1124,7 +1125,7 @@ static int __init arch_hw_breakpoint_init(void)
+ TRAP_HWBKPT, "watchpoint debug exception");
+ hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP,
+ TRAP_HWBKPT, "breakpoint debug exception");
+- put_online_cpus();
++ cpus_read_unlock();
+
+ /* Register PM notifiers. */
+ pm_init();
+--
+2.11.0
+
diff --git a/patches/0014-printk-Adjust-system_state-checks.patch b/patches/0014-printk-Adjust-system_state-checks.patch
new file mode 100644
index 000000000000..c1d9ee4d1166
--- /dev/null
+++ b/patches/0014-printk-Adjust-system_state-checks.patch
@@ -0,0 +1,35 @@
+From ff48cd26fc4889b9deb5f9333d3c61746e450b7f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:45 +0200
+Subject: [PATCH 14/17] printk: Adjust system_state checks
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in boot_delay_msec() to handle the extra
+states.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20170516184736.027534895@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ kernel/printk/printk.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -1176,7 +1176,7 @@ static void boot_delay_msec(int level)
+ unsigned long long k;
+ unsigned long timeout;
+
+- if ((boot_delay == 0 || system_state != SYSTEM_BOOTING)
++ if ((boot_delay == 0 || system_state >= SYSTEM_RUNNING)
+ || suppress_message_printing(level)) {
+ return;
+ }
diff --git a/patches/0015-mm-vmscan-Adjust-system_state-checks.patch b/patches/0015-mm-vmscan-Adjust-system_state-checks.patch
new file mode 100644
index 000000000000..9fef94c667a9
--- /dev/null
+++ b/patches/0015-mm-vmscan-Adjust-system_state-checks.patch
@@ -0,0 +1,39 @@
+From c6202adf3a0969514299cf10ff07376a84ad09bb Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:46 +0200
+Subject: [PATCH 15/17] mm/vmscan: Adjust system_state checks
+
+To enable smp_processor_id() and might_sleep() debug checks earlier, it's
+required to add system states between SYSTEM_BOOTING and SYSTEM_RUNNING.
+
+Adjust the system_state check in kswapd_run() to handle the extra states.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Michal Hocko <mhocko@suse.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/20170516184736.119158930@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ mm/vmscan.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3654,7 +3654,7 @@ int kswapd_run(int nid)
+ pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid);
+ if (IS_ERR(pgdat->kswapd)) {
+ /* failure at boot is fatal */
+- BUG_ON(system_state == SYSTEM_BOOTING);
++ BUG_ON(system_state < SYSTEM_RUNNING);
+ pr_err("Failed to start kswapd on node %d\n", nid);
+ ret = PTR_ERR(pgdat->kswapd);
+ pgdat->kswapd = NULL;
diff --git a/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch b/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch
new file mode 100644
index 000000000000..da46dc48abd6
--- /dev/null
+++ b/patches/0015-s390-kernel-Use-stop_machine_cpuslocked.patch
@@ -0,0 +1,49 @@
+From 2337e879e8805a630b418f3e73a98084d4724b83 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:26 +0200
+Subject: [PATCH 15/32] s390/kernel: Use stop_machine_cpuslocked()
+
+stp_work_fn() holds get_online_cpus() while invoking stop_machine().
+
+stop_machine() invokes get_online_cpus() as well. This is correct, but
+prevents the conversion of the hotplug locking to a percpu rwsem.
+
+Use stop_machine_cpuslocked() to avoid the nested call. Convert
+*_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: linux-s390@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: David Hildenbrand <dahi@linux.vnet.ibm.com>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Link: http://lkml.kernel.org/r/20170524081548.250203087@linutronix.de
+---
+ arch/s390/kernel/time.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
+index c3a52f9a69a0..192efdfac918 100644
+--- a/arch/s390/kernel/time.c
++++ b/arch/s390/kernel/time.c
+@@ -636,10 +636,10 @@ static void stp_work_fn(struct work_struct *work)
+ goto out_unlock;
+
+ memset(&stp_sync, 0, sizeof(stp_sync));
+- get_online_cpus();
++ cpus_read_lock();
+ atomic_set(&stp_sync.cpus, num_online_cpus() - 1);
+- stop_machine(stp_sync_clock, &stp_sync, cpu_online_mask);
+- put_online_cpus();
++ stop_machine_cpuslocked(stp_sync_clock, &stp_sync, cpu_online_mask);
++ cpus_read_unlock();
+
+ if (!check_sync_clock())
+ /*
+--
+2.11.0
+
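
The conversion above shows the pattern used throughout this series: a section that
already holds the hotplug lock must call the *_cpuslocked() variant instead of plain
stop_machine(), which would otherwise take the lock again. A minimal kernel-side sketch
of that pattern with hypothetical names (sync_callback and sync_all_cpus are
illustrations, not part of any patch in this queue):

#include <linux/atomic.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/stop_machine.h>

static atomic_t visited;

/* Executed on every CPU in the mask, with interrupts disabled. */
static int sync_callback(void *data)
{
	atomic_inc(&visited);		/* placeholder for the real per-CPU work */
	return 0;
}

static void sync_all_cpus(void)
{
	atomic_set(&visited, 0);

	cpus_read_lock();		/* replaces get_online_cpus() */
	/* The hotplug lock is already held here, so use the _cpuslocked variant. */
	stop_machine_cpuslocked(sync_callback, NULL, cpu_online_mask);
	cpus_read_unlock();		/* replaces put_online_cpus() */
}
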
diff --git a/patches/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch b/patches/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
new file mode 100644
index 000000000000..74763cdae860
--- /dev/null
+++ b/patches/0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
@@ -0,0 +1,60 @@
+From 69a78ff226fe0241ab6cb9dd961667be477e3cf7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:47 +0200
+Subject: [PATCH 16/17] init: Introduce SYSTEM_SCHEDULING state
+
+might_sleep() debugging and smp_processor_id() debugging should be active
+right after the scheduler starts working. The init task can invoke
+smp_processor_id() from preemptible context as it is pinned on the boot cpu
+until sched_init_smp() removes the pinning and lets it schedule on all
+non-isolated cpus.
+
+Add a new state which allows enabling those checks earlier and add it to
+the xen do_poweroff() function.
+
+No functional change.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Juergen Gross <jgross@suse.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184736.196214622@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ drivers/xen/manage.c | 1 +
+ include/linux/kernel.h | 6 +++++-
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/xen/manage.c
++++ b/drivers/xen/manage.c
+@@ -190,6 +190,7 @@ static void do_poweroff(void)
+ {
+ switch (system_state) {
+ case SYSTEM_BOOTING:
++ case SYSTEM_SCHEDULING:
+ orderly_poweroff(true);
+ break;
+ case SYSTEM_RUNNING:
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -488,9 +488,13 @@ extern int root_mountflags;
+
+ extern bool early_boot_irqs_disabled;
+
+-/* Values used for system_state */
++/*
++ * Values used for system_state. Ordering of the states must not be changed
++ * as code checks for <, <=, >, >= STATE.
++ */
+ extern enum system_states {
+ SYSTEM_BOOTING,
++ SYSTEM_SCHEDULING,
+ SYSTEM_RUNNING,
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
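
Because the enum values are ordered, callers can use relational comparisons instead of
enumerating states, which is why the vmscan patch earlier in this queue replaces
"system_state == SYSTEM_BOOTING" with "system_state < SYSTEM_RUNNING": the new
SYSTEM_SCHEDULING state is then covered automatically. A standalone userspace model of
the ordering (the enum mirrors the states visible in the hunk above; the program itself
is only an illustration):

#include <stdio.h>

enum system_states {
	SYSTEM_BOOTING,
	SYSTEM_SCHEDULING,	/* new: scheduler works, boot not yet finished */
	SYSTEM_RUNNING,
	SYSTEM_HALT,
	SYSTEM_POWER_OFF,
};

static const char *state_name[] = {
	"BOOTING", "SCHEDULING", "RUNNING", "HALT", "POWER_OFF",
};

int main(void)
{
	enum system_states s;

	for (s = SYSTEM_BOOTING; s <= SYSTEM_POWER_OFF; s++) {
		/* "still booting" now covers both BOOTING and SCHEDULING */
		printf("%-10s still_booting=%d\n", state_name[s], s < SYSTEM_RUNNING);
	}
	return 0;
}
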
diff --git a/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch b/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
new file mode 100644
index 000000000000..0317958715c4
--- /dev/null
+++ b/patches/0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
@@ -0,0 +1,56 @@
+From f9a69931c3959940538884d5962b770c3db75df5 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:27 +0200
+Subject: [PATCH 16/32] powerpc/powernv: Use stop_machine_cpuslocked()
+
+set_subcores_per_core() holds get_online_cpus() while invoking stop_machine().
+
+stop_machine() invokes get_online_cpus() as well. This is correct, but
+prevents the conversion of the hotplug locking to a percpu rwsem.
+
+Use stop_machine_cpuslocked() to avoid the nested call. Convert
+*_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: linuxppc-dev@lists.ozlabs.org
+Link: http://lkml.kernel.org/r/20170524081548.331016542@linutronix.de
+---
+ arch/powerpc/platforms/powernv/subcore.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/platforms/powernv/subcore.c b/arch/powerpc/platforms/powernv/subcore.c
+index 0babef11136f..e6230f104dd9 100644
+--- a/arch/powerpc/platforms/powernv/subcore.c
++++ b/arch/powerpc/platforms/powernv/subcore.c
+@@ -348,7 +348,7 @@ static int set_subcores_per_core(int new_mode)
+ state->master = 0;
+ }
+
+- get_online_cpus();
++ cpus_read_lock();
+
+ /* This cpu will update the globals before exiting stop machine */
+ this_cpu_ptr(&split_state)->master = 1;
+@@ -356,9 +356,10 @@ static int set_subcores_per_core(int new_mode)
+ /* Ensure state is consistent before we call the other cpus */
+ mb();
+
+- stop_machine(cpu_update_split_mode, &new_mode, cpu_online_mask);
++ stop_machine_cpuslocked(cpu_update_split_mode, &new_mode,
++ cpu_online_mask);
+
+- put_online_cpus();
++ cpus_read_unlock();
+
+ return 0;
+ }
+--
+2.11.0
+
diff --git a/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch b/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
new file mode 100644
index 000000000000..80b204a20340
--- /dev/null
+++ b/patches/0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
@@ -0,0 +1,41 @@
+From 210e21331fc3a396af640cec652be769d146e49f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:28 +0200
+Subject: [PATCH 17/32] cpu/hotplug: Use stop_machine_cpuslocked() in
+ takedown_cpu()
+
+takedown_cpu() is a cpu hotplug function invoking stop_machine(). The cpu
+hotplug machinery holds the hotplug lock for write.
+
+stop_machine() invokes get_online_cpus() as well. This is correct, but
+prevents the conversion of the hotplug locking to a percpu rwsem.
+
+Use stop_machine_cpuslocked() to avoid the nested call.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081548.423292433@linutronix.de
+---
+ kernel/cpu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index e4389ac55b65..142d889d9f69 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -701,7 +701,7 @@ static int takedown_cpu(unsigned int cpu)
+ /*
+ * So now all preempt/rcu users must observe !cpu_active().
+ */
+- err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
++ err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
+ if (err) {
+ /* CPU refused to die */
+ irq_unlock_sparse();
+--
+2.11.0
+
diff --git a/patches/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch b/patches/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
new file mode 100644
index 000000000000..2ee03f835dde
--- /dev/null
+++ b/patches/0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
@@ -0,0 +1,74 @@
+From 1c3c5eab171590f86edd8d31389d61dd1efe3037 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 16 May 2017 20:42:48 +0200
+Subject: [PATCH 17/17] sched/core: Enable might_sleep() and smp_processor_id()
+ checks early
+
+might_sleep() and smp_processor_id() checks are enabled after the boot
+process is done. That hides bugs in the SMP bringup and driver
+initialization code.
+
+Enable them right when the scheduler starts working, i.e. when the init task
+and kthreadd have been created and right before the idle task enables
+preemption.
+
+Tested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Mark Rutland <mark.rutland@arm.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170516184736.272225698@linutronix.de
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+---
+ init/main.c | 10 ++++++++++
+ kernel/sched/core.c | 4 +++-
+ lib/smp_processor_id.c | 2 +-
+ 3 files changed, 14 insertions(+), 2 deletions(-)
+
+--- a/init/main.c
++++ b/init/main.c
+@@ -414,6 +414,16 @@ static noinline void __ref rest_init(voi
+ rcu_read_lock();
+ kthreadd_task = find_task_by_pid_ns(pid, &init_pid_ns);
+ rcu_read_unlock();
++
++ /*
++ * Enable might_sleep() and smp_processor_id() checks.
++ * They cannot be enabled earlier because with CONFIG_PREEMPT=y
++ * kernel_thread() would trigger might_sleep() splats. With
++ * CONFIG_PREEMPT_VOLUNTARY=y the init task might have scheduled
++ * already, but it's stuck on the kthreadd_done completion.
++ */
++ system_state = SYSTEM_SCHEDULING;
++
+ complete(&kthreadd_done);
+
+ /*
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -6223,8 +6223,10 @@ void ___might_sleep(const char *file, in
+
+ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() &&
+ !is_idle_task(current)) ||
+- system_state != SYSTEM_RUNNING || oops_in_progress)
++ system_state == SYSTEM_BOOTING || system_state > SYSTEM_RUNNING ||
++ oops_in_progress)
+ return;
++
+ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
+ return;
+ prev_jiffy = jiffies;
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -28,7 +28,7 @@ notrace static unsigned int check_preemp
+ /*
+ * It is valid to assume CPU-locality during early bootup:
+ */
+- if (system_state != SYSTEM_RUNNING)
++ if (system_state < SYSTEM_SCHEDULING)
+ goto out;
+
+ /*
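
Taken together, the hunks above mean the system_state part of the gating enables the
might_sleep() checks in SYSTEM_SCHEDULING and SYSTEM_RUNNING only, while the
smp_processor_id() check is exempt only during SYSTEM_BOOTING. Restated as hypothetical
helpers, which are not part of the patch and merely paraphrase the conditions above:

#include <linux/kernel.h>

/* system_state part of the suppression test added to ___might_sleep() above. */
static inline bool might_sleep_state_checks_active(void)
{
	return system_state != SYSTEM_BOOTING && system_state <= SYSTEM_RUNNING;
}

/* Mirrors the check in lib/smp_processor_id.c: only early boot is exempt. */
static inline bool smp_processor_id_state_checks_active(void)
{
	return system_state >= SYSTEM_SCHEDULING;
}
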
diff --git a/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch b/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
new file mode 100644
index 000000000000..d53294f11b5d
--- /dev/null
+++ b/patches/0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
@@ -0,0 +1,37 @@
+From 27d3b157fee0bad264eb745d5c547e2e0676f1a2 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:29 +0200
+Subject: [PATCH 18/32] x86/perf: Drop EXPORT of perf_check_microcode
+
+The only caller is the microcode update, which cannot be modular.
+
+Drop the export.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Link: http://lkml.kernel.org/r/20170524081548.515204988@linutronix.de
+---
+ arch/x86/events/core.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
+index 580b60f5ac83..ac650d57ebf7 100644
+--- a/arch/x86/events/core.c
++++ b/arch/x86/events/core.c
+@@ -2224,7 +2224,6 @@ void perf_check_microcode(void)
+ if (x86_pmu.check_microcode)
+ x86_pmu.check_microcode();
+ }
+-EXPORT_SYMBOL_GPL(perf_check_microcode);
+
+ static struct pmu pmu = {
+ .pmu_enable = x86_pmu_enable,
+--
+2.11.0
+
diff --git a/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch b/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
new file mode 100644
index 000000000000..ae066c92721a
--- /dev/null
+++ b/patches/0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
@@ -0,0 +1,81 @@
+From 1ba143a5216fb148211160a0ecc1f8d3f92f06bb Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 24 May 2017 10:15:30 +0200
+Subject: [PATCH 19/32] perf/x86/intel: Drop get_online_cpus() in
+ intel_snb_check_microcode()
+
+If intel_snb_check_microcode() is invoked via
+ microcode_init -> perf_check_microcode -> intel_snb_check_microcode
+
+then get_online_cpus() is invoked nested. This works with the current
+implementation of get_online_cpus() but prevents converting it to a percpu
+rwsem.
+
+intel_snb_check_microcode() is also invoked from intel_sandybridge_quirk()
+unprotected.
+
+Drop get_online_cpus() from intel_snb_check_microcode() and add it to
+intel_sandybridge_quirk() so both call sites are protected.
+
+Convert *_online_cpus() to the new interfaces while at it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Borislav Petkov <bp@suse.de>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Link: http://lkml.kernel.org/r/20170524081548.594862191@linutronix.de
+---
+ arch/x86/events/intel/core.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index a6d91d4e37a1..b9174aacf42f 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -3410,12 +3410,10 @@ static void intel_snb_check_microcode(void)
+ int pebs_broken = 0;
+ int cpu;
+
+- get_online_cpus();
+ for_each_online_cpu(cpu) {
+ if ((pebs_broken = intel_snb_pebs_broken(cpu)))
+ break;
+ }
+- put_online_cpus();
+
+ if (pebs_broken == x86_pmu.pebs_broken)
+ return;
+@@ -3488,7 +3486,9 @@ static bool check_msr(unsigned long msr, u64 mask)
+ static __init void intel_sandybridge_quirk(void)
+ {
+ x86_pmu.check_microcode = intel_snb_check_microcode;
++ cpus_read_lock();
+ intel_snb_check_microcode();
++ cpus_read_unlock();
+ }
+
+ static const struct { int id; char *name; } intel_arch_events_map[] __initconst = {
+@@ -4112,13 +4112,12 @@ static __init int fixup_ht_bug(void)
+
+ lockup_detector_resume();
+
+- get_online_cpus();
++ cpus_read_lock();
+
+- for_each_online_cpu(c) {
++ for_each_online_cpu(c)
+ free_excl_cntrs(c);
+- }
+
+- put_online_cpus();
++ cpus_read_unlock();
+ pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
+ return 0;
+ }
+--
+2.11.0
+
diff --git a/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch b/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
new file mode 100644
index 000000000000..d0c58c21e355
--- /dev/null
+++ b/patches/0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
@@ -0,0 +1,92 @@
+From 1ddd45f8d76f0c15ec4e44073eeaaee6a806ee81 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:31 +0200
+Subject: [PATCH 20/32] PCI: Use cpu_hotplug_disable() instead of
+ get_online_cpus()
+
+Converting the hotplug locking, i.e. get_online_cpus(), to a percpu rwsem
+unearthed a circular lock dependency which was hidden from lockdep due to
+the lockdep annotation of get_online_cpus() which prevents lockdep from
+creating full dependency chains. There are several variants of this. An
+example is:
+
+Chain exists of:
+
+cpu_hotplug_lock.rw_sem --> drm_global_mutex --> &item->mutex
+
+CPU0 CPU1
+---- ----
+lock(&item->mutex);
+ lock(drm_global_mutex);
+ lock(&item->mutex);
+lock(cpu_hotplug_lock.rw_sem);
+
+because there are dependencies through workqueues. The call chain is:
+
+ get_online_cpus
+ apply_workqueue_attrs
+ __alloc_workqueue_key
+ ttm_mem_global_init
+ ast_ttm_mem_global_init
+ drm_global_item_ref
+ ast_mm_init
+ ast_driver_load
+ drm_dev_register
+ drm_get_pci_dev
+ ast_pci_probe
+ local_pci_probe
+ work_for_cpu_fn
+ process_one_work
+ worker_thread
+
+This is not a problem of get_online_cpus() recursion, it's a possible
+deadlock undetected by lockdep so far.
+
+The cure is to use cpu_hotplug_disable() instead of get_online_cpus() to
+protect the PCI probing.
+
+There is a side effect to this: cpu_hotplug_disable() makes a concurrent
+cpu hotplug attempt via the sysfs interfaces fail with -EBUSY, but PCI
+probing usually happens during the boot process where no interaction is
+possible. Any later invocations are infrequent enough and concurrent
+hotplug attempts are so unlikely that the danger of user space visible
+regressions is very close to zero. Anyway, that's preferable to a real
+deadlock.
+
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: linux-pci@vger.kernel.org
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081548.691198590@linutronix.de
+---
+ drivers/pci/pci-driver.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 192e7b681b96..5bf92fd983e5 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -349,13 +349,13 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+ if (node >= 0 && node != numa_node_id()) {
+ int cpu;
+
+- get_online_cpus();
++ cpu_hotplug_disable();
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ error = work_on_cpu(cpu, local_pci_probe, &ddi);
+ else
+ error = local_pci_probe(&ddi);
+- put_online_cpus();
++ cpu_hotplug_enable();
+ } else
+ error = local_pci_probe(&ddi);
+
+--
+2.11.0
+
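
For reference, cpu_hotplug_disable() does not take the hotplug read lock at all; it only
increments a counter that makes subsequent hotplug requests fail with -EBUSY, which is
why it drops out of the dependency chain shown above. A minimal sketch of the resulting
probe pattern with a hypothetical wrapper (the actual change is just the two-line
conversion in pci_call_probe()):

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/workqueue.h>

/* Run @fn on a CPU local to @node (assumes a valid node), else on this CPU. */
static long probe_on_node(int node, long (*fn)(void *), void *arg)
{
	long ret;
	int cpu;

	cpu_hotplug_disable();		/* concurrent hotplug now returns -EBUSY */

	cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
	if (cpu < nr_cpu_ids)
		ret = work_on_cpu(cpu, fn, arg);
	else
		ret = fn(arg);

	cpu_hotplug_enable();
	return ret;
}
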
diff --git a/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch b/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch
new file mode 100644
index 000000000000..9c53fb247437
--- /dev/null
+++ b/patches/0021-PCI-Replace-the-racy-recursion-prevention.patch
@@ -0,0 +1,135 @@
+From 0b2c2a71e6f07fb67e6f72817d39910f64d2e258 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:32 +0200
+Subject: [PATCH 21/32] PCI: Replace the racy recursion prevention
+
+pci_call_probe() can be called recursively when a physical function is probed
+and the probing creates virtual functions, which are populated via
+pci_bus_add_device() which in turn can end up calling pci_call_probe()
+again.
+
+The code has an interesting way to prevent recursing into the workqueue
+code. That's accomplished by checking whether the current task already runs
+on the numa node which is associated with the device.
+
+While that works to prevent the recursion into the workqueue code, it's
+racy versus normal execution as there is no guarantee that the node does
+not vanish after the check.
+
+There is another issue with this code. It dereferences cpumask_of_node()
+unconditionally without checking whether the node is available.
+
+Make the detection reliable by:
+
+ - Mark a probed device as 'is_probed' in pci_call_probe()
+
+ - Check in pci_call_probe for a virtual function. If it's a virtual
+ function and the associated physical function device is marked
+ 'is_probed' then this is a recursive call, so the call can be invoked in
+ the calling context.
+
+ - Add a check whether the node is online before dereferencing it.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: linux-pci@vger.kernel.org
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081548.771457199@linutronix.de
+---
+ drivers/pci/pci-driver.c | 47 +++++++++++++++++++++++++----------------------
+ include/linux/pci.h | 1 +
+ 2 files changed, 26 insertions(+), 22 deletions(-)
+
+diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
+index 5bf92fd983e5..fe6be6382505 100644
+--- a/drivers/pci/pci-driver.c
++++ b/drivers/pci/pci-driver.c
+@@ -320,10 +320,19 @@ static long local_pci_probe(void *_ddi)
+ return 0;
+ }
+
++static bool pci_physfn_is_probed(struct pci_dev *dev)
++{
++#ifdef CONFIG_PCI_IOV
++ return dev->is_virtfn && dev->physfn->is_probed;
++#else
++ return false;
++#endif
++}
++
+ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+ const struct pci_device_id *id)
+ {
+- int error, node;
++ int error, node, cpu;
+ struct drv_dev_and_id ddi = { drv, dev, id };
+
+ /*
+@@ -332,33 +341,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+ * on the right node.
+ */
+ node = dev_to_node(&dev->dev);
++ dev->is_probed = 1;
++
++ cpu_hotplug_disable();
+
+ /*
+- * On NUMA systems, we are likely to call a PF probe function using
+- * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+- * adds the VF devices via pci_bus_add_device()), we may re-enter
+- * this function to call the VF probe function. Calling
+- * work_on_cpu() again will cause a lockdep warning. Since VFs are
+- * always on the same node as the PF, we can work around this by
+- * avoiding work_on_cpu() when we're already on the correct node.
+- *
+- * Preemption is enabled, so it's theoretically unsafe to use
+- * numa_node_id(), but even if we run the probe function on the
+- * wrong node, it should be functionally correct.
++ * Prevent nesting work_on_cpu() for the case where a Virtual Function
++ * device is probed from work_on_cpu() of the Physical device.
+ */
+- if (node >= 0 && node != numa_node_id()) {
+- int cpu;
+-
+- cpu_hotplug_disable();
++ if (node < 0 || node >= MAX_NUMNODES || !node_online(node) ||
++ pci_physfn_is_probed(dev))
++ cpu = nr_cpu_ids;
++ else
+ cpu = cpumask_any_and(cpumask_of_node(node), cpu_online_mask);
+- if (cpu < nr_cpu_ids)
+- error = work_on_cpu(cpu, local_pci_probe, &ddi);
+- else
+- error = local_pci_probe(&ddi);
+- cpu_hotplug_enable();
+- } else
++
++ if (cpu < nr_cpu_ids)
++ error = work_on_cpu(cpu, local_pci_probe, &ddi);
++ else
+ error = local_pci_probe(&ddi);
+
++ dev->is_probed = 0;
++ cpu_hotplug_enable();
+ return error;
+ }
+
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 33c2b0b77429..5026f2ae86db 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -371,6 +371,7 @@ struct pci_dev {
+ unsigned int irq_managed:1;
+ unsigned int has_secondary_link:1;
+ unsigned int non_compliant_bars:1; /* broken BARs; ignore them */
++ unsigned int is_probed:1; /* device probing in progress */
+ pci_dev_flags_t dev_flags;
+ atomic_t enable_cnt; /* pci_enable_device has been called */
+
+--
+2.11.0
+
diff --git a/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch b/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
new file mode 100644
index 000000000000..b68af8458c1e
--- /dev/null
+++ b/patches/0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
@@ -0,0 +1,69 @@
+From fdaf0a51bad496289356d11d796095a293794b5f Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:33 +0200
+Subject: [PATCH 22/32] ACPI/processor: Use cpu_hotplug_disable() instead of
+ get_online_cpus()
+
+Converting the hotplug locking, i.e. get_online_cpus(), to a percpu rwsem
+unearthed a circular lock dependency which was hidden from lockdep due to
+the lockdep annotation of get_online_cpus() which prevents lockdep from
+creating full dependency chains.
+
+CPU0 CPU1
+---- ----
+lock((&wfc.work));
+ lock(cpu_hotplug_lock.rw_sem);
+ lock((&wfc.work));
+lock(cpu_hotplug_lock.rw_sem);
+
+This dependency is established via acpi_processor_start() which calls into
+the work queue code. And the work queue code establishes the reverse
+dependency.
+
+This is not a problem of get_online_cpus() recursion, it's a possible
+deadlock undetected by lockdep so far.
+
+The cure is to use cpu_hotplug_disable() instead of get_online_cpus() to
+protect the probing from acpi_processor_start().
+
+There is a side effect to this: cpu_hotplug_disable() makes a concurrent
+cpu hotplug attempt via the sysfs interfaces fail with -EBUSY, but that
+probing usually happens during the boot process where no interaction is
+possible. Any later invocations are infrequent enough and concurrent
+hotplug attempts are so unlikely that the danger of user space visible
+regressions is very close to zero. Anyway, that's preferable to a real
+deadlock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-acpi@vger.kernel.org
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170524081548.851588594@linutronix.de
+---
+ drivers/acpi/processor_driver.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
+index 8697a82bd465..591d1dd3f04e 100644
+--- a/drivers/acpi/processor_driver.c
++++ b/drivers/acpi/processor_driver.c
+@@ -268,9 +268,9 @@ static int acpi_processor_start(struct device *dev)
+ return -ENODEV;
+
+ /* Protect against concurrent CPU hotplug operations */
+- get_online_cpus();
++ cpu_hotplug_disable();
+ ret = __acpi_processor_start(device);
+- put_online_cpus();
++ cpu_hotplug_enable();
+ return ret;
+ }
+
+--
+2.11.0
+
diff --git a/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch b/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
new file mode 100644
index 000000000000..1485ee274f74
--- /dev/null
+++ b/patches/0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
@@ -0,0 +1,309 @@
+From a63fbed776c7124ce9f606234267c3c095b2680e Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:34 +0200
+Subject: [PATCH 23/32] perf/tracing/cpuhotplug: Fix locking order
+
+perf, tracing, kprobes and jump_labels have a gazillion ways to create
+dependency lock chains. Some of those involve nested invocations of
+get_online_cpus().
+
+The conversion of the hotplug locking to a percpu rwsem requires avoiding
+such nested calls. sys_perf_event_open() protects most of the syscall logic
+against cpu hotplug. This causes nested calls and lock inversions versus
+ftrace and kprobes in various interesting ways.
+
+It's impossible to move the hotplug locking to the outer end of all call
+chains in the involved facilities, so the hotplug protection in
+sys_perf_event_open() needs to be solved differently.
+
+Use the existing 'pmus_lock' mutex to protect a perf private online cpumask. This
+mutex is taken when the mask is updated in the cpu hotplug callbacks and
+can be taken in sys_perf_event_open() to protect the swhash setup/teardown
+code and when the final judgement about a valid event has to be made.
+
+[ tglx: Produced changelog and fixed the swhash interaction ]
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Link: http://lkml.kernel.org/r/20170524081548.930941109@linutronix.de
+---
+ include/linux/perf_event.h | 2 +
+ kernel/events/core.c | 106 ++++++++++++++++++++++++++++++++-------------
+ 2 files changed, 78 insertions(+), 30 deletions(-)
+
+diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
+index 24a635887f28..7d6aa29094b2 100644
+--- a/include/linux/perf_event.h
++++ b/include/linux/perf_event.h
+@@ -801,6 +801,8 @@ struct perf_cpu_context {
+
+ struct list_head sched_cb_entry;
+ int sched_cb_usage;
++
++ int online;
+ };
+
+ struct perf_output_handle {
+diff --git a/kernel/events/core.c b/kernel/events/core.c
+index 6e75a5c9412d..b97cda4d1777 100644
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -389,6 +389,7 @@ static atomic_t nr_switch_events __read_mostly;
+ static LIST_HEAD(pmus);
+ static DEFINE_MUTEX(pmus_lock);
+ static struct srcu_struct pmus_srcu;
++static cpumask_var_t perf_online_mask;
+
+ /*
+ * perf event paranoia level:
+@@ -3812,14 +3813,6 @@ find_get_context(struct pmu *pmu, struct task_struct *task,
+ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EACCES);
+
+- /*
+- * We could be clever and allow to attach a event to an
+- * offline CPU and activate it when the CPU comes up, but
+- * that's for later.
+- */
+- if (!cpu_online(cpu))
+- return ERR_PTR(-ENODEV);
+-
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+ ctx = &cpuctx->ctx;
+ get_ctx(ctx);
+@@ -7703,7 +7696,8 @@ static int swevent_hlist_get_cpu(int cpu)
+ int err = 0;
+
+ mutex_lock(&swhash->hlist_mutex);
+- if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
++ if (!swevent_hlist_deref(swhash) &&
++ cpumask_test_cpu(cpu, perf_online_mask)) {
+ struct swevent_hlist *hlist;
+
+ hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
+@@ -7724,7 +7718,7 @@ static int swevent_hlist_get(void)
+ {
+ int err, cpu, failed_cpu;
+
+- get_online_cpus();
++ mutex_lock(&pmus_lock);
+ for_each_possible_cpu(cpu) {
+ err = swevent_hlist_get_cpu(cpu);
+ if (err) {
+@@ -7732,8 +7726,7 @@ static int swevent_hlist_get(void)
+ goto fail;
+ }
+ }
+- put_online_cpus();
+-
++ mutex_unlock(&pmus_lock);
+ return 0;
+ fail:
+ for_each_possible_cpu(cpu) {
+@@ -7741,8 +7734,7 @@ static int swevent_hlist_get(void)
+ break;
+ swevent_hlist_put_cpu(cpu);
+ }
+-
+- put_online_cpus();
++ mutex_unlock(&pmus_lock);
+ return err;
+ }
+
+@@ -8920,7 +8912,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+ pmu->hrtimer_interval_ms = timer;
+
+ /* update all cpuctx for this PMU */
+- get_online_cpus();
++ cpus_read_lock();
+ for_each_online_cpu(cpu) {
+ struct perf_cpu_context *cpuctx;
+ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+@@ -8929,7 +8921,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
+ cpu_function_call(cpu,
+ (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
+ }
+- put_online_cpus();
++ cpus_read_unlock();
+ mutex_unlock(&mux_interval_mutex);
+
+ return count;
+@@ -9059,6 +9051,7 @@ int perf_pmu_register(struct pmu *pmu, const char *name, int type)
+ lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
+ lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
+ cpuctx->ctx.pmu = pmu;
++ cpuctx->online = cpumask_test_cpu(cpu, perf_online_mask);
+
+ __perf_mux_hrtimer_init(cpuctx, cpu);
+ }
+@@ -9882,12 +9875,10 @@ SYSCALL_DEFINE5(perf_event_open,
+ goto err_task;
+ }
+
+- get_online_cpus();
+-
+ if (task) {
+ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+ if (err)
+- goto err_cpus;
++ goto err_cred;
+
+ /*
+ * Reuse ptrace permission checks for now.
+@@ -10073,6 +10064,23 @@ SYSCALL_DEFINE5(perf_event_open,
+ goto err_locked;
+ }
+
++ if (!task) {
++ /*
++ * Check if the @cpu we're creating an event for is online.
++ *
++ * We use the perf_cpu_context::ctx::mutex to serialize against
++ * the hotplug notifiers. See perf_event_{init,exit}_cpu().
++ */
++ struct perf_cpu_context *cpuctx =
++ container_of(ctx, struct perf_cpu_context, ctx);
++
++ if (!cpuctx->online) {
++ err = -ENODEV;
++ goto err_locked;
++ }
++ }
++
++
+ /*
+ * Must be under the same ctx::mutex as perf_install_in_context(),
+ * because we need to serialize with concurrent event creation.
+@@ -10162,8 +10170,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ put_task_struct(task);
+ }
+
+- put_online_cpus();
+-
+ mutex_lock(&current->perf_event_mutex);
+ list_add_tail(&event->owner_entry, &current->perf_event_list);
+ mutex_unlock(&current->perf_event_mutex);
+@@ -10197,8 +10203,6 @@ SYSCALL_DEFINE5(perf_event_open,
+ err_cred:
+ if (task)
+ mutex_unlock(&task->signal->cred_guard_mutex);
+-err_cpus:
+- put_online_cpus();
+ err_task:
+ if (task)
+ put_task_struct(task);
+@@ -10253,6 +10257,21 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
+ goto err_unlock;
+ }
+
++ if (!task) {
++ /*
++ * Check if the @cpu we're creating an event for is online.
++ *
++ * We use the perf_cpu_context::ctx::mutex to serialize against
++ * the hotplug notifiers. See perf_event_{init,exit}_cpu().
++ */
++ struct perf_cpu_context *cpuctx =
++ container_of(ctx, struct perf_cpu_context, ctx);
++ if (!cpuctx->online) {
++ err = -ENODEV;
++ goto err_unlock;
++ }
++ }
++
+ if (!exclusive_event_installable(event, ctx)) {
+ err = -EBUSY;
+ goto err_unlock;
+@@ -10920,6 +10939,8 @@ static void __init perf_event_init_all_cpus(void)
+ struct swevent_htable *swhash;
+ int cpu;
+
++ zalloc_cpumask_var(&perf_online_mask, GFP_KERNEL);
++
+ for_each_possible_cpu(cpu) {
+ swhash = &per_cpu(swevent_htable, cpu);
+ mutex_init(&swhash->hlist_mutex);
+@@ -10935,7 +10956,7 @@ static void __init perf_event_init_all_cpus(void)
+ }
+ }
+
+-int perf_event_init_cpu(unsigned int cpu)
++void perf_swevent_init_cpu(unsigned int cpu)
+ {
+ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+
+@@ -10948,7 +10969,6 @@ int perf_event_init_cpu(unsigned int cpu)
+ rcu_assign_pointer(swhash->swevent_hlist, hlist);
+ }
+ mutex_unlock(&swhash->hlist_mutex);
+- return 0;
+ }
+
+ #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE
+@@ -10966,19 +10986,22 @@ static void __perf_event_exit_context(void *__info)
+
+ static void perf_event_exit_cpu_context(int cpu)
+ {
++ struct perf_cpu_context *cpuctx;
+ struct perf_event_context *ctx;
+ struct pmu *pmu;
+- int idx;
+
+- idx = srcu_read_lock(&pmus_srcu);
+- list_for_each_entry_rcu(pmu, &pmus, entry) {
+- ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
++ mutex_lock(&pmus_lock);
++ list_for_each_entry(pmu, &pmus, entry) {
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++ ctx = &cpuctx->ctx;
+
+ mutex_lock(&ctx->mutex);
+ smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
++ cpuctx->online = 0;
+ mutex_unlock(&ctx->mutex);
+ }
+- srcu_read_unlock(&pmus_srcu, idx);
++ cpumask_clear_cpu(cpu, perf_online_mask);
++ mutex_unlock(&pmus_lock);
+ }
+ #else
+
+@@ -10986,6 +11009,29 @@ static void perf_event_exit_cpu_context(int cpu) { }
+
+ #endif
+
++int perf_event_init_cpu(unsigned int cpu)
++{
++ struct perf_cpu_context *cpuctx;
++ struct perf_event_context *ctx;
++ struct pmu *pmu;
++
++ perf_swevent_init_cpu(cpu);
++
++ mutex_lock(&pmus_lock);
++ cpumask_set_cpu(cpu, perf_online_mask);
++ list_for_each_entry(pmu, &pmus, entry) {
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++ ctx = &cpuctx->ctx;
++
++ mutex_lock(&ctx->mutex);
++ cpuctx->online = 1;
++ mutex_unlock(&ctx->mutex);
++ }
++ mutex_unlock(&pmus_lock);
++
++ return 0;
++}
++
+ int perf_event_exit_cpu(unsigned int cpu)
+ {
+ perf_event_exit_cpu_context(cpu);
+--
+2.11.0
+
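
The structure introduced here (a subsystem-private online mask plus a mutex, updated
from the hotplug callbacks and consulted instead of cpu_online()) is reusable by any
subsystem that must not nest inside the hotplug lock. A minimal sketch under that
assumption, with hypothetical names that are not part of the perf patch:

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(subsys_lock);
static struct cpumask subsys_online_mask;

static int subsys_cpu_online(unsigned int cpu)
{
	mutex_lock(&subsys_lock);
	cpumask_set_cpu(cpu, &subsys_online_mask);
	mutex_unlock(&subsys_lock);
	return 0;
}

static int subsys_cpu_offline(unsigned int cpu)
{
	mutex_lock(&subsys_lock);
	cpumask_clear_cpu(cpu, &subsys_online_mask);
	mutex_unlock(&subsys_lock);
	return 0;
}

/* Called from syscall-like paths; never takes the hotplug lock. */
static bool subsys_cpu_usable(int cpu)
{
	bool ret;

	mutex_lock(&subsys_lock);
	ret = cpumask_test_cpu(cpu, &subsys_online_mask);
	mutex_unlock(&subsys_lock);
	return ret;
}

static int __init subsys_init(void)
{
	int ret;

	/* The online callback also runs for CPUs that are already up. */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "subsys:online",
				subsys_cpu_online, subsys_cpu_offline);
	return ret < 0 ? ret : 0;
}
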
diff --git a/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch b/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
new file mode 100644
index 000000000000..4031d4091409
--- /dev/null
+++ b/patches/0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
@@ -0,0 +1,221 @@
+From f2545b2d4ce13e068897ef60ae64dffe215f4152 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:35 +0200
+Subject: [PATCH 24/32] jump_label: Reorder hotplug lock and jump_label_lock
+
+The conversion of the hotplug locking to a percpu rwsem unearthed lock
+ordering issues all over the place.
+
+The jump_label code has two issues:
+
+ 1) Nested get_online_cpus() invocations
+
+ 2) Ordering problems vs. the cpus rwsem and the jump_label_mutex
+
+To cure these, the following lock order has been established:
+
+ cpus_rwsem -> jump_label_lock -> text_mutex
+
+Even if not all architectures need protection against CPU hotplug, taking
+cpus_rwsem before jump_label_lock is now mandatory in code paths which
+actually modify code and therefore need text_mutex protection.
+
+Move the get_online_cpus() invocations into the core jump label code and
+establish the proper lock order where required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: "David S. Miller" <davem@davemloft.net>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Chris Metcalf <cmetcalf@mellanox.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Jason Baron <jbaron@akamai.com>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Link: http://lkml.kernel.org/r/20170524081549.025830817@linutronix.de
+---
+ arch/mips/kernel/jump_label.c | 2 --
+ arch/sparc/kernel/jump_label.c | 2 --
+ arch/tile/kernel/jump_label.c | 2 --
+ arch/x86/kernel/jump_label.c | 2 --
+ kernel/jump_label.c | 20 ++++++++++++++------
+ 5 files changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
+index 3e586daa3a32..32e3168316cd 100644
+--- a/arch/mips/kernel/jump_label.c
++++ b/arch/mips/kernel/jump_label.c
+@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+ insn.word = 0; /* nop */
+ }
+
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+ insn_p->halfword[0] = insn.word >> 16;
+@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct jump_entry *e,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ #endif /* HAVE_JUMP_LABEL */
+diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c
+index 07933b9e9ce0..93adde1ac166 100644
+--- a/arch/sparc/kernel/jump_label.c
++++ b/arch/sparc/kernel/jump_label.c
+@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ val = 0x01000000;
+ }
+
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ *insn = val;
+ flushi(insn);
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ #endif
+diff --git a/arch/tile/kernel/jump_label.c b/arch/tile/kernel/jump_label.c
+index 07802d586988..93931a46625b 100644
+--- a/arch/tile/kernel/jump_label.c
++++ b/arch/tile/kernel/jump_label.c
+@@ -45,14 +45,12 @@ static void __jump_label_transform(struct jump_entry *e,
+ void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+ {
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+
+ __jump_label_transform(e, type);
+ flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
+
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ __init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
+diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
+index c37bd0f39c70..ab4f491da2a9 100644
+--- a/arch/x86/kernel/jump_label.c
++++ b/arch/x86/kernel/jump_label.c
+@@ -105,11 +105,9 @@ static void __jump_label_transform(struct jump_entry *entry,
+ void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+ {
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ __jump_label_transform(entry, type, NULL, 0);
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ static enum {
+diff --git a/kernel/jump_label.c b/kernel/jump_label.c
+index 6c9cb208ac48..d11c506a6ac3 100644
+--- a/kernel/jump_label.c
++++ b/kernel/jump_label.c
+@@ -15,6 +15,7 @@
+ #include <linux/static_key.h>
+ #include <linux/jump_label_ratelimit.h>
+ #include <linux/bug.h>
++#include <linux/cpu.h>
+
+ #ifdef HAVE_JUMP_LABEL
+
+@@ -124,6 +125,7 @@ void static_key_slow_inc(struct static_key *key)
+ return;
+ }
+
++ cpus_read_lock();
+ jump_label_lock();
+ if (atomic_read(&key->enabled) == 0) {
+ atomic_set(&key->enabled, -1);
+@@ -133,12 +135,14 @@ void static_key_slow_inc(struct static_key *key)
+ atomic_inc(&key->enabled);
+ }
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+ EXPORT_SYMBOL_GPL(static_key_slow_inc);
+
+ static void __static_key_slow_dec(struct static_key *key,
+ unsigned long rate_limit, struct delayed_work *work)
+ {
++ cpus_read_lock();
+ /*
+ * The negative count check is valid even when a negative
+ * key->enabled is in use by static_key_slow_inc(); a
+@@ -149,6 +153,7 @@ static void __static_key_slow_dec(struct static_key *key,
+ if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
+ WARN(atomic_read(&key->enabled) < 0,
+ "jump label: negative count!\n");
++ cpus_read_unlock();
+ return;
+ }
+
+@@ -159,6 +164,7 @@ static void __static_key_slow_dec(struct static_key *key,
+ jump_label_update(key);
+ }
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+
+ static void jump_label_update_timeout(struct work_struct *work)
+@@ -334,6 +340,7 @@ void __init jump_label_init(void)
+ if (static_key_initialized)
+ return;
+
++ cpus_read_lock();
+ jump_label_lock();
+ jump_label_sort_entries(iter_start, iter_stop);
+
+@@ -353,6 +360,7 @@ void __init jump_label_init(void)
+ }
+ static_key_initialized = true;
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+
+ #ifdef CONFIG_MODULES
+@@ -590,28 +598,28 @@ jump_label_module_notify(struct notifier_block *self, unsigned long val,
+ struct module *mod = data;
+ int ret = 0;
+
++ cpus_read_lock();
++ jump_label_lock();
++
+ switch (val) {
+ case MODULE_STATE_COMING:
+- jump_label_lock();
+ ret = jump_label_add_module(mod);
+ if (ret) {
+ WARN(1, "Failed to allocatote memory: jump_label may not work properly.\n");
+ jump_label_del_module(mod);
+ }
+- jump_label_unlock();
+ break;
+ case MODULE_STATE_GOING:
+- jump_label_lock();
+ jump_label_del_module(mod);
+- jump_label_unlock();
+ break;
+ case MODULE_STATE_LIVE:
+- jump_label_lock();
+ jump_label_invalidate_module_init(mod);
+- jump_label_unlock();
+ break;
+ }
+
++ jump_label_unlock();
++ cpus_read_unlock();
++
+ return notifier_from_errno(ret);
+ }
+
+--
+2.11.0
+
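
The resulting nesting for every code-patching path is therefore fixed: cpus_read_lock(),
then jump_label_lock(), then text_mutex. A compressed sketch of how an update is now
bracketed (the function is illustrative only; the real logic is spread across
kernel/jump_label.c and the arch_jump_label_transform() implementations):

#include <linux/cpu.h>
#include <linux/jump_label.h>
#include <linux/memory.h>	/* text_mutex */
#include <linux/mutex.h>

static void example_static_key_update(struct static_key *key)
{
	cpus_read_lock();		/* 1: hotplug rwsem, always outermost */
	jump_label_lock();		/* 2: jump_label_mutex */

	mutex_lock(&text_mutex);	/* 3: protects the instruction rewrite */
	/* arch_jump_label_transform() would patch the jump sites of @key here */
	mutex_unlock(&text_mutex);

	jump_label_unlock();
	cpus_read_unlock();
}
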
diff --git a/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch b/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
new file mode 100644
index 000000000000..19126208a19c
--- /dev/null
+++ b/patches/0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
@@ -0,0 +1,264 @@
+From 2d1e38f56622b9bb5af85be63c1052c056f5c677 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:36 +0200
+Subject: [PATCH 25/32] kprobes: Cure hotplug lock ordering issues
+
+Converting the cpu hotplug locking to a percpu rwsem unearthed hidden lock
+ordering problems.
+
+There is a wide range of locks involved in this: kprobe_mutex,
+jump_label_mutex, ftrace_lock, text_mutex, event_mutex, module_mutex,
+func_hash->regex_lock and a gazillion lock order permutations with
+nested get_online_cpus() calls.
+
+Some of those permutations are potential deadlocks even with the current
+nesting hotplug locking scheme, but they can't be discovered by lockdep.
+
+The conversion of the hotplug locking to a percpu rwsem requires preventing
+nested locking, so the hotplug rwsem must be taken early in the call chain
+and a proper lock order established.
+
+After quite some analysis and going down the wrong road several times, the
+following lock order has been chosen:
+
+kprobe_mutex -> cpus_rwsem -> jump_label_mutex -> text_mutex
+
+For kprobes which hook on an ftrace function trace point, it's required to
+drop cpus_rwsem before calling into the ftrace code to avoid a deadlock on
+the func_hash->regex_lock.
+
+[ Steven: Ftrace interaction fixes ]
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Link: http://lkml.kernel.org/r/20170524081549.104864779@linutronix.de
+---
+ kernel/kprobes.c | 59 ++++++++++++++++++++++++++++++--------------------------
+ 1 file changed, 32 insertions(+), 27 deletions(-)
+
+diff --git a/kernel/kprobes.c b/kernel/kprobes.c
+index 2d2d3a568e4e..9f6056749a28 100644
+--- a/kernel/kprobes.c
++++ b/kernel/kprobes.c
+@@ -483,11 +483,6 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
+ */
+ static void do_optimize_kprobes(void)
+ {
+- /* Optimization never be done when disarmed */
+- if (kprobes_all_disarmed || !kprobes_allow_optimization ||
+- list_empty(&optimizing_list))
+- return;
+-
+ /*
+ * The optimization/unoptimization refers online_cpus via
+ * stop_machine() and cpu-hotplug modifies online_cpus.
+@@ -495,14 +490,19 @@ static void do_optimize_kprobes(void)
+ * This combination can cause a deadlock (cpu-hotplug try to lock
+ * text_mutex but stop_machine can not be done because online_cpus
+ * has been changed)
+- * To avoid this deadlock, we need to call get_online_cpus()
++ * To avoid this deadlock, caller must have locked cpu hotplug
+ * for preventing cpu-hotplug outside of text_mutex locking.
+ */
+- get_online_cpus();
++ lockdep_assert_cpus_held();
++
++ /* Optimization never be done when disarmed */
++ if (kprobes_all_disarmed || !kprobes_allow_optimization ||
++ list_empty(&optimizing_list))
++ return;
++
+ mutex_lock(&text_mutex);
+ arch_optimize_kprobes(&optimizing_list);
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ /*
+@@ -513,12 +513,13 @@ static void do_unoptimize_kprobes(void)
+ {
+ struct optimized_kprobe *op, *tmp;
+
++ /* See comment in do_optimize_kprobes() */
++ lockdep_assert_cpus_held();
++
+ /* Unoptimization must be done anytime */
+ if (list_empty(&unoptimizing_list))
+ return;
+
+- /* Ditto to do_optimize_kprobes */
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+ arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
+ /* Loop free_list for disarming */
+@@ -537,7 +538,6 @@ static void do_unoptimize_kprobes(void)
+ list_del_init(&op->list);
+ }
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ }
+
+ /* Reclaim all kprobes on the free_list */
+@@ -562,6 +562,7 @@ static void kick_kprobe_optimizer(void)
+ static void kprobe_optimizer(struct work_struct *work)
+ {
+ mutex_lock(&kprobe_mutex);
++ cpus_read_lock();
+ /* Lock modules while optimizing kprobes */
+ mutex_lock(&module_mutex);
+
+@@ -587,6 +588,7 @@ static void kprobe_optimizer(struct work_struct *work)
+ do_free_cleaned_kprobes();
+
+ mutex_unlock(&module_mutex);
++ cpus_read_unlock();
+ mutex_unlock(&kprobe_mutex);
+
+ /* Step 5: Kick optimizer again if needed */
+@@ -650,9 +652,8 @@ static void optimize_kprobe(struct kprobe *p)
+ /* Short cut to direct unoptimizing */
+ static void force_unoptimize_kprobe(struct optimized_kprobe *op)
+ {
+- get_online_cpus();
++ lockdep_assert_cpus_held();
+ arch_unoptimize_kprobe(op);
+- put_online_cpus();
+ if (kprobe_disabled(&op->kp))
+ arch_disarm_kprobe(&op->kp);
+ }
+@@ -791,6 +792,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
+ return;
+
+ /* For preparing optimization, jump_label_text_reserved() is called */
++ cpus_read_lock();
+ jump_label_lock();
+ mutex_lock(&text_mutex);
+
+@@ -812,6 +814,7 @@ static void try_to_optimize_kprobe(struct kprobe *p)
+ out:
+ mutex_unlock(&text_mutex);
+ jump_label_unlock();
++ cpus_read_unlock();
+ }
+
+ #ifdef CONFIG_SYSCTL
+@@ -826,6 +829,7 @@ static void optimize_all_kprobes(void)
+ if (kprobes_allow_optimization)
+ goto out;
+
++ cpus_read_lock();
+ kprobes_allow_optimization = true;
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+@@ -833,6 +837,7 @@ static void optimize_all_kprobes(void)
+ if (!kprobe_disabled(p))
+ optimize_kprobe(p);
+ }
++ cpus_read_unlock();
+ printk(KERN_INFO "Kprobes globally optimized\n");
+ out:
+ mutex_unlock(&kprobe_mutex);
+@@ -851,6 +856,7 @@ static void unoptimize_all_kprobes(void)
+ return;
+ }
+
++ cpus_read_lock();
+ kprobes_allow_optimization = false;
+ for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
+ head = &kprobe_table[i];
+@@ -859,6 +865,7 @@ static void unoptimize_all_kprobes(void)
+ unoptimize_kprobe(p, false);
+ }
+ }
++ cpus_read_unlock();
+ mutex_unlock(&kprobe_mutex);
+
+ /* Wait for unoptimizing completion */
+@@ -1010,14 +1017,11 @@ static void arm_kprobe(struct kprobe *kp)
+ arm_kprobe_ftrace(kp);
+ return;
+ }
+- /*
+- * Here, since __arm_kprobe() doesn't use stop_machine(),
+- * this doesn't cause deadlock on text_mutex. So, we don't
+- * need get_online_cpus().
+- */
++ cpus_read_lock();
+ mutex_lock(&text_mutex);
+ __arm_kprobe(kp);
+ mutex_unlock(&text_mutex);
++ cpus_read_unlock();
+ }
+
+ /* Disarm a kprobe with text_mutex */
+@@ -1027,10 +1031,12 @@ static void disarm_kprobe(struct kprobe *kp, bool reopt)
+ disarm_kprobe_ftrace(kp);
+ return;
+ }
+- /* Ditto */
++
++ cpus_read_lock();
+ mutex_lock(&text_mutex);
+ __disarm_kprobe(kp, reopt);
+ mutex_unlock(&text_mutex);
++ cpus_read_unlock();
+ }
+
+ /*
+@@ -1298,13 +1304,10 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
+ int ret = 0;
+ struct kprobe *ap = orig_p;
+
++ cpus_read_lock();
++
+ /* For preparing optimization, jump_label_text_reserved() is called */
+ jump_label_lock();
+- /*
+- * Get online CPUs to avoid text_mutex deadlock.with stop machine,
+- * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+- */
+- get_online_cpus();
+ mutex_lock(&text_mutex);
+
+ if (!kprobe_aggrprobe(orig_p)) {
+@@ -1352,8 +1355,8 @@ static int register_aggr_kprobe(struct kprobe *orig_p, struct kprobe *p)
+
+ out:
+ mutex_unlock(&text_mutex);
+- put_online_cpus();
+ jump_label_unlock();
++ cpus_read_unlock();
+
+ if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+ ap->flags &= ~KPROBE_FLAG_DISABLED;
+@@ -1555,9 +1558,12 @@ int register_kprobe(struct kprobe *p)
+ goto out;
+ }
+
+- mutex_lock(&text_mutex); /* Avoiding text modification */
++ cpus_read_lock();
++ /* Prevent text modification */
++ mutex_lock(&text_mutex);
+ ret = prepare_kprobe(p);
+ mutex_unlock(&text_mutex);
++ cpus_read_unlock();
+ if (ret)
+ goto out;
+
+@@ -1570,7 +1576,6 @@ int register_kprobe(struct kprobe *p)
+
+ /* Try to optimize kprobe */
+ try_to_optimize_kprobe(p);
+-
+ out:
+ mutex_unlock(&kprobe_mutex);
+
+--
+2.11.0
+
diff --git a/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch b/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
new file mode 100644
index 000000000000..7f75a4abaa0b
--- /dev/null
+++ b/patches/0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
@@ -0,0 +1,66 @@
+From c23a465625e287c4deba0fdf5e8adc59cfd2a0b7 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:37 +0200
+Subject: [PATCH 26/32] arm64: Prevent cpu hotplug rwsem recursion
+
+The text patching functions which are invoked from jump_label and kprobes
+code are protected against cpu hotplug at the call sites.
+
+Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
+rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch any
+unprotected callers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Will Deacon <will.deacon@arm.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170524081549.197070135@linutronix.de
+---
+ arch/arm64/include/asm/insn.h | 1 -
+ arch/arm64/kernel/insn.c | 5 +++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
+index 29cb2ca756f6..4214c38d016b 100644
+--- a/arch/arm64/include/asm/insn.h
++++ b/arch/arm64/include/asm/insn.h
+@@ -433,7 +433,6 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset);
+ bool aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn);
+
+ int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
+-int aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt);
+ int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
+
+ s32 aarch64_insn_adrp_get_offset(u32 insn);
+diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
+index b884a926a632..cd872133e88e 100644
+--- a/arch/arm64/kernel/insn.c
++++ b/arch/arm64/kernel/insn.c
+@@ -255,6 +255,7 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
+ return ret;
+ }
+
++static
+ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+ {
+ struct aarch64_insn_patch patch = {
+@@ -267,8 +268,8 @@ int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
+ if (cnt <= 0)
+ return -EINVAL;
+
+- return stop_machine(aarch64_insn_patch_text_cb, &patch,
+- cpu_online_mask);
++ return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
++ cpu_online_mask);
+ }
+
+ int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
+--
+2.11.0
+
diff --git a/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch b/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch
new file mode 100644
index 000000000000..0e61074ae428
--- /dev/null
+++ b/patches/0027-arm-Prevent-hotplug-rwsem-recursion.patch
@@ -0,0 +1,54 @@
+From 9489cc8f370be811f7e741a772bcce88b712272d Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:38 +0200
+Subject: [PATCH 27/32] arm: Prevent hotplug rwsem recursion
+
+The text patching functions which are invoked from jump_label and kprobes
+code are protected against cpu hotplug at the call sites.
+
+Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
+rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch any
+unprotected callers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: linux-arm-kernel@lists.infradead.org
+Link: http://lkml.kernel.org/r/20170524081549.275871311@linutronix.de
+---
+ arch/arm/kernel/patch.c | 2 +-
+ arch/arm/probes/kprobes/core.c | 3 ++-
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
+index 020560b2dcb7..a1a34722c655 100644
+--- a/arch/arm/kernel/patch.c
++++ b/arch/arm/kernel/patch.c
+@@ -124,5 +124,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
+ .insn = insn,
+ };
+
+- stop_machine(patch_text_stop_machine, &patch, NULL);
++ stop_machine_cpuslocked(patch_text_stop_machine, &patch, NULL);
+ }
+diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
+index ad1f4e6a9e33..52d1cd14fda4 100644
+--- a/arch/arm/probes/kprobes/core.c
++++ b/arch/arm/probes/kprobes/core.c
+@@ -182,7 +182,8 @@ void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
+ .addr = addr,
+ .insn = insn,
+ };
+- stop_machine(__kprobes_remove_breakpoint, &p, cpu_online_mask);
++ stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
++ cpu_online_mask);
+ }
+
+ void __kprobes arch_disarm_kprobe(struct kprobe *p)
+--
+2.11.0
+
diff --git a/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch b/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch
new file mode 100644
index 000000000000..ec94de3bbaec
--- /dev/null
+++ b/patches/0028-s390-Prevent-hotplug-rwsem-recursion.patch
@@ -0,0 +1,65 @@
+From 5d5dbc4ef27e72104dea6102e4d1a1bf5a8ed971 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:39 +0200
+Subject: [PATCH 28/32] s390: Prevent hotplug rwsem recursion
+
+The text patching functions which are invoked from jump_label and kprobes
+code are protected against cpu hotplug at the call sites.
+
+Use stop_machine_cpuslocked() to avoid recursion on the cpu hotplug
+rwsem. stop_machine_cpuslocked() contains a lockdep assertion to catch any
+unprotected callers.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Acked-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: linux-s390@vger.kernel.org
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Link: http://lkml.kernel.org/r/20170524081549.354513406@linutronix.de
+---
+ arch/s390/kernel/jump_label.c | 2 +-
+ arch/s390/kernel/kprobes.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/s390/kernel/jump_label.c b/arch/s390/kernel/jump_label.c
+index 6aa630a8d24f..262506cee4c3 100644
+--- a/arch/s390/kernel/jump_label.c
++++ b/arch/s390/kernel/jump_label.c
+@@ -93,7 +93,7 @@ void arch_jump_label_transform(struct jump_entry *entry,
+ args.entry = entry;
+ args.type = type;
+
+- stop_machine(__sm_arch_jump_label_transform, &args, NULL);
++ stop_machine_cpuslocked(__sm_arch_jump_label_transform, &args, NULL);
+ }
+
+ void arch_jump_label_transform_static(struct jump_entry *entry,
+diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
+index 3d6a99746454..6842e4501e2e 100644
+--- a/arch/s390/kernel/kprobes.c
++++ b/arch/s390/kernel/kprobes.c
+@@ -196,7 +196,7 @@ void arch_arm_kprobe(struct kprobe *p)
+ {
+ struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
+
+- stop_machine(swap_instruction, &args, NULL);
++ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
+ NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+@@ -204,7 +204,7 @@ void arch_disarm_kprobe(struct kprobe *p)
+ {
+ struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
+
+- stop_machine(swap_instruction, &args, NULL);
++ stop_machine_cpuslocked(swap_instruction, &args, NULL);
+ }
+ NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+--
+2.11.0
+
diff --git a/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch b/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
new file mode 100644
index 000000000000..25e340a31c1f
--- /dev/null
+++ b/patches/0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
@@ -0,0 +1,193 @@
+From fc8dffd379ca5620664336eb895a426b42847558 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:40 +0200
+Subject: [PATCH 29/32] cpu/hotplug: Convert hotplug locking to percpu rwsem
+
+There are no more (known) nested calls to get_online_cpus() and all
+observed lock ordering problems have been addressed.
+
+Replace the magic nested 'rwsem' hackery with a percpu-rwsem.
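+
+For illustration (do_something() is hypothetical, not part of this patch),
+a reader section now maps directly onto a percpu-rwsem read side:
+
+    cpus_read_lock();           /* percpu_down_read(&cpu_hotplug_lock) */
+    for_each_online_cpu(cpu)
+        do_something(cpu);      /* no CPU can come or go in here */
+    cpus_read_unlock();         /* percpu_up_read(&cpu_hotplug_lock) */
+
+The write side (the hotplug operations themselves) takes the same lock via
+percpu_down_write()/percpu_up_write() in cpus_write_lock()/cpus_write_unlock().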
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081549.447014063@linutronix.de
+---
+ include/linux/cpu.h | 2 +-
+ kernel/cpu.c | 107 +++++++---------------------------------------------
+ 2 files changed, 14 insertions(+), 95 deletions(-)
+
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index af4d660798e5..ca73bc1563f4 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -103,7 +103,7 @@ extern void cpus_write_lock(void);
+ extern void cpus_write_unlock(void);
+ extern void cpus_read_lock(void);
+ extern void cpus_read_unlock(void);
+-static inline void lockdep_assert_cpus_held(void) { }
++extern void lockdep_assert_cpus_held(void);
+ extern void cpu_hotplug_disable(void);
+ extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 142d889d9f69..66836216ebae 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -27,6 +27,7 @@
+ #include <linux/smpboot.h>
+ #include <linux/relay.h>
+ #include <linux/slab.h>
++#include <linux/percpu-rwsem.h>
+
+ #include <trace/events/power.h>
+ #define CREATE_TRACE_POINTS
+@@ -196,121 +197,41 @@ void cpu_maps_update_done(void)
+ mutex_unlock(&cpu_add_remove_lock);
+ }
+
+-/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
++/*
++ * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+ */
+ static int cpu_hotplug_disabled;
+
+ #ifdef CONFIG_HOTPLUG_CPU
+
+-static struct {
+- struct task_struct *active_writer;
+- /* wait queue to wake up the active_writer */
+- wait_queue_head_t wq;
+- /* verifies that no writer will get active while readers are active */
+- struct mutex lock;
+- /*
+- * Also blocks the new readers during
+- * an ongoing cpu hotplug operation.
+- */
+- atomic_t refcount;
+-
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- struct lockdep_map dep_map;
+-#endif
+-} cpu_hotplug = {
+- .active_writer = NULL,
+- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
+- .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+- .dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
+-#endif
+-};
+-
+-/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
+-#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+-#define cpuhp_lock_acquire_tryread() \
+- lock_map_acquire_tryread(&cpu_hotplug.dep_map)
+-#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
+-#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
+-
++DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
+
+ void cpus_read_lock(void)
+ {
+- might_sleep();
+- if (cpu_hotplug.active_writer == current)
+- return;
+- cpuhp_lock_acquire_read();
+- mutex_lock(&cpu_hotplug.lock);
+- atomic_inc(&cpu_hotplug.refcount);
+- mutex_unlock(&cpu_hotplug.lock);
++ percpu_down_read(&cpu_hotplug_lock);
+ }
+ EXPORT_SYMBOL_GPL(cpus_read_lock);
+
+ void cpus_read_unlock(void)
+ {
+- int refcount;
+-
+- if (cpu_hotplug.active_writer == current)
+- return;
+-
+- refcount = atomic_dec_return(&cpu_hotplug.refcount);
+- if (WARN_ON(refcount < 0)) /* try to fix things up */
+- atomic_inc(&cpu_hotplug.refcount);
+-
+- if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+- wake_up(&cpu_hotplug.wq);
+-
+- cpuhp_lock_release();
+-
++ percpu_up_read(&cpu_hotplug_lock);
+ }
+ EXPORT_SYMBOL_GPL(cpus_read_unlock);
+
+-/*
+- * This ensures that the hotplug operation can begin only when the
+- * refcount goes to zero.
+- *
+- * Note that during a cpu-hotplug operation, the new readers, if any,
+- * will be blocked by the cpu_hotplug.lock
+- *
+- * Since cpu_hotplug_begin() is always called after invoking
+- * cpu_maps_update_begin(), we can be sure that only one writer is active.
+- *
+- * Note that theoretically, there is a possibility of a livelock:
+- * - Refcount goes to zero, last reader wakes up the sleeping
+- * writer.
+- * - Last reader unlocks the cpu_hotplug.lock.
+- * - A new reader arrives at this moment, bumps up the refcount.
+- * - The writer acquires the cpu_hotplug.lock finds the refcount
+- * non zero and goes to sleep again.
+- *
+- * However, this is very difficult to achieve in practice since
+- * get_online_cpus() not an api which is called all that often.
+- *
+- */
+ void cpus_write_lock(void)
+ {
+- DEFINE_WAIT(wait);
+-
+- cpu_hotplug.active_writer = current;
+- cpuhp_lock_acquire();
+-
+- for (;;) {
+- mutex_lock(&cpu_hotplug.lock);
+- prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+- if (likely(!atomic_read(&cpu_hotplug.refcount)))
+- break;
+- mutex_unlock(&cpu_hotplug.lock);
+- schedule();
+- }
+- finish_wait(&cpu_hotplug.wq, &wait);
++ percpu_down_write(&cpu_hotplug_lock);
+ }
+
+ void cpus_write_unlock(void)
+ {
+- cpu_hotplug.active_writer = NULL;
+- mutex_unlock(&cpu_hotplug.lock);
+- cpuhp_lock_release();
++ percpu_up_write(&cpu_hotplug_lock);
++}
++
++void lockdep_assert_cpus_held(void)
++{
++ percpu_rwsem_assert_held(&cpu_hotplug_lock);
+ }
+
+ /*
+@@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
+ EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+-/* Notifier wrappers for transitioning to state machine */
+-
+ static int bringup_wait_for_ap(unsigned int cpu)
+ {
+ struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
+--
+2.11.0
+
diff --git a/patches/0030-sched-Provide-is_percpu_thread-helper.patch b/patches/0030-sched-Provide-is_percpu_thread-helper.patch
new file mode 100644
index 000000000000..18aec16e0773
--- /dev/null
+++ b/patches/0030-sched-Provide-is_percpu_thread-helper.patch
@@ -0,0 +1,43 @@
+From 62ec05dd71b19f5be890a1992227cc7b2ac0adc4 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:41 +0200
+Subject: [PATCH 30/32] sched: Provide is_percpu_thread() helper
+
+Provide a helper function for checking whether the current task is a
+per-CPU thread.
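+
+A minimal usage sketch (hypothetical caller, not part of this patch):
+
+    static long run_on(int cpu, long (*fn)(void *), void *arg)
+    {
+        /* A per-CPU kthread pinned to @cpu can just call fn() directly. */
+        if (is_percpu_thread() && cpu == smp_processor_id())
+            return fn(arg);
+        /* Everybody else has to bounce through the workqueue machinery. */
+        return work_on_cpu(cpu, fn, arg);
+    }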
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081549.541649540@linutronix.de
+---
+ include/linux/sched.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index 2b69fc650201..3dfa5f99d6ee 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -1265,6 +1265,16 @@ extern struct pid *cad_pid;
+ #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+ #define used_math() tsk_used_math(current)
+
++static inline bool is_percpu_thread(void)
++{
++#ifdef CONFIG_SMP
++ return (current->flags & PF_NO_SETAFFINITY) &&
++ (current->nr_cpus_allowed == 1);
++#else
++ return true;
++#endif
++}
++
+ /* Per-process atomic flags. */
+ #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
+ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
+--
+2.11.0
+
diff --git a/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch b/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
new file mode 100644
index 000000000000..d665d1bb11cf
--- /dev/null
+++ b/patches/0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
@@ -0,0 +1,194 @@
+From 0266d81e9bf5cc1fe6405c0523dfa015fe55aae1 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:42 +0200
+Subject: [PATCH 31/32] acpi/processor: Prevent cpu hotplug deadlock
+
+With the enhanced CPU hotplug lockdep coverage the following lockdep splat
+happens:
+
+======================================================
+WARNING: possible circular locking dependency detected
+4.12.0-rc2+ #84 Tainted: G W
+------------------------------------------------------
+cpuhp/1/15 is trying to acquire lock:
+flush_work+0x39/0x2f0
+
+but task is already holding lock:
+cpuhp_thread_fun+0x30/0x160
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #2 (cpuhp_state){+.+.+.}:
+ lock_acquire+0xb4/0x200
+ cpuhp_kick_ap_work+0x72/0x330
+ _cpu_down+0x8b/0x100
+ do_cpu_down+0x3e/0x60
+ cpu_down+0x10/0x20
+ cpu_subsys_offline+0x14/0x20
+ device_offline+0x88/0xb0
+ online_store+0x4c/0xa0
+ dev_attr_store+0x18/0x30
+ sysfs_kf_write+0x45/0x60
+ kernfs_fop_write+0x156/0x1e0
+ __vfs_write+0x37/0x160
+ vfs_write+0xca/0x1c0
+ SyS_write+0x58/0xc0
+ entry_SYSCALL_64_fastpath+0x23/0xc2
+
+-> #1 (cpu_hotplug_lock.rw_sem){++++++}:
+ lock_acquire+0xb4/0x200
+ cpus_read_lock+0x3d/0xb0
+ apply_workqueue_attrs+0x17/0x50
+ __alloc_workqueue_key+0x1e1/0x530
+ scsi_host_alloc+0x373/0x480 [scsi_mod]
+ ata_scsi_add_hosts+0xcb/0x130 [libata]
+ ata_host_register+0x11a/0x2c0 [libata]
+ ata_host_activate+0xf0/0x150 [libata]
+ ahci_host_activate+0x13e/0x170 [libahci]
+ ahci_init_one+0xa3a/0xd3f [ahci]
+ local_pci_probe+0x45/0xa0
+ work_for_cpu_fn+0x14/0x20
+ process_one_work+0x1f9/0x690
+ worker_thread+0x200/0x3d0
+ kthread+0x138/0x170
+ ret_from_fork+0x31/0x40
+
+-> #0 ((&wfc.work)){+.+.+.}:
+ __lock_acquire+0x11e1/0x13e0
+ lock_acquire+0xb4/0x200
+ flush_work+0x5c/0x2f0
+ work_on_cpu+0xa1/0xd0
+ acpi_processor_get_throttling+0x3d/0x50
+ acpi_processor_reevaluate_tstate+0x2c/0x50
+ acpi_soft_cpu_online+0x69/0xd0
+ cpuhp_invoke_callback+0xb4/0x8b0
+ cpuhp_up_callbacks+0x36/0xc0
+ cpuhp_thread_fun+0x14e/0x160
+ smpboot_thread_fn+0x1e8/0x300
+ kthread+0x138/0x170
+ ret_from_fork+0x31/0x40
+
+other info that might help us debug this:
+
+Chain exists of:
+ (&wfc.work) --> cpu_hotplug_lock.rw_sem --> cpuhp_state
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(cpuhp_state);
+ lock(cpu_hotplug_lock.rw_sem);
+ lock(cpuhp_state);
+ lock((&wfc.work));
+
+ *** DEADLOCK ***
+
+1 lock held by cpuhp/1/15:
+cpuhp_thread_fun+0x30/0x160
+
+stack backtrace:
+CPU: 1 PID: 15 Comm: cpuhp/1 Tainted: G W 4.12.0-rc2+ #84
+Hardware name: Supermicro SYS-4048B-TR4FT/X10QBi, BIOS 1.1a 07/29/2015
+Call Trace:
+ dump_stack+0x85/0xc4
+ print_circular_bug+0x209/0x217
+ __lock_acquire+0x11e1/0x13e0
+ lock_acquire+0xb4/0x200
+ ? lock_acquire+0xb4/0x200
+ ? flush_work+0x39/0x2f0
+ ? acpi_processor_start+0x50/0x50
+ flush_work+0x5c/0x2f0
+ ? flush_work+0x39/0x2f0
+ ? acpi_processor_start+0x50/0x50
+ ? mark_held_locks+0x6d/0x90
+ ? queue_work_on+0x56/0x90
+ ? trace_hardirqs_on_caller+0x154/0x1c0
+ ? trace_hardirqs_on+0xd/0x10
+ ? acpi_processor_start+0x50/0x50
+ work_on_cpu+0xa1/0xd0
+ ? find_worker_executing_work+0x50/0x50
+ ? acpi_processor_power_exit+0x70/0x70
+ acpi_processor_get_throttling+0x3d/0x50
+ acpi_processor_reevaluate_tstate+0x2c/0x50
+ acpi_soft_cpu_online+0x69/0xd0
+ cpuhp_invoke_callback+0xb4/0x8b0
+ ? lock_acquire+0xb4/0x200
+ ? padata_replace+0x120/0x120
+ cpuhp_up_callbacks+0x36/0xc0
+ cpuhp_thread_fun+0x14e/0x160
+ smpboot_thread_fn+0x1e8/0x300
+ kthread+0x138/0x170
+ ? sort_range+0x30/0x30
+ ? kthread_create_on_node+0x70/0x70
+ ret_from_fork+0x31/0x40
+
+The problem is that the work is scheduled on the current CPU from the
+hotplug thread associated with that CPU.
+
+It's not required to invoke these functions via the workqueue because the
+hotplug thread runs on the target CPU already.
+
+Check whether current is a per-CPU thread pinned to the target CPU and, if
+so, invoke the function directly to avoid the workqueue.
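+
+In effect (sketch, using the helper added below): when the online callback
+runs in the cpuhp/N thread of pr->id, the throttling query no longer queues
+work on the very CPU it is running on:
+
+    ret = call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
+    /* is_percpu_thread() && pr->id == smp_processor_id() -> direct call,
+     * so no flush_work() is taken while cpuhp_state is held. */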
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: linux-acpi@vger.kernel.org
+Cc: Len Brown <lenb@kernel.org>
+Link: http://lkml.kernel.org/r/20170524081549.620489733@linutronix.de
+---
+ drivers/acpi/processor_throttling.c | 16 ++++++++--------
+ 1 file changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
+index 3de34633f7f9..7f9aff4b8d62 100644
+--- a/drivers/acpi/processor_throttling.c
++++ b/drivers/acpi/processor_throttling.c
+@@ -909,6 +909,13 @@ static long __acpi_processor_get_throttling(void *data)
+ return pr->throttling.acpi_processor_get_throttling(pr);
+ }
+
++static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
++{
++ if (direct || (is_percpu_thread() && cpu == smp_processor_id()))
++ return fn(arg);
++ return work_on_cpu(cpu, fn, arg);
++}
++
+ static int acpi_processor_get_throttling(struct acpi_processor *pr)
+ {
+ if (!pr)
+@@ -926,7 +933,7 @@ static int acpi_processor_get_throttling(struct acpi_processor *pr)
+ if (!cpu_online(pr->id))
+ return -ENODEV;
+
+- return work_on_cpu(pr->id, __acpi_processor_get_throttling, pr);
++ return call_on_cpu(pr->id, __acpi_processor_get_throttling, pr, false);
+ }
+
+ static int acpi_processor_get_fadt_info(struct acpi_processor *pr)
+@@ -1076,13 +1083,6 @@ static long acpi_processor_throttling_fn(void *data)
+ arg->target_state, arg->force);
+ }
+
+-static int call_on_cpu(int cpu, long (*fn)(void *), void *arg, bool direct)
+-{
+- if (direct)
+- return fn(arg);
+- return work_on_cpu(cpu, fn, arg);
+-}
+-
+ static int __acpi_processor_set_throttling(struct acpi_processor *pr,
+ int state, bool force, bool direct)
+ {
+--
+2.11.0
+
diff --git a/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch b/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
new file mode 100644
index 000000000000..b86bdd88a22a
--- /dev/null
+++ b/patches/0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
@@ -0,0 +1,93 @@
+From 49dfe2a6779717d9c18395684ee31bdc98b22e53 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Wed, 24 May 2017 10:15:43 +0200
+Subject: [PATCH 32/32] cpuhotplug: Link lock stacks for hotplug callbacks
+
+The CPU hotplug callbacks are not covered by lockdep versus the cpu hotplug
+rwsem.
+
+CPU0 CPU1
+cpuhp_setup_state(STATE, startup, teardown);
+ cpus_read_lock();
+ invoke_callback_on_ap();
+ kick_hotplug_thread(ap);
+ wait_for_completion(); hotplug_thread_fn()
+ lock(m);
+ do_stuff();
+ unlock(m);
+
+Lockdep does not know about this dependency and will not trigger on the
+following code sequence:
+
+ lock(m);
+ cpus_read_lock();
+
+Add a lockdep map and connect the initiator's lock chain with the hotplug
+thread's lock chain, so potential deadlocks can be detected.
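+
+The annotation pattern, sketched with the names used in the hunks below:
+
+    /* initiator: touch the map before waiting for the hotplug thread */
+    lock_map_acquire(&cpuhp_state_lock_map);
+    lock_map_release(&cpuhp_state_lock_map);
+    __cpuhp_kick_ap_work(st);
+    wait_for_completion(&st->done);
+
+    /* hotplug thread: hold the map around the state callbacks */
+    lock_map_acquire(&cpuhp_state_lock_map);
+    ret = cpuhp_invoke_callback(cpu, st->cb_state, bringup, st->node);
+    lock_map_release(&cpuhp_state_lock_map);
+
+Since both sides take the same (fake) lock, lockdep links whatever the
+initiator holds to the locks taken inside the callbacks.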
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Acked-by: Ingo Molnar <mingo@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Sebastian Siewior <bigeasy@linutronix.de>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Link: http://lkml.kernel.org/r/20170524081549.709375845@linutronix.de
+---
+ kernel/cpu.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/kernel/cpu.c b/kernel/cpu.c
+index 66836216ebae..7435ffc6163b 100644
+--- a/kernel/cpu.c
++++ b/kernel/cpu.c
+@@ -66,6 +66,12 @@ struct cpuhp_cpu_state {
+
+ static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+
++#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
++static struct lock_class_key cpuhp_state_key;
++static struct lockdep_map cpuhp_state_lock_map =
++ STATIC_LOCKDEP_MAP_INIT("cpuhp_state", &cpuhp_state_key);
++#endif
++
+ /**
+ * cpuhp_step - Hotplug state machine step
+ * @name: Name of the step
+@@ -403,6 +409,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
+
+ st->should_run = false;
+
++ lock_map_acquire(&cpuhp_state_lock_map);
+ /* Single callback invocation for [un]install ? */
+ if (st->single) {
+ if (st->cb_state < CPUHP_AP_ONLINE) {
+@@ -429,6 +436,7 @@ static void cpuhp_thread_fun(unsigned int cpu)
+ else if (st->state > st->target)
+ ret = cpuhp_ap_offline(cpu, st);
+ }
++ lock_map_release(&cpuhp_state_lock_map);
+ st->result = ret;
+ complete(&st->done);
+ }
+@@ -443,6 +451,9 @@ cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
+ if (!cpu_online(cpu))
+ return 0;
+
++ lock_map_acquire(&cpuhp_state_lock_map);
++ lock_map_release(&cpuhp_state_lock_map);
++
+ /*
+ * If we are up and running, use the hotplug thread. For early calls
+ * we invoke the thread function directly.
+@@ -486,6 +497,8 @@ static int cpuhp_kick_ap_work(unsigned int cpu)
+ enum cpuhp_state state = st->state;
+
+ trace_cpuhp_enter(cpu, st->target, state, cpuhp_kick_ap_work);
++ lock_map_acquire(&cpuhp_state_lock_map);
++ lock_map_release(&cpuhp_state_lock_map);
+ __cpuhp_kick_ap_work(st);
+ wait_for_completion(&st->done);
+ trace_cpuhp_exit(cpu, st->state, state, st->result);
+--
+2.11.0
+
diff --git a/patches/ARM-enable-irq-in-translation-section-permission-fau.patch b/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
index c1db6bee2360..70eaa5a13a9f 100644
--- a/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
+++ b/patches/ARM-enable-irq-in-translation-section-permission-fau.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
-@@ -430,6 +430,9 @@ do_translation_fault(unsigned long addr,
+@@ -431,6 +431,9 @@ do_translation_fault(unsigned long addr,
if (addr < TASK_SIZE)
return do_page_fault(addr, fsr, regs);
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (user_mode(regs))
goto bad_area;
-@@ -497,6 +500,9 @@ do_translation_fault(unsigned long addr,
+@@ -498,6 +501,9 @@ do_translation_fault(unsigned long addr,
static int
do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
diff --git a/patches/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch b/patches/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
new file mode 100644
index 000000000000..943d62e2a39a
--- /dev/null
+++ b/patches/CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
@@ -0,0 +1,43 @@
+From 5ffb5cace8448c787c9f44e16a7b12f8c2866848 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 4 Apr 2017 17:43:55 +0200
+Subject: [PATCH] CPUFREQ: Loongson2: drop set_cpus_allowed_ptr()
+
+It is a pure mystery to me why we need to be on a specific CPU while
+looking up a value in an array.
+My best shot at this is that before commit d4019f0a92ab ("cpufreq: move
+freq change notifications to cpufreq core") it was required to invoke
+cpufreq_notify_transition() on a special CPU.
+
+Since it looks like a waste, remove it.
+
+Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
+Cc: Viresh Kumar <viresh.kumar@linaro.org>
+Cc: linux-pm@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/cpufreq/loongson2_cpufreq.c | 7 -------
+ 1 file changed, 7 deletions(-)
+
+--- a/drivers/cpufreq/loongson2_cpufreq.c
++++ b/drivers/cpufreq/loongson2_cpufreq.c
+@@ -51,19 +51,12 @@ static int loongson2_cpu_freq_notifier(s
+ static int loongson2_cpufreq_target(struct cpufreq_policy *policy,
+ unsigned int index)
+ {
+- unsigned int cpu = policy->cpu;
+- cpumask_t cpus_allowed;
+ unsigned int freq;
+
+- cpus_allowed = current->cpus_allowed;
+- set_cpus_allowed_ptr(current, cpumask_of(cpu));
+-
+ freq =
+ ((cpu_clock_freq / 1000) *
+ loongson2_clockmod_table[index].driver_data) / 8;
+
+- set_cpus_allowed_ptr(current, &cpus_allowed);
+-
+ /* setting the cpu frequency */
+ clk_set_rate(policy->clk, freq * 1000);
+
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 9d1b301a423c..67fa30de8beb 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -7,24 +7,24 @@ with a "full" buffer after executing "dmesg" on the shell.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/printk/printk.c | 26 ++++++++++++++++++++++++++
- 1 file changed, 26 insertions(+)
+ kernel/printk/printk.c | 27 +++++++++++++++++++++++++++
+ 1 file changed, 27 insertions(+)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1396,6 +1396,7 @@ static int syslog_print_all(char __user
+@@ -1409,6 +1409,8 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
+ int attempts = 0;
++ int num_msg;
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1407,6 +1408,14 @@ static int syslog_print_all(char __user
+@@ -1420,6 +1422,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
- enum log_flags prev;
-+ int num_msg;
+
+try_again:
+ attempts++;
+ if (attempts > 10) {
@@ -32,44 +32,45 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ goto out;
+ }
+ num_msg = 0;
-
++
/*
* Find first record that fits, including all following records,
-@@ -1422,6 +1431,14 @@ static int syslog_print_all(char __user
- prev = msg->flags;
+ * into the user-provided buffer for this dump.
+@@ -1432,6 +1442,14 @@ static int syslog_print_all(char __user
+ len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
++ logbuf_unlock_irq();
++ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* move first record forward until length fits into the buffer */
-@@ -1435,6 +1452,14 @@ static int syslog_print_all(char __user
- prev = msg->flags;
+@@ -1443,6 +1461,14 @@ static int syslog_print_all(char __user
+ len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
+ num_msg++;
+ if (num_msg > 5) {
+ num_msg = 0;
-+ raw_spin_unlock_irq(&logbuf_lock);
-+ raw_spin_lock_irq(&logbuf_lock);
++ logbuf_unlock_irq();
++ logbuf_lock_irq();
+ if (clear_seq < log_first_seq)
+ goto try_again;
+ }
}
/* last message fitting into this dump */
-@@ -1475,6 +1500,7 @@ static int syslog_print_all(char __user
+@@ -1481,6 +1507,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
+out:
- raw_spin_unlock_irq(&logbuf_lock);
+ logbuf_unlock_irq();
kfree(text);
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index a8bb2ec41969..192e4c6b3f35 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
-@@ -619,7 +619,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
-@@ -640,7 +640,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -653,7 +653,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -696,7 +696,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -709,7 +709,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_vgic_sync_hwstate(vcpu);
diff --git a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
index 9ccfeb415f5c..390ab95e6379 100644
--- a/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
+++ b/patches/KVM-lapic-mark-LAPIC-timer-handler-as-irqsafe.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -1939,6 +1939,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
+@@ -2062,6 +2062,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
HRTIMER_MODE_ABS_PINNED);
apic->lapic_timer.timer.function = apic_timer_fn;
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 28b3dc4c6424..72457678877d 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2697,7 +2697,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2607,7 +2607,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2735,7 +2735,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2645,7 +2645,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_init(&sp->so_delegreturn_mutex);
return sp;
}
-@@ -1497,8 +1497,12 @@ static int nfs4_reclaim_open_state(struc
+@@ -1510,8 +1510,12 @@ static int nfs4_reclaim_open_state(struc
* recovering after a network partition or a reboot from a
* server that doesn't support a grace period.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
restart:
list_for_each_entry(state, &sp->so_states, open_states) {
if (!test_and_clear_bit(ops->state_flag_bit, &state->flags))
-@@ -1567,14 +1571,20 @@ static int nfs4_reclaim_open_state(struc
+@@ -1580,14 +1584,20 @@ static int nfs4_reclaim_open_state(struc
spin_lock(&sp->so_lock);
goto restart;
}
diff --git a/patches/Revert-random-invalidate-batched-entropy-after-crng-.patch b/patches/Revert-random-invalidate-batched-entropy-after-crng-.patch
new file mode 100644
index 000000000000..cb5315b34db6
--- /dev/null
+++ b/patches/Revert-random-invalidate-batched-entropy-after-crng-.patch
@@ -0,0 +1,161 @@
+From 8adeebf2a94f4625c39c25ec461d0d2ab623b3ad Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 14 Jun 2017 21:29:16 +0200
+Subject: [PATCH] Revert "random: invalidate batched entropy after crng init"
+
+This reverts commit 86f95e53ed76fec2579e00351c6050ab398a7730.
+
+In -RT lockdep complains with
+| -> #1 (primary_crng.lock){+.+...}:
+| lock_acquire+0xb5/0x2b0
+| rt_spin_lock+0x46/0x50
+| _extract_crng+0x39/0xa0
+| extract_crng+0x3a/0x40
+| get_random_u64+0x17a/0x200
+| cache_random_seq_create+0x51/0x100
+| init_cache_random_seq+0x35/0x90
+| __kmem_cache_create+0xd3/0x560
+| create_boot_cache+0x8c/0xb2
+| create_kmalloc_cache+0x54/0x9f
+| create_kmalloc_caches+0xe3/0xfd
+| kmem_cache_init+0x14f/0x1f0
+| start_kernel+0x1e7/0x3b3
+| x86_64_start_reservations+0x2a/0x2c
+| x86_64_start_kernel+0x13d/0x14c
+| verify_cpu+0x0/0xfc
+|
+| -> #0 (batched_entropy_reset_lock){+.+...}:
+| __lock_acquire+0x11b4/0x1320
+| lock_acquire+0xb5/0x2b0
+| rt_write_lock+0x26/0x40
+| rt_write_lock_irqsave+0x9/0x10
+| invalidate_batched_entropy+0x28/0xb0
+| crng_fast_load+0xb5/0xe0
+| add_interrupt_randomness+0x16c/0x1a0
+| irq_thread+0x15c/0x1e0
+| kthread+0x112/0x150
+| ret_from_fork+0x31/0x40
+
+so revert this for now and check later with upstream.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/char/random.c | 37 -------------------------------------
+ 1 file changed, 37 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1,9 +1,6 @@
+ /*
+ * random.c -- A strong random number generator
+ *
+- * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+- * Rights Reserved.
+- *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
+@@ -765,8 +762,6 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init
+ static struct crng_state **crng_node_pool __read_mostly;
+ #endif
+
+-static void invalidate_batched_entropy(void);
+-
+ static void crng_initialize(struct crng_state *crng)
+ {
+ int i;
+@@ -804,7 +799,6 @@ static int crng_fast_load(const char *cp
+ cp++; crng_init_cnt++; len--;
+ }
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+- invalidate_batched_entropy();
+ crng_init = 1;
+ wake_up_interruptible(&crng_init_wait);
+ pr_notice("random: fast init done\n");
+@@ -842,7 +836,6 @@ static void crng_reseed(struct crng_stat
+ memzero_explicit(&buf, sizeof(buf));
+ crng->init_time = jiffies;
+ if (crng == &primary_crng && crng_init < 2) {
+- invalidate_batched_entropy();
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+@@ -2023,7 +2016,6 @@ struct batched_entropy {
+ };
+ unsigned int position;
+ };
+-static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
+
+ /*
+ * Get a random word for internal kernel use only. The quality of the random
+@@ -2034,8 +2026,6 @@ static DEFINE_PER_CPU(struct batched_ent
+ u64 get_random_u64(void)
+ {
+ u64 ret;
+- bool use_lock = crng_init < 2;
+- unsigned long flags;
+ struct batched_entropy *batch;
+
+ #if BITS_PER_LONG == 64
+@@ -2048,15 +2038,11 @@ u64 get_random_u64(void)
+ #endif
+
+ batch = &get_cpu_var(batched_entropy_u64);
+- if (use_lock)
+- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u64[batch->position++];
+- if (use_lock)
+- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u64);
+ return ret;
+ }
+@@ -2066,45 +2052,22 @@ static DEFINE_PER_CPU(struct batched_ent
+ u32 get_random_u32(void)
+ {
+ u32 ret;
+- bool use_lock = crng_init < 2;
+- unsigned long flags;
+ struct batched_entropy *batch;
+
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ batch = &get_cpu_var(batched_entropy_u32);
+- if (use_lock)
+- read_lock_irqsave(&batched_entropy_reset_lock, flags);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
+ batch->position = 0;
+ }
+ ret = batch->entropy_u32[batch->position++];
+- if (use_lock)
+- read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+ put_cpu_var(batched_entropy_u32);
+ return ret;
+ }
+ EXPORT_SYMBOL(get_random_u32);
+
+-/* It's important to invalidate all potential batched entropy that might
+- * be stored before the crng is initialized, which we can do lazily by
+- * simply resetting the counter to zero so that it's re-extracted on the
+- * next usage. */
+-static void invalidate_batched_entropy(void)
+-{
+- int cpu;
+- unsigned long flags;
+-
+- write_lock_irqsave(&batched_entropy_reset_lock, flags);
+- for_each_possible_cpu (cpu) {
+- per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+- per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+- }
+- write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+-}
+-
+ /**
+ * randomize_page - Generate a random, page aligned address
+ * @start: The smallest acceptable address the caller will take.
diff --git a/patches/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/patches/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
deleted file mode 100644
index 3fa509c360c5..000000000000
--- a/patches/Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+++ /dev/null
@@ -1,217 +0,0 @@
-From 16145f9c01a2e671aceb731050de9fbf977d31d0 Mon Sep 17 00:00:00 2001
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Fri, 26 May 2017 19:16:07 +0200
-Subject: [PATCH] Revert "timers: Don't wake ktimersoftd on every tick"
-
-This reverts commit 032f93cae150a ("timers: Don't wake ktimersoftd on
-every tick").
-
-The problem is that the look ahead optimization from the tick timer
-interrupt context can race with the softirq thread expiring timer. As
-a consequence the temporary hlist heads which hold the to expire
-timers are overwritten and the timers which are already removed from
-the wheel bucket for expiry are now dangling w/o a list head.
-
-That means those timers never get expired. If one of those timers is
-canceled the removal operation will result in a hlist corruption.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 96 +++++++++++++++-------------------------------------
- 1 file changed, 29 insertions(+), 67 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -206,8 +206,6 @@ struct timer_base {
- bool is_idle;
- DECLARE_BITMAP(pending_map, WHEEL_SIZE);
- struct hlist_head vectors[WHEEL_SIZE];
-- struct hlist_head expired_lists[LVL_DEPTH];
-- int expired_count;
- } ____cacheline_aligned;
-
- static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-@@ -1355,8 +1353,7 @@ static void call_timer_fn(struct timer_l
- }
- }
-
--static inline void __expire_timers(struct timer_base *base,
-- struct hlist_head *head)
-+static void expire_timers(struct timer_base *base, struct hlist_head *head)
- {
- while (!hlist_empty(head)) {
- struct timer_list *timer;
-@@ -1387,38 +1384,21 @@ static inline void __expire_timers(struc
- }
- }
-
--static void expire_timers(struct timer_base *base)
--{
-- struct hlist_head *head;
--
-- while (base->expired_count--) {
-- head = base->expired_lists + base->expired_count;
-- __expire_timers(base, head);
-- }
-- base->expired_count = 0;
--}
--
--static void __collect_expired_timers(struct timer_base *base)
-+static int __collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
- {
- unsigned long clk = base->clk;
- struct hlist_head *vec;
-- int i;
-+ int i, levels = 0;
- unsigned int idx;
-
-- /*
-- * expire_timers() must be called at least once before we can
-- * collect more timers
-- */
-- if (WARN_ON(base->expired_count))
-- return;
--
- for (i = 0; i < LVL_DEPTH; i++) {
- idx = (clk & LVL_MASK) + i * LVL_SIZE;
-
- if (__test_and_clear_bit(idx, base->pending_map)) {
- vec = base->vectors + idx;
-- hlist_move_list(vec,
-- &base->expired_lists[base->expired_count++]);
-+ hlist_move_list(vec, heads++);
-+ levels++;
- }
- /* Is it time to look at the next level? */
- if (clk & LVL_CLK_MASK)
-@@ -1426,6 +1406,7 @@ static void __collect_expired_timers(str
- /* Shift clock for the next level granularity */
- clk >>= LVL_CLK_SHIFT;
- }
-+ return levels;
- }
-
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -1618,7 +1599,8 @@ void timer_clear_idle(void)
- base->is_idle = false;
- }
-
--static void collect_expired_timers(struct timer_base *base)
-+static int collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
- {
- /*
- * NOHZ optimization. After a long idle sleep we need to forward the
-@@ -1635,49 +1617,20 @@ static void collect_expired_timers(struc
- if (time_after(next, jiffies)) {
- /* The call site will increment clock! */
- base->clk = jiffies - 1;
-- return;
-+ return 0;
- }
- base->clk = next;
- }
-- __collect_expired_timers(base);
-+ return __collect_expired_timers(base, heads);
- }
- #else
--static inline void collect_expired_timers(struct timer_base *base)
-+static inline int collect_expired_timers(struct timer_base *base,
-+ struct hlist_head *heads)
- {
-- __collect_expired_timers(base);
-+ return __collect_expired_timers(base, heads);
- }
- #endif
-
--static int find_expired_timers(struct timer_base *base)
--{
-- const unsigned long int end_clk = jiffies;
--
-- while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
-- collect_expired_timers(base);
-- base->clk++;
-- }
--
-- return base->expired_count;
--}
--
--/* Called from CPU tick routine to quickly collect expired timers */
--static int tick_find_expired(struct timer_base *base)
--{
-- int count;
--
-- raw_spin_lock(&base->lock);
--
-- if (unlikely(time_after(jiffies, base->clk + HZ))) {
-- /* defer to ktimersoftd; don't spend too long in irq context */
-- count = -1;
-- } else
-- count = find_expired_timers(base);
--
-- raw_spin_unlock(&base->lock);
--
-- return count;
--}
--
- /*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
-@@ -1704,11 +1657,22 @@ void update_process_times(int user_tick)
- */
- static inline void __run_timers(struct timer_base *base)
- {
-+ struct hlist_head heads[LVL_DEPTH];
-+ int levels;
-+
-+ if (!time_after_eq(jiffies, base->clk))
-+ return;
-+
- raw_spin_lock_irq(&base->lock);
-
-- while (find_expired_timers(base))
-- expire_timers(base);
-+ while (time_after_eq(jiffies, base->clk)) {
-+
-+ levels = collect_expired_timers(base, heads);
-+ base->clk++;
-
-+ while (levels--)
-+ expire_timers(base, heads + levels);
-+ }
- raw_spin_unlock_irq(&base->lock);
- wakeup_timer_waiters(base);
- }
-@@ -1734,12 +1698,12 @@ void run_local_timers(void)
-
- hrtimer_run_queues();
- /* Raise the softirq only if required. */
-- if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
-+ if (time_before(jiffies, base->clk)) {
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
- return;
- /* CPU is awake, so check the deferrable base. */
- base++;
-- if (time_before(jiffies, base->clk) || !tick_find_expired(base))
-+ if (time_before(jiffies, base->clk))
- return;
- }
- raise_softirq(TIMER_SOFTIRQ);
-@@ -1909,7 +1873,6 @@ int timers_dead_cpu(unsigned int cpu)
- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- BUG_ON(old_base->running_timer);
-- BUG_ON(old_base->expired_count);
-
- for (i = 0; i < WHEEL_SIZE; i++)
- migrate_timer_list(new_base, old_base->vectors + i);
-@@ -1936,7 +1899,6 @@ static void __init init_timer_cpu(int cp
- #ifdef CONFIG_PREEMPT_RT_FULL
- init_swait_queue_head(&base->wait_for_running_timer);
- #endif
-- base->expired_count = 0;
- }
- }
-
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 172e6cddbad6..298239b10735 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Mutex for _OSI support */
--- a/drivers/acpi/acpica/hwregs.c
+++ b/drivers/acpi/acpica/hwregs.c
-@@ -363,14 +363,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
+@@ -428,14 +428,14 @@ acpi_status acpi_hw_clear_acpi_status(vo
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
new file mode 100644
index 000000000000..ed8cad2ced36
--- /dev/null
+++ b/patches/add_migrate_disable.patch
@@ -0,0 +1,255 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: kernel/sched/core: add migrate_disable()
+
+---
+ include/linux/preempt.h | 23 ++++++++
+ include/linux/sched.h | 7 ++
+ include/linux/smp.h | 3 +
+ kernel/sched/core.c | 134 +++++++++++++++++++++++++++++++++++++++++++++++-
+ kernel/sched/debug.c | 4 +
+ 5 files changed, 169 insertions(+), 2 deletions(-)
+
+--- a/include/linux/preempt.h
++++ b/include/linux/preempt.h
+@@ -184,6 +184,22 @@ do { \
+
+ #define preemptible() (preempt_count() == 0 && !irqs_disabled())
+
++#ifdef CONFIG_SMP
++
++extern void migrate_disable(void);
++extern void migrate_enable(void);
++
++int __migrate_disabled(struct task_struct *p);
++
++#else
++#define migrate_disable() barrier()
++#define migrate_enable() barrier()
++static inline int __migrate_disabled(struct task_struct *p)
++{
++ return 0;
++}
++#endif
++
+ #ifdef CONFIG_PREEMPT
+ #define preempt_enable() \
+ do { \
+@@ -252,6 +268,13 @@ do { \
+ #define preempt_enable_notrace() barrier()
+ #define preemptible() 0
+
++#define migrate_disable() barrier()
++#define migrate_enable() barrier()
++
++static inline int __migrate_disabled(struct task_struct *p)
++{
++ return 0;
++}
+ #endif /* CONFIG_PREEMPT_COUNT */
+
+ #ifdef MODULE
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -537,6 +537,13 @@ struct task_struct {
+ int nr_cpus_allowed;
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ int migrate_disable;
++ int migrate_disable_update;
++# ifdef CONFIG_SCHED_DEBUG
++ int migrate_disable_atomic;
++# endif
++#endif
+
+ #ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+--- a/include/linux/smp.h
++++ b/include/linux/smp.h
+@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
+ #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+ #define put_cpu() preempt_enable()
+
++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); })
++#define put_cpu_light() migrate_enable()
++
+ /*
+ * Callback to arch code if there's nosmp or maxcpus=0 on the
+ * boot command line:
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -1047,7 +1047,15 @@ void set_cpus_allowed_common(struct task
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+-void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++int __migrate_disabled(struct task_struct *p)
++{
++ return p->migrate_disable;
++}
++#endif
++
++static void __do_set_cpus_allowed_tail(struct task_struct *p,
++ const struct cpumask *new_mask)
+ {
+ struct rq *rq = task_rq(p);
+ bool queued, running;
+@@ -1076,6 +1084,20 @@ void do_set_cpus_allowed(struct task_str
+ set_curr_task(rq, p);
+ }
+
++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
++{
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ if (__migrate_disabled(p)) {
++ lockdep_assert_held(&p->pi_lock);
++
++ cpumask_copy(&p->cpus_mask, new_mask);
++ p->migrate_disable_update = 1;
++ return;
++ }
++#endif
++ __do_set_cpus_allowed_tail(p, new_mask);
++}
++
+ /*
+ * Change a given task's CPU affinity. Migrate the thread to a
+ * proper CPU and schedule it away if the CPU it's executing on
+@@ -1134,9 +1156,16 @@ static int __set_cpus_allowed_ptr(struct
+ }
+
+ /* Can the task run on the task's current CPU? If so, we're done */
+- if (cpumask_test_cpu(task_cpu(p), new_mask))
++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
+ goto out;
+
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ if (__migrate_disabled(p)) {
++ p->migrate_disable_update = 1;
++ goto out;
++ }
++#endif
++
+ dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
+ if (task_running(rq, p) || p->state == TASK_WAKING) {
+ struct migration_arg arg = { p, dest_cpu };
+@@ -7357,3 +7386,104 @@ const u32 sched_prio_to_wmult[40] = {
+ /* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
+ /* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
+ };
++
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++
++void migrate_disable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic++;
++#endif
++ return;
++ }
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ if (p->migrate_disable) {
++ p->migrate_disable++;
++ return;
++ }
++
++ /* get_online_cpus(); */
++
++ preempt_disable();
++ p->migrate_disable = 1;
++
++ p->cpus_ptr = cpumask_of(smp_processor_id());
++ p->nr_cpus_allowed = 1;
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_disable);
++
++void migrate_enable(void)
++{
++ struct task_struct *p = current;
++
++ if (in_atomic() || irqs_disabled()) {
++#ifdef CONFIG_SCHED_DEBUG
++ p->migrate_disable_atomic--;
++#endif
++ return;
++ }
++
++#ifdef CONFIG_SCHED_DEBUG
++ WARN_ON_ONCE(p->migrate_disable_atomic);
++#endif
++
++ WARN_ON_ONCE(p->migrate_disable <= 0);
++ if (p->migrate_disable > 1) {
++ p->migrate_disable--;
++ return;
++ }
++
++ preempt_disable();
++
++ p->cpus_ptr = &p->cpus_mask;
++ p->nr_cpus_allowed = cpumask_weight(&p->cpus_mask);
++ p->migrate_disable = 0;
++
++ if (p->migrate_disable_update) {
++ struct rq *rq;
++ struct rq_flags rf;
++
++ rq = task_rq_lock(p, &rf);
++ update_rq_clock(rq);
++
++ __do_set_cpus_allowed_tail(p, &p->cpus_mask);
++ task_rq_unlock(rq, p, &rf);
++
++ p->migrate_disable_update = 0;
++
++ WARN_ON(smp_processor_id() != task_cpu(p));
++ if (!cpumask_test_cpu(task_cpu(p), &p->cpus_mask)) {
++ const struct cpumask *cpu_valid_mask = cpu_active_mask;
++ struct migration_arg arg;
++ unsigned int dest_cpu;
++
++ if (p->flags & PF_KTHREAD) {
++ /*
++ * Kernel threads are allowed on online && !active CPUs
++ */
++ cpu_valid_mask = cpu_online_mask;
++ }
++ dest_cpu = cpumask_any_and(cpu_valid_mask, &p->cpus_mask);
++ arg.task = p;
++ arg.dest_cpu = dest_cpu;
++
++ preempt_enable();
++ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
++ tlb_migrate_finish(p->mm);
++ /* put_online_cpus(); */
++ return;
++ }
++ }
++ /* put_online_cpus(); */
++ preempt_enable();
++}
++EXPORT_SYMBOL(migrate_enable);
++#endif
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -958,6 +958,10 @@ void proc_sched_show_task(struct task_st
+ P(dl.runtime);
+ P(dl.deadline);
+ }
++#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
++ P(migrate_disable);
++#endif
++ P(nr_cpus_allowed);
+ #undef PN_SCHEDSTAT
+ #undef PN
+ #undef __PN
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 596f6f2cf08f..4cb41aefeeb7 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -12,15 +12,15 @@ indicate that support for full RT preemption is now available.
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
arch/arm64/Kconfig | 1 +
- arch/arm64/include/asm/thread_info.h | 7 ++++++-
+ arch/arm64/include/asm/thread_info.h | 7 +++++--
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 12 +++++++++---
arch/arm64/kernel/signal.c | 2 +-
- 5 files changed, 18 insertions(+), 5 deletions(-)
+ 5 files changed, 17 insertions(+), 6 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -91,6 +91,7 @@ config ARM64
+@@ -96,6 +96,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -30,23 +30,23 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
-@@ -49,6 +49,7 @@ struct thread_info {
- mm_segment_t addr_limit; /* address limit */
- struct task_struct *task; /* main task structure */
+@@ -51,6 +51,7 @@ struct thread_info {
+ u64 ttbr0; /* saved TTBR0_EL1 */
+ #endif
int preempt_count; /* 0 => preemptable, <0 => bug */
+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */
- int cpu; /* cpu */
};
-@@ -112,6 +113,7 @@ static inline struct thread_info *curren
- #define TIF_NEED_RESCHED 1
+ #define INIT_THREAD_INFO(tsk) \
+@@ -86,6 +87,7 @@ struct thread_info {
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
-+#define TIF_NEED_RESCHED_LAZY 4
+ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
++#define TIF_NEED_RESCHED_LAZY 5
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -127,6 +129,7 @@ static inline struct thread_info *curren
+@@ -101,6 +103,7 @@ struct thread_info {
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -54,41 +54,41 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -135,7 +138,9 @@ static inline struct thread_info *curren
- #define _TIF_32BIT (1 << TIF_32BIT)
+@@ -111,8 +114,8 @@ struct thread_info {
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
-- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE)
-+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-+ _TIF_NEED_RESCHED_LAZY)
+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
+- _TIF_UPROBE)
+-
++ _TIF_UPROBE | _TIF_NEED_RESCHED_LAZY)
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
-
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
+ _TIF_NOHZ)
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -38,6 +38,7 @@ int main(void)
BLANK();
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ DEFINE(TSK_TI_FLAGS, offsetof(struct task_struct, thread_info.flags));
+ DEFINE(TSK_TI_PREEMPT, offsetof(struct task_struct, thread_info.preempt_count));
++ DEFINE(TSK_TI_PREEMPT_LAZY, offsetof(struct task_struct, thread_info.preempt_lazy_count));
+ DEFINE(TSK_TI_ADDR_LIMIT, offsetof(struct task_struct, thread_info.addr_limit));
+ #ifdef CONFIG_ARM64_SW_TTBR0_PAN
+ DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -428,11 +428,16 @@ ENDPROC(el1_sync)
+@@ -488,11 +488,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
- ldr w24, [tsk, #TI_PREEMPT] // get preempt count
+ ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
- cbnz w24, 1f // preempt count != 0
+ cbnz w24, 2f // preempt count != 0
- ldr x0, [tsk, #TI_FLAGS] // get flags
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get flags
- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
- bl el1_preempt
+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling?
+
-+ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count
++ ldr w24, [tsk, #TSK_TI_PREEMPT_LAZY] // get preempt lazy count
+ cbnz w24, 2f // preempt lazy count != 0
+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling?
1:
@@ -97,9 +97,9 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -446,6 +451,7 @@ ENDPROC(el1_irq)
+@@ -506,6 +511,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
- ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS
+ ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling?
ret x24
diff --git a/patches/arm-convert-boot-lock-to-raw.patch b/patches/arm-convert-boot-lock-to-raw.patch
index 5a45693ab81c..b1d6c1caacf7 100644
--- a/patches/arm-convert-boot-lock-to-raw.patch
+++ b/patches/arm-convert-boot-lock-to-raw.patch
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
-@@ -64,7 +64,7 @@ static const struct omap_smp_config omap
+@@ -69,7 +69,7 @@ static const struct omap_smp_config omap
.startup_addr = omap5_secondary_startup,
};
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __iomem *omap4_get_scu_base(void)
{
-@@ -131,8 +131,8 @@ static void omap4_secondary_init(unsigne
+@@ -136,8 +136,8 @@ static void omap4_secondary_init(unsigne
/*
* Synchronise with the boot thread.
*/
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
-@@ -146,7 +146,7 @@ static int omap4_boot_secondary(unsigned
+@@ -150,7 +150,7 @@ static int omap4_boot_secondary(unsigned
* Set synchronisation state between this boot processor
* and the secondary one
*/
@@ -196,7 +196,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Update the AuxCoreBoot0 with boot state for secondary core.
-@@ -223,7 +223,7 @@ static int omap4_boot_secondary(unsigned
+@@ -229,7 +229,7 @@ static int omap4_boot_secondary(unsigned
* Now the secondary core is starting up let it run its
* calibrations, then wait for it to finish
*/
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index 471b51beeff4..a300d3be5222 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -75,6 +75,7 @@ config ARM
+@@ -81,6 +81,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__und_fault:
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
-@@ -36,7 +36,9 @@
+@@ -41,7 +41,9 @@
UNWIND(.cantunwind )
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
bne fast_work_pending
/* perform architecture specific actions before user return */
-@@ -62,8 +64,11 @@ ENDPROC(ret_fast_syscall)
+@@ -67,8 +69,11 @@ ENDPROC(ret_fast_syscall)
str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
disable_irq_notrace @ disable interrupts
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index b6b0a56db4c1..15d7ad9087a8 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -704,7 +704,7 @@ config XEN_DOM0
+@@ -742,7 +742,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
diff --git a/patches/ata-disable-interrupts-if-non-rt.patch b/patches/ata-disable-interrupts-if-non-rt.patch
index d3cd3c3fd5df..32bb01a80bbf 100644
--- a/patches/ata-disable-interrupts-if-non-rt.patch
+++ b/patches/ata-disable-interrupts-if-non-rt.patch
@@ -14,19 +14,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
-@@ -678,9 +678,9 @@ unsigned int ata_sff_data_xfer_noirq(str
+@@ -679,9 +679,9 @@ unsigned int ata_sff_data_xfer_noirq(str
unsigned long flags;
unsigned int consumed;
- local_irq_save(flags);
+ local_irq_save_nort(flags);
- consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
+ consumed = ata_sff_data_xfer32(qc, buf, buflen, rw);
- local_irq_restore(flags);
+ local_irq_restore_nort(flags);
return consumed;
}
-@@ -719,7 +719,7 @@ static void ata_pio_sector(struct ata_qu
+@@ -720,7 +720,7 @@ static void ata_pio_sector(struct ata_qu
unsigned long flags;
/* FIXME: use a bounce buffer */
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
buf = kmap_atomic(page);
/* do the actual data transfer */
-@@ -727,7 +727,7 @@ static void ata_pio_sector(struct ata_qu
+@@ -728,7 +728,7 @@ static void ata_pio_sector(struct ata_qu
do_write);
kunmap_atomic(buf);
@@ -43,8 +43,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
- ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
-@@ -864,7 +864,7 @@ static int __atapi_pio_bytes(struct ata_
+ ap->ops->sff_data_xfer(qc, buf + offset, qc->sect_size,
+@@ -865,7 +865,7 @@ static int __atapi_pio_bytes(struct ata_
unsigned long flags;
/* FIXME: use bounce buffer */
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
buf = kmap_atomic(page);
/* do the actual data transfer */
-@@ -872,7 +872,7 @@ static int __atapi_pio_bytes(struct ata_
+@@ -873,7 +873,7 @@ static int __atapi_pio_bytes(struct ata_
count, rw);
kunmap_atomic(buf);
@@ -61,4 +61,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_irq_restore_nort(flags);
} else {
buf = page_address(page);
- consumed = ap->ops->sff_data_xfer(dev, buf + offset,
+ consumed = ap->ops->sff_data_xfer(qc, buf + offset,
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index a366d69252a2..6c36fa46406f 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -39,13 +39,13 @@ Subject: block: blk-mq: Use swait
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-core.c | 6 +++---
- block/blk-mq.c | 6 +++---
+ block/blk-mq.c | 8 ++++----
include/linux/blkdev.h | 2 +-
- 3 files changed, 7 insertions(+), 7 deletions(-)
+ 3 files changed, 8 insertions(+), 8 deletions(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -662,7 +662,7 @@ int blk_queue_enter(struct request_queue
+@@ -678,7 +678,7 @@ int blk_queue_enter(struct request_queue
if (nowait)
return -EBUSY;
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!atomic_read(&q->mq_freeze_depth) ||
blk_queue_dying(q));
if (blk_queue_dying(q))
-@@ -682,7 +682,7 @@ static void blk_queue_usage_counter_rele
+@@ -698,7 +698,7 @@ static void blk_queue_usage_counter_rele
struct request_queue *q =
container_of(ref, struct request_queue, q_usage_counter);
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(unsigned long data)
-@@ -751,7 +751,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -766,7 +766,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -74,16 +74,24 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Init percpu_ref in atomic mode so that it's faster to shutdown.
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -72,7 +72,7 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
+@@ -79,14 +79,14 @@ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_st
- static void blk_mq_freeze_queue_wait(struct request_queue *q)
+ void blk_mq_freeze_queue_wait(struct request_queue *q)
{
- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter));
}
+ EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_wait);
- /*
-@@ -110,7 +110,7 @@ void blk_mq_unfreeze_queue(struct reques
+ int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
+ unsigned long timeout)
+ {
+- return wait_event_timeout(q->mq_freeze_wq,
++ return swait_event_timeout(q->mq_freeze_wq,
+ percpu_ref_is_zero(&q->q_usage_counter),
+ timeout);
+ }
+@@ -127,7 +127,7 @@ void blk_mq_unfreeze_queue(struct reques
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
@@ -92,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -129,7 +129,7 @@ void blk_mq_wake_waiters(struct request_
+@@ -173,7 +173,7 @@ void blk_mq_wake_waiters(struct request_
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
@@ -103,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -468,7 +468,7 @@ struct request_queue {
+@@ -566,7 +566,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index e3f65acbfe85..bb62787879f2 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -8,14 +8,14 @@ moves the completion into a workqueue.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
block/blk-core.c | 3 +++
- block/blk-mq.c | 20 ++++++++++++++++++++
+ block/blk-mq.c | 24 ++++++++++++++++++++++++
include/linux/blk-mq.h | 2 +-
- include/linux/blkdev.h | 1 +
- 4 files changed, 25 insertions(+), 1 deletion(-)
+ include/linux/blkdev.h | 3 +++
+ 4 files changed, 31 insertions(+), 1 deletion(-)
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -125,6 +125,9 @@ void blk_rq_init(struct request_queue *q
+@@ -116,6 +116,9 @@ void blk_rq_init(struct request_queue *q
INIT_LIST_HEAD(&rq->queuelist);
INIT_LIST_HEAD(&rq->timeout_list);
@@ -27,9 +27,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -177,6 +177,9 @@ static void blk_mq_rq_ctx_init(struct re
- rq->resid_len = 0;
- rq->sense = NULL;
+@@ -213,6 +213,9 @@ void blk_mq_rq_ctx_init(struct request_q
+ rq->errors = 0;
+ rq->extra_len = 0;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -345,6 +348,17 @@ void blk_mq_end_request(struct request *
+@@ -395,6 +398,17 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
-@@ -352,6 +366,8 @@ static void __blk_mq_complete_request_re
+@@ -402,6 +416,8 @@ static void __blk_mq_complete_request_re
rq->q->softirq_done_fn(rq);
}
@@ -64,11 +64,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void blk_mq_ipi_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
-@@ -368,10 +384,14 @@ static void blk_mq_ipi_complete_request(
+@@ -418,10 +434,18 @@ static void blk_mq_ipi_complete_request(
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
++ /*
++ * We could force QUEUE_FLAG_SAME_FORCE then we would not get in
++	 * here. But we could try to invoke it on the CPU like this.
++ */
+ schedule_work_on(ctx->cpu, &rq->work);
+#else
rq->csd.func = __blk_mq_complete_request_remote;
@@ -81,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
-@@ -209,7 +209,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -218,7 +218,7 @@ static inline u16 blk_mq_unique_tag_to_t
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
@@ -92,11 +96,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_end_request(struct request *rq, int error);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -89,6 +89,7 @@ struct request {
+@@ -128,6 +128,9 @@ typedef __u32 __bitwise req_flags_t;
+ */
+ struct request {
struct list_head queuelist;
++#ifdef CONFIG_PREEMPT_RT_FULL
++ struct work_struct work;
++#endif
union {
struct call_single_data csd;
-+ struct work_struct work;
u64 fifo_time;
- };
-
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index fd3a94019794..2cb8d57c4dfc 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -363,7 +363,7 @@ static void blk_mq_ipi_complete_request(
+@@ -413,7 +413,7 @@ static void blk_mq_ipi_complete_request(
return;
}
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -375,7 +375,7 @@ static void blk_mq_ipi_complete_request(
+@@ -425,7 +425,7 @@ static void blk_mq_ipi_complete_request(
} else {
rq->q->softirq_done_fn(rq);
}
@@ -30,8 +30,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ put_cpu_light();
}
- static void __blk_mq_complete_request(struct request *rq)
-@@ -906,14 +906,14 @@ void blk_mq_run_hw_queue(struct blk_mq_h
+ static void blk_mq_stat_add(struct request *rq)
+@@ -1143,14 +1143,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
@@ -48,4 +48,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ put_cpu_light();
}
- kblockd_schedule_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work);
+ if (msecs == 0)
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index fa68d6206b3f..ba9ecb0c5fff 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
-@@ -72,12 +72,12 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -130,12 +130,12 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index 6fcfdbc84dc8..190b4be0019d 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3200,7 +3200,7 @@ static void queue_unplugged(struct reque
+@@ -3186,7 +3186,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3248,7 +3248,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3234,7 +3234,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3268,11 +3267,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3254,11 +3253,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3285,7 +3279,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3271,7 +3265,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3312,8 +3306,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3298,8 +3292,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/block-use-cpu-chill.patch b/patches/block-use-cpu-chill.patch
index 47fb1412057e..3b0f81eef6e5 100644
--- a/patches/block-use-cpu-chill.patch
+++ b/patches/block-use-cpu-chill.patch
@@ -17,15 +17,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
-@@ -7,6 +7,7 @@
- #include <linux/bio.h>
+@@ -8,6 +8,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
+ #include <linux/sched/task.h>
+#include <linux/delay.h>
#include "blk.h"
-@@ -109,7 +110,7 @@ static void ioc_release_fn(struct work_s
+@@ -117,7 +118,7 @@ static void ioc_release_fn(struct work_s
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -34,12 +34,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
-@@ -187,7 +188,7 @@ void put_io_context_active(struct io_con
- spin_unlock(icq->q->queue_lock);
- } else {
- spin_unlock_irqrestore(&ioc->lock, flags);
-- cpu_relax();
-+ cpu_chill();
- goto retry;
+@@ -201,7 +202,7 @@ void put_io_context_active(struct io_con
+ spin_unlock(icq->q->queue_lock);
+ } else {
+ spin_unlock_irqrestore(&ioc->lock, flags);
+- cpu_relax();
++ cpu_chill();
+ goto retry;
+ }
}
- }
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index ef84f389951c..145335072e3f 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1695,6 +1695,7 @@ struct memcg_stock_pcp {
+@@ -1685,6 +1685,7 @@ struct memcg_stock_pcp {
#define FLUSHING_CACHED_CHARGE 0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_MUTEX(percpu_charge_mutex);
/**
-@@ -1717,7 +1718,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1707,7 +1708,7 @@ static bool consume_stock(struct mem_cgr
if (nr_pages > CHARGE_BATCH)
return ret;
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -1725,7 +1726,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1715,7 +1716,7 @@ static bool consume_stock(struct mem_cgr
ret = true;
}
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1752,13 +1753,13 @@ static void drain_local_stock(struct wor
+@@ -1742,13 +1743,13 @@ static void drain_local_stock(struct wor
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1770,7 +1771,7 @@ static void refill_stock(struct mem_cgro
+@@ -1760,7 +1761,7 @@ static void refill_stock(struct mem_cgro
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
-@@ -1779,7 +1780,7 @@ static void refill_stock(struct mem_cgro
+@@ -1769,7 +1770,7 @@ static void refill_stock(struct mem_cgro
}
stock->nr_pages += nr_pages;
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 3a7950e615ee..88ba2b39b1eb 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -29,20 +29,20 @@ To avoid:
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/cgroup-defs.h | 2 ++
- kernel/cgroup.c | 9 +++++----
+ kernel/cgroup/cgroup.c | 9 +++++----
2 files changed, 7 insertions(+), 4 deletions(-)
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
-@@ -16,6 +16,7 @@
- #include <linux/percpu-refcount.h>
+@@ -17,6 +17,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
+ #include <linux/bpf-cgroup.h>
+#include <linux/swork.h>
#ifdef CONFIG_CGROUPS
-@@ -137,6 +138,7 @@ struct cgroup_subsys_state {
+@@ -139,6 +140,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -50,9 +50,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
/*
---- a/kernel/cgroup.c
-+++ b/kernel/cgroup.c
-@@ -5041,10 +5041,10 @@ static void css_free_rcu_fn(struct rcu_h
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -3895,10 +3895,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -5087,8 +5087,8 @@ static void css_release(struct percpu_re
+@@ -3943,8 +3943,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,11 +76,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -5740,6 +5740,7 @@ static int __init cgroup_wq_init(void)
+@@ -4601,6 +4601,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
+ BUG_ON(swork_get());
-
- /*
- * Used to destroy pidlists and separate to serve as flush domain.
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
diff --git a/patches/char-random-don-t-print-that-the-init-is-done.patch b/patches/char-random-don-t-print-that-the-init-is-done.patch
new file mode 100644
index 000000000000..38da3a2855b1
--- /dev/null
+++ b/patches/char-random-don-t-print-that-the-init-is-done.patch
@@ -0,0 +1,166 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 30 May 2017 16:39:01 +0200
+Subject: char/random: don't print that the init is done
+
+On RT we run into circular locking with pendingb_lock (workqueue),
+port_lock_key (uart) and the primary_crng (random):
+
+ ======================================================
+ [ INFO: possible circular locking dependency detected ]
+ -------------------------------------------------------
+ irq/4-serial/512 is trying to acquire lock:
+ ((pendingb_lock).lock){+.+...}, at: [<ffffffff8108d48d>] queue_work_on+0x5d/0x190
+
+ but task is already holding lock:
+ (&port_lock_key){+.+...}, at: [<ffffffff815b4bb6>] serial8250_handle_irq.part.27+0x16/0xb0
+
+ which lock already depends on the new lock.
+
+ the existing dependency chain (in reverse order) is:
+
+ -> #3 (&port_lock_key){+.+...}:
+ lock_acquire+0xac/0x240
+ rt_spin_lock+0x46/0x50
+ serial8250_console_write+0x211/0x220
+ univ8250_console_write+0x1c/0x20
+ console_unlock+0x563/0x5c0
+ vprintk_emit+0x277/0x320
+ vprintk_default+0x1a/0x20
+ vprintk_func+0x20/0x80
+ printk+0x3e/0x46
+ crng_fast_load+0xde/0xe0
+ add_interrupt_randomness+0x16c/0x1a0
+ irq_thread+0x15c/0x1e0
+ kthread+0x112/0x150
+ ret_from_fork+0x31/0x40
+
+ -> #2 (primary_crng.lock){+.+...}:
+ lock_acquire+0xac/0x240
+ rt_spin_lock+0x46/0x50
+ _extract_crng+0x39/0xa0
+ extract_crng+0x3a/0x40
+ get_random_u32+0x120/0x190
+ new_slab+0x1d6/0x7c0
+ ___slab_alloc+0x30b/0x6f0
+ __slab_alloc.isra.78+0x6c/0xc0
+ __kmalloc+0x254/0x3a0
+ pcpu_mem_zalloc+0x3a/0x70
+ percpu_init_late+0x4f/0x8a
+ start_kernel+0x1ec/0x3b8
+ x86_64_start_reservations+0x2a/0x2c
+ x86_64_start_kernel+0x13d/0x14c
+ verify_cpu+0x0/0xfc
+
+ -> #1 ((batched_entropy_u32_lock).lock){+.+...}:
+ lock_acquire+0xac/0x240
+ rt_spin_lock__no_mg+0x41/0x50
+ get_random_u32+0x64/0x190
+ new_slab+0x1d6/0x7c0
+ ___slab_alloc+0x30b/0x6f0
+ __slab_alloc.isra.78+0x6c/0xc0
+ kmem_cache_alloc+0x26a/0x370
+ __debug_object_init+0x325/0x460
+ debug_object_activate+0x11c/0x1f0
+ __queue_work+0x2c/0x770
+ queue_work_on+0x12a/0x190
+ serio_queue_event+0xd3/0x140
+ __serio_register_port+0x17e/0x1a0
+ i8042_probe+0x623/0x687
+ platform_drv_probe+0x36/0x90
+ driver_probe_device+0x1f8/0x2e0
+ __driver_attach+0x96/0xa0
+ bus_for_each_dev+0x5d/0x90
+ driver_attach+0x19/0x20
+ bus_add_driver+0x125/0x220
+ driver_register+0x5b/0xd0
+ __platform_driver_probe+0x5b/0x120
+ __platform_create_bundle+0xaa/0xd0
+ i8042_init+0x3f1/0x430
+ do_one_initcall+0x3e/0x180
+ kernel_init_freeable+0x212/0x295
+ kernel_init+0x9/0x100
+ ret_from_fork+0x31/0x40
+
+ -> #0 ((pendingb_lock).lock){+.+...}:
+ __lock_acquire+0x11b4/0x1320
+ lock_acquire+0xac/0x240
+ rt_spin_lock+0x46/0x50
+ queue_work_on+0x5d/0x190
+ tty_flip_buffer_push+0x26/0x30
+ serial8250_rx_chars+0x120/0x1f0
+ serial8250_handle_irq.part.27+0x58/0xb0
+ serial8250_default_handle_irq+0x4b/0x60
+ serial8250_interrupt+0x5f/0xd0
+ irq_forced_thread_fn+0x1e/0x70
+ irq_thread+0x137/0x1e0
+ kthread+0x112/0x150
+ ret_from_fork+0x31/0x40
+
+ other info that might help us debug this:
+
+ Chain exists of:
+ (pendingb_lock).lock --> primary_crng.lock --> &port_lock_key
+
+ Possible unsafe locking scenario:
+
+ CPU0 CPU1
+ ---- ----
+ lock(&port_lock_key);
+ lock(primary_crng.lock);
+ lock(&port_lock_key);
+ lock((pendingb_lock).lock);
+
+ *** DEADLOCK ***
+
+ 2 locks held by irq/4-serial/512:
+ #0: (&i->lock){+.+...}, at: [<ffffffff815b0400>] serial8250_interrupt+0x30/0xd0
+ #1: (&port_lock_key){+.+...}, at: [<ffffffff815b4bb6>] serial8250_handle_irq.part.27+0x16/0xb0
+
+ stack backtrace:
+ CPU: 4 PID: 512 Comm: irq/4-serial Not tainted 4.11.3-rt0+ #101
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.10.2-1 04/01/2014
+ Call Trace:
+ dump_stack+0x86/0xc1
+ print_circular_bug+0x1be/0x210
+ __lock_acquire+0x11b4/0x1320
+ lock_acquire+0xac/0x240
+ rt_spin_lock+0x46/0x50
+ queue_work_on+0x5d/0x190
+ tty_flip_buffer_push+0x26/0x30
+ serial8250_rx_chars+0x120/0x1f0
+ serial8250_handle_irq.part.27+0x58/0xb0
+ serial8250_default_handle_irq+0x4b/0x60
+ serial8250_interrupt+0x5f/0xd0
+ irq_forced_thread_fn+0x1e/0x70
+ irq_thread+0x137/0x1e0
+ kthread+0x112/0x150
+ ret_from_fork+0x31/0x40
+
+It should work if we delay that printk until after dropping the lock,
+but we could also just skip it.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/char/random.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -802,7 +802,7 @@ static int crng_fast_load(const char *cp
+ if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+ crng_init = 1;
+ wake_up_interruptible(&crng_init_wait);
+- pr_notice("random: fast init done\n");
++ /* pr_notice("random: fast init done\n"); */
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ return 1;
+@@ -840,7 +840,7 @@ static void crng_reseed(struct crng_stat
+ crng_init = 2;
+ process_random_ready_list();
+ wake_up_interruptible(&crng_init_wait);
+- pr_notice("random: crng init done\n");
++ /* pr_notice("random: crng init done\n"); */
+ }
+ spin_unlock_irqrestore(&primary_crng.lock, flags);
+ }
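
A minimal sketch of the deferred-printk alternative mentioned in the patch
description, assuming the crng_fast_load() context visible in the hunk above:
record that the threshold was hit while primary_crng.lock is held and emit the
message only after the lock has been dropped. The init_done local is purely
illustrative and not part of the crng code; the patch itself simply comments
the pr_notice() out instead.

	bool init_done = false;

	spin_lock_irqsave(&primary_crng.lock, flags);
	/* ... mix the incoming bytes into the primary crng ... */
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		init_done = true;	/* remember, print after unlock */
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);

	if (init_done)
		pr_notice("random: fast init done\n");
	return 1;
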
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index e6e25643a17a..d7aeb69a61db 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -14,17 +14,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/completion.h | 9 ++---
include/linux/suspend.h | 6 +++
include/linux/swait.h | 1
- include/linux/uprobes.h | 1
kernel/power/hibernate.c | 7 ++++
kernel/power/suspend.c | 5 +++
kernel/sched/completion.c | 32 ++++++++++----------
kernel/sched/core.c | 10 +++++-
kernel/sched/swait.c | 20 ++++++++++++
- 12 files changed, 72 insertions(+), 27 deletions(-)
+ 11 files changed, 71 insertions(+), 27 deletions(-)
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
-@@ -697,7 +697,7 @@ static void ezusb_req_ctx_wait(struct ez
+@@ -696,7 +696,7 @@ static void ezusb_req_ctx_wait(struct ez
while (!ctx->done.done && msecs--)
udelay(1000);
} else {
@@ -35,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1593,7 +1593,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1594,7 +1594,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -46,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
-@@ -346,7 +346,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -345,7 +345,7 @@ ep_io (struct ep_data *epdata, void *buf
spin_unlock_irq (&epdata->dev->lock);
if (likely (value == 0)) {
@@ -55,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (value != 0) {
spin_lock_irq (&epdata->dev->lock);
if (likely (epdata->ep != NULL)) {
-@@ -355,7 +355,7 @@ ep_io (struct ep_data *epdata, void *buf
+@@ -354,7 +354,7 @@ ep_io (struct ep_data *epdata, void *buf
usb_ep_dequeue (epdata->ep, epdata->req);
spin_unlock_irq (&epdata->dev->lock);
@@ -124,19 +123,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
---- a/include/linux/uprobes.h
-+++ b/include/linux/uprobes.h
-@@ -27,6 +27,7 @@
- #include <linux/errno.h>
- #include <linux/rbtree.h>
- #include <linux/types.h>
-+#include <linux/wait.h>
-
- struct vm_area_struct;
- struct mm_struct;
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -683,6 +683,10 @@ static int load_image_and_restore(void)
+@@ -679,6 +679,10 @@ static int load_image_and_restore(void)
return error;
}
@@ -147,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* hibernate - Carry out system hibernation, including saving the image.
*/
-@@ -696,6 +700,8 @@ int hibernate(void)
+@@ -692,6 +696,8 @@ int hibernate(void)
return -EPERM;
}
@@ -156,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -773,6 +779,7 @@ int hibernate(void)
+@@ -769,6 +775,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
@@ -166,7 +155,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -531,6 +531,8 @@ static int enter_state(suspend_state_t s
+@@ -546,6 +546,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -175,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -545,6 +547,8 @@ int pm_suspend(suspend_state_t state)
+@@ -560,6 +562,8 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
@@ -184,7 +173,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = enter_state(state);
if (error) {
suspend_stats.fail++;
-@@ -552,6 +556,7 @@ int pm_suspend(suspend_state_t state)
+@@ -567,6 +571,7 @@ int pm_suspend(suspend_state_t state)
} else {
suspend_stats.success++;
}
@@ -194,13 +183,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(pm_suspend);
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
-@@ -30,10 +30,10 @@ void complete(struct completion *x)
+@@ -31,11 +31,11 @@ void complete(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done++;
+ if (x->done != UINT_MAX)
+ x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ swake_up_locked(&x->wait);
@@ -208,13 +198,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete);
-@@ -50,10 +50,10 @@ void complete_all(struct completion *x)
+@@ -52,10 +52,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
- x->done += UINT_MAX/2;
+ x->done = UINT_MAX;
- __wake_up_locked(&x->wait, TASK_NORMAL, 0);
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ swake_up_all_locked(&x->wait);
@@ -222,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete_all);
-@@ -62,20 +62,20 @@ do_wait_for_common(struct completion *x,
+@@ -64,20 +64,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
@@ -248,7 +238,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!x->done)
return timeout;
}
-@@ -89,9 +89,9 @@ static inline long __sched
+@@ -92,9 +92,9 @@ static inline long __sched
{
might_sleep();
@@ -260,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return timeout;
}
-@@ -277,12 +277,12 @@ bool try_wait_for_completion(struct comp
+@@ -280,12 +280,12 @@ bool try_wait_for_completion(struct comp
if (!READ_ONCE(x->done))
return 0;
@@ -268,14 +258,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
- else
+ else if (x->done != UINT_MAX)
x->done--;
- spin_unlock_irqrestore(&x->wait.lock, flags);
+ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -311,7 +311,7 @@ bool completion_done(struct completion *
+@@ -314,7 +314,7 @@ bool completion_done(struct completion *
* after it's acquired the lock.
*/
smp_rmb();
@@ -286,9 +276,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3323,7 +3323,10 @@ void migrate_disable(void)
+@@ -7476,7 +7476,10 @@ void migrate_disable(void)
+ return;
}
-
#ifdef CONFIG_SCHED_DEBUG
- WARN_ON_ONCE(p->migrate_disable_atomic);
+ if (unlikely(p->migrate_disable_atomic)) {
@@ -298,7 +288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -3350,7 +3353,10 @@ void migrate_enable(void)
+@@ -7509,7 +7512,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -308,12 +298,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ WARN_ON_ONCE(1);
+ }
#endif
- WARN_ON_ONCE(p->migrate_disable <= 0);
+ WARN_ON_ONCE(p->migrate_disable <= 0);
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
@@ -1,5 +1,6 @@
- #include <linux/sched.h>
+ #include <linux/sched/signal.h>
#include <linux/swait.h>
+#include <linux/suspend.h>
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index b810dbbd0b3e..5147d94b2175 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -91,7 +91,11 @@
+@@ -117,7 +117,11 @@
/*
* The preempt_count offset after spin_lock()
*/
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 5475b66575de..407dbc8bff17 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3372,12 +3372,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1514,12 +1514,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -5092,6 +5092,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -5096,6 +5096,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -5105,6 +5106,7 @@ int __sched __cond_resched_softirq(void)
+@@ -5109,6 +5110,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
index ff69c1662d95..c2e77cee0989 100644
--- a/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
+++ b/patches/cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -260,6 +260,14 @@ struct hotplug_pcp {
+@@ -255,6 +255,14 @@ struct hotplug_pcp {
int grab_lock;
struct completion synced;
#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
index 8d3c7d0febf3..cf7c6bd5bc44 100644
--- a/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
+++ b/patches/cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -210,10 +210,16 @@ static int cpu_hotplug_disabled;
+@@ -205,10 +205,16 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -226,12 +232,24 @@ static struct {
+@@ -221,12 +227,24 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -268,8 +286,8 @@ void pin_current_cpu(void)
+@@ -263,8 +281,8 @@ void pin_current_cpu(void)
return;
}
preempt_enable();
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -342,9 +360,9 @@ void get_online_cpus(void)
+@@ -337,9 +355,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -397,11 +415,11 @@ void cpu_hotplug_begin(void)
+@@ -392,11 +410,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -410,7 +428,7 @@ void cpu_hotplug_begin(void)
+@@ -405,7 +423,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
diff --git a/patches/cpu-rt-rework-cpu-down.patch b/patches/cpu-rt-rework-cpu-down.patch
index 00b2a2b57fcf..4b106ced444a 100644
--- a/patches/cpu-rt-rework-cpu-down.patch
+++ b/patches/cpu-rt-rework-cpu-down.patch
@@ -56,18 +56,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -2479,6 +2479,10 @@ extern void do_set_cpus_allowed(struct t
-
- extern int set_cpus_allowed_ptr(struct task_struct *p,
- const struct cpumask *new_mask);
+@@ -1346,6 +1346,10 @@ extern int task_can_attach(struct task_s
+ #ifdef CONFIG_SMP
+ extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+ extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
#else
- static inline void do_set_cpus_allowed(struct task_struct *p,
- const struct cpumask *new_mask)
-@@ -2491,6 +2495,9 @@ static inline int set_cpus_allowed_ptr(s
+ static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+ {
+@@ -1356,6 +1360,9 @@ static inline int set_cpus_allowed_ptr(s
return -EINVAL;
return 0;
}
@@ -76,10 +76,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static inline void tell_sched_cpu_down_done(int cpu) { }
#endif
- #ifdef CONFIG_NO_HZ_COMMON
+ #ifndef cpu_relax_yield
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -210,16 +210,10 @@ static int cpu_hotplug_disabled;
+@@ -205,16 +205,10 @@ static int cpu_hotplug_disabled;
static struct {
struct task_struct *active_writer;
@@ -96,7 +96,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Also blocks the new readers during
* an ongoing cpu hotplug operation.
-@@ -232,24 +226,12 @@ static struct {
+@@ -227,24 +221,12 @@ static struct {
} cpu_hotplug = {
.active_writer = NULL,
.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
#define cpuhp_lock_acquire_tryread() \
-@@ -257,12 +239,42 @@ static struct {
+@@ -252,12 +234,42 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -164,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
/**
-@@ -276,18 +288,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -271,18 +283,39 @@ static DEFINE_PER_CPU(struct hotplug_pcp
void pin_current_cpu(void)
{
struct hotplug_pcp *hp;
@@ -208,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
goto retry;
}
-@@ -308,26 +341,84 @@ void unpin_current_cpu(void)
+@@ -303,26 +336,84 @@ void unpin_current_cpu(void)
wake_up_process(hp->unplug);
}
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -335,23 +426,83 @@ static int sync_unplug_thread(void *data
+@@ -330,23 +421,83 @@ static int sync_unplug_thread(void *data
static int cpu_unplug_begin(unsigned int cpu)
{
struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
@@ -391,7 +391,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void get_online_cpus(void)
-@@ -360,9 +511,9 @@ void get_online_cpus(void)
+@@ -355,9 +506,9 @@ void get_online_cpus(void)
if (cpu_hotplug.active_writer == current)
return;
cpuhp_lock_acquire_read();
@@ -403,7 +403,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(get_online_cpus);
-@@ -415,11 +566,11 @@ void cpu_hotplug_begin(void)
+@@ -410,11 +561,11 @@ void cpu_hotplug_begin(void)
cpuhp_lock_acquire();
for (;;) {
@@ -417,7 +417,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
schedule();
}
finish_wait(&cpu_hotplug.wq, &wait);
-@@ -428,7 +579,7 @@ void cpu_hotplug_begin(void)
+@@ -423,7 +574,7 @@ void cpu_hotplug_begin(void)
void cpu_hotplug_done(void)
{
cpu_hotplug.active_writer = NULL;
@@ -426,7 +426,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_lock_release();
}
-@@ -907,6 +1058,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -806,6 +957,9 @@ static int takedown_cpu(unsigned int cpu
kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
smpboot_park_threads(cpu);
@@ -438,8 +438,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* interrupt affinities.
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1140,6 +1140,84 @@ void do_set_cpus_allowed(struct task_str
- set_curr_task(rq, p);
+@@ -1111,6 +1111,84 @@ void do_set_cpus_allowed(struct task_str
+ __do_set_cpus_allowed_tail(p, new_mask);
}
+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks);
@@ -479,7 +479,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct task_struct *p = current;
+ struct migration_arg arg;
+ struct cpumask *cpumask;
-+ struct cpumask *mask;
++ const struct cpumask *mask;
+ unsigned int dest_cpu;
+ struct rq_flags rf;
+ struct rq *rq;
@@ -495,7 +495,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ rq = task_rq_lock(p, &rf);
+
+ cpumask = this_cpu_ptr(&sched_cpumasks);
-+ mask = &p->cpus_allowed;
++ mask = p->cpus_ptr;
+
+ cpumask_andnot(cpumask, mask, &sched_down_cpumask);
+
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 574893c165a5..162a2ef7ae6c 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1649,12 +1649,13 @@ void hrtimer_init_sleeper(struct hrtimer
+@@ -1622,12 +1622,13 @@ void hrtimer_init_sleeper(struct hrtimer
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1696,7 +1697,8 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1669,7 +1670,8 @@ long __sched hrtimer_nanosleep_restart(s
HRTIMER_MODE_ABS);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
rmtp = restart->nanosleep.rmtp;
-@@ -1713,8 +1715,10 @@ long __sched hrtimer_nanosleep_restart(s
+@@ -1686,8 +1688,10 @@ long __sched hrtimer_nanosleep_restart(s
return ret;
}
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1727,7 +1731,7 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1700,7 +1704,7 @@ long hrtimer_nanosleep(struct timespec *
hrtimer_init_on_stack(&t.timer, clockid, mode);
hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
/* Absolute timers do not update the rmtp value and restart: */
-@@ -1754,6 +1758,12 @@ long hrtimer_nanosleep(struct timespec *
+@@ -1727,6 +1731,12 @@ long hrtimer_nanosleep(struct timespec *
return ret;
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1780,7 +1790,8 @@ void cpu_chill(void)
+@@ -1753,7 +1763,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
diff --git a/patches/cpu_down_move_migrate_enable_back.patch b/patches/cpu_down_move_migrate_enable_back.patch
index c72544630f49..8846874a132e 100644
--- a/patches/cpu_down_move_migrate_enable_back.patch
+++ b/patches/cpu_down_move_migrate_enable_back.patch
@@ -34,7 +34,7 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1195,6 +1195,7 @@ static int __ref _cpu_down(unsigned int
+@@ -1084,6 +1084,7 @@ static int __ref _cpu_down(unsigned int
goto restore_cpus;
}
@@ -42,11 +42,11 @@ Signed-off-by: Tiejun Chen <tiejun.chen@windriver.com>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
if (ret) {
-@@ -1242,7 +1243,6 @@ static int __ref _cpu_down(unsigned int
+@@ -1130,7 +1131,6 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
- migrate_enable();
- /* This post dead nonsense must die */
- if (!ret && hasdied)
- cpu_notify_nofail(CPU_POST_DEAD, cpu);
+ restore_cpus:
+ set_cpus_allowed_ptr(current, cpumask_org);
+ free_cpumask_var(cpumask_org);
diff --git a/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch b/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
index 7ab6f98f1b5c..6b60722a512f 100644
--- a/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
+++ b/patches/cpufreq-drop-K8-s-driver-from-beeing-selected.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
-@@ -124,7 +124,7 @@ config X86_POWERNOW_K7_ACPI
+@@ -125,7 +125,7 @@ config X86_POWERNOW_K7_ACPI
config X86_POWERNOW_K8
tristate "AMD Opteron/Athlon64 PowerNow!"
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index e36168fbe8da..bdf44b5c8213 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -900,7 +900,7 @@ config IOMMU_HELPER
+@@ -908,7 +908,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -400,6 +400,7 @@ config CHECK_SIGNATURE
+@@ -409,6 +409,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 2fa2b8630019..ee674f25a519 100644
--- a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -45,12 +45,12 @@ Cc: stable-rt@vger.kernel.org
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/cpuset.c | 66 ++++++++++++++++++++++++++++----------------------------
+ kernel/cgroup/cpuset.c | 66 ++++++++++++++++++++++++-------------------------
1 file changed, 33 insertions(+), 33 deletions(-)
---- a/kernel/cpuset.c
-+++ b/kernel/cpuset.c
-@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -286,7 +286,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -907,9 +907,9 @@ static void update_cpumasks_hier(struct
+@@ -909,9 +909,9 @@ static void update_cpumasks_hier(struct
continue;
rcu_read_unlock();
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -974,9 +974,9 @@ static int update_cpumask(struct cpuset
+@@ -976,9 +976,9 @@ static int update_cpumask(struct cpuset
if (retval < 0)
return retval;
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
-@@ -1176,9 +1176,9 @@ static void update_nodemasks_hier(struct
+@@ -1178,9 +1178,9 @@ static void update_nodemasks_hier(struct
continue;
rcu_read_unlock();
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1246,9 +1246,9 @@ static int update_nodemask(struct cpuset
+@@ -1248,9 +1248,9 @@ static int update_nodemask(struct cpuset
if (retval < 0)
goto done;
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1339,9 +1339,9 @@ static int update_flag(cpuset_flagbits_t
+@@ -1341,9 +1341,9 @@ static int update_flag(cpuset_flagbits_t
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -1756,7 +1756,7 @@ static int cpuset_common_seq_show(struct
+@@ -1758,7 +1758,7 @@ static int cpuset_common_seq_show(struct
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
switch (type) {
case FILE_CPULIST:
-@@ -1775,7 +1775,7 @@ static int cpuset_common_seq_show(struct
+@@ -1777,7 +1777,7 @@ static int cpuset_common_seq_show(struct
ret = -EINVAL;
}
@@ -137,7 +137,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1989,12 +1989,12 @@ static int cpuset_css_online(struct cgro
+@@ -1991,12 +1991,12 @@ static int cpuset_css_online(struct cgro
cpuset_inc();
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2021,12 +2021,12 @@ static int cpuset_css_online(struct cgro
+@@ -2023,12 +2023,12 @@ static int cpuset_css_online(struct cgro
}
rcu_read_unlock();
@@ -167,7 +167,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
-@@ -2065,7 +2065,7 @@ static void cpuset_css_free(struct cgrou
+@@ -2067,7 +2067,7 @@ static void cpuset_css_free(struct cgrou
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
@@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2076,7 +2076,7 @@ static void cpuset_bind(struct cgroup_su
+@@ -2078,7 +2078,7 @@ static void cpuset_bind(struct cgroup_su
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&cpuset_mutex);
}
-@@ -2177,12 +2177,12 @@ hotplug_update_tasks_legacy(struct cpuse
+@@ -2179,12 +2179,12 @@ hotplug_update_tasks_legacy(struct cpuse
{
bool is_empty;
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2219,10 +2219,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -2221,10 +2221,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -2308,21 +2308,21 @@ static void cpuset_hotplug_workfn(struct
+@@ -2310,21 +2310,21 @@ static void cpuset_hotplug_workfn(struct
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_tasks_nodemask(&top_cpuset);
}
-@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_str
+@@ -2422,11 +2422,11 @@ void cpuset_cpus_allowed(struct task_str
{
unsigned long flags;
@@ -253,7 +253,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct ta
+@@ -2474,11 +2474,11 @@ nodemask_t cpuset_mems_allowed(struct ta
nodemask_t mask;
unsigned long flags;
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return mask;
}
-@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp
+@@ -2570,14 +2570,14 @@ bool __cpuset_node_allowed(int node, gfp
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
diff --git a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index 0416d434c06f..2dd404c05764 100644
--- a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -203,7 +203,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (walk.nbytes) {
glue_ctr_crypt_final_128bit(
gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
-@@ -347,7 +346,7 @@ int glue_xts_crypt_128bit(const struct c
+@@ -382,7 +381,7 @@ int glue_xts_crypt_128bit(const struct c
void *tweak_ctx, void *crypt_ctx)
{
const unsigned int bsize = 128 / 8;
@@ -212,7 +212,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct blkcipher_walk walk;
int err;
-@@ -360,21 +359,21 @@ int glue_xts_crypt_128bit(const struct c
+@@ -395,21 +394,21 @@ int glue_xts_crypt_128bit(const struct c
/* set minimum length to bsize, for tweak_fn */
fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
diff --git a/patches/debugobjects-rt.patch b/patches/debugobjects-rt.patch
index 571317237f9f..2ab2aad54bde 100644
--- a/patches/debugobjects-rt.patch
+++ b/patches/debugobjects-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
-@@ -308,7 +308,10 @@ static void
+@@ -334,7 +334,10 @@ static void
struct debug_obj *obj;
unsigned long flags;
diff --git a/patches/delayacct-use-raw_spinlocks.patch b/patches/delayacct-use-raw_spinlocks.patch
new file mode 100644
index 000000000000..33018e7e1222
--- /dev/null
+++ b/patches/delayacct-use-raw_spinlocks.patch
@@ -0,0 +1,81 @@
+From 2c887ccff27de53f76fbdedc0afea9fa3be3ea2f Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 20 May 2017 12:32:23 +0200
+Subject: [PATCH] delayacct: use raw_spinlocks
+
+try_to_wake_up() might invoke delayacct_blkio_end() while holding the
+pi_lock. The lock is only held for a short amount of time so it should
+be safe to make it raw.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/delayacct.h | 2 +-
+ kernel/delayacct.c | 14 +++++++-------
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+--- a/include/linux/delayacct.h
++++ b/include/linux/delayacct.h
+@@ -29,7 +29,7 @@
+
+ #ifdef CONFIG_TASK_DELAY_ACCT
+ struct task_delay_info {
+- spinlock_t lock;
++ raw_spinlock_t lock;
+ unsigned int flags; /* Private per-task flags */
+
+ /* For each stat XXX, add following, aligned appropriately
+--- a/kernel/delayacct.c
++++ b/kernel/delayacct.c
+@@ -44,7 +44,7 @@ void __delayacct_tsk_init(struct task_st
+ {
+ tsk->delays = kmem_cache_zalloc(delayacct_cache, GFP_KERNEL);
+ if (tsk->delays)
+- spin_lock_init(&tsk->delays->lock);
++ raw_spin_lock_init(&tsk->delays->lock);
+ }
+
+ /*
+@@ -57,10 +57,10 @@ static void delayacct_end(u64 *start, u6
+ unsigned long flags;
+
+ if (ns > 0) {
+- spin_lock_irqsave(&current->delays->lock, flags);
++ raw_spin_lock_irqsave(&current->delays->lock, flags);
+ *total += ns;
+ (*count)++;
+- spin_unlock_irqrestore(&current->delays->lock, flags);
++ raw_spin_unlock_irqrestore(&current->delays->lock, flags);
+ }
+ }
+
+@@ -119,7 +119,7 @@ int __delayacct_add_tsk(struct taskstats
+
+ /* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
+
+- spin_lock_irqsave(&tsk->delays->lock, flags);
++ raw_spin_lock_irqsave(&tsk->delays->lock, flags);
+ tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
+ d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
+ tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
+@@ -129,7 +129,7 @@ int __delayacct_add_tsk(struct taskstats
+ d->blkio_count += tsk->delays->blkio_count;
+ d->swapin_count += tsk->delays->swapin_count;
+ d->freepages_count += tsk->delays->freepages_count;
+- spin_unlock_irqrestore(&tsk->delays->lock, flags);
++ raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
+
+ return 0;
+ }
+@@ -139,10 +139,10 @@ int __delayacct_add_tsk(struct taskstats
+ __u64 ret;
+ unsigned long flags;
+
+- spin_lock_irqsave(&tsk->delays->lock, flags);
++ raw_spin_lock_irqsave(&tsk->delays->lock, flags);
+ ret = nsec_to_clock_t(tsk->delays->blkio_delay +
+ tsk->delays->swapin_delay);
+- spin_unlock_irqrestore(&tsk->delays->lock, flags);
++ raw_spin_unlock_irqrestore(&tsk->delays->lock, flags);
+ return ret;
+ }
+
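
A rough sketch of the nesting the patch header refers to; the exact call chain
shown here is an assumption based on mainline code, not taken from this patch
set. Per the description, try_to_wake_up() may reach delayacct_blkio_end()
while holding p->pi_lock, which is a raw_spinlock_t, and on PREEMPT_RT a plain
spinlock_t is a sleeping lock that must not be taken underneath it:

	/*
	 * try_to_wake_up()
	 *   raw_spin_lock_irqsave(&p->pi_lock, flags);    raw lock, never sleeps
	 *   ...
	 *   delayacct_blkio_end()
	 *     delayacct_end(...)
	 *       spin_lock_irqsave(&delays->lock, flags);   sleeping lock on RT
	 *                                                  -> hence raw_spinlock_t
	 */
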
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index a4d929a38c46..0ec19f8c25a2 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
-@@ -842,7 +842,7 @@ static void dm_old_request_fn(struct req
+@@ -667,7 +667,7 @@ static void dm_old_request_fn(struct req
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 0af912539047..6b2930760412 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -528,6 +528,8 @@ static struct zram_meta *zram_meta_alloc
+@@ -461,6 +461,8 @@ static struct zram_meta *zram_meta_alloc
goto out_error;
}
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return meta;
out_error:
-@@ -576,12 +578,12 @@ static int zram_decompress_page(struct z
+@@ -511,12 +513,12 @@ static int zram_decompress_page(struct z
unsigned long handle;
unsigned int size;
@@ -33,13 +33,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
handle = meta->table[index].handle;
size = zram_get_obj_size(meta, index);
- if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+ if (!handle || zram_test_flag(meta, index, ZRAM_SAME)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
- memset(mem, 0, PAGE_SIZE);
+ zram_fill_page(mem, PAGE_SIZE, meta->table[index].element);
return 0;
}
-@@ -596,7 +598,7 @@ static int zram_decompress_page(struct z
+@@ -531,7 +533,7 @@ static int zram_decompress_page(struct z
zcomp_stream_put(zram->comp);
}
zs_unmap_object(meta->mem_pool, handle);
@@ -48,17 +48,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Should NEVER happen. Return bio error if it does. */
if (unlikely(ret)) {
-@@ -616,14 +618,14 @@ static int zram_bvec_read(struct zram *z
+@@ -551,14 +553,14 @@ static int zram_bvec_read(struct zram *z
struct zram_meta *meta = zram->meta;
page = bvec->bv_page;
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
if (unlikely(!meta->table[index].handle) ||
- zram_test_flag(meta, index, ZRAM_ZERO)) {
+ zram_test_flag(meta, index, ZRAM_SAME)) {
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
- handle_zero_page(bvec);
+ handle_same_page(bvec, meta->table[index].element);
return 0;
}
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
@@ -66,20 +66,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (is_partial_io(bvec))
/* Use a temporary buffer to decompress the page */
-@@ -700,10 +702,10 @@ static int zram_bvec_write(struct zram *
+@@ -636,11 +638,11 @@ static int zram_bvec_write(struct zram *
if (user_mem)
kunmap_atomic(user_mem);
/* Free memory associated with this sector now. */
- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_lock_table(&meta->table[index]);
zram_free_page(zram, index);
- zram_set_flag(meta, index, ZRAM_ZERO);
+ zram_set_flag(meta, index, ZRAM_SAME);
+ zram_set_element(meta, index, element);
- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
+ zram_unlock_table(&meta->table[index]);
- atomic64_inc(&zram->stats.zero_pages);
+ atomic64_inc(&zram->stats.same_pages);
ret = 0;
-@@ -794,12 +796,12 @@ static int zram_bvec_write(struct zram *
+@@ -731,12 +733,12 @@ static int zram_bvec_write(struct zram *
* Free memory associated with this sector
* before overwriting unused sectors.
*/
@@ -94,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Update stats */
atomic64_add(clen, &zram->stats.compr_data_size);
-@@ -842,9 +844,9 @@ static void zram_bio_discard(struct zram
+@@ -779,9 +781,9 @@ static void zram_bio_discard(struct zram
}
while (n >= PAGE_SIZE) {
@@ -106,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic64_inc(&zram->stats.notify_free);
index++;
n -= PAGE_SIZE;
-@@ -973,9 +975,9 @@ static void zram_slot_free_notify(struct
+@@ -905,9 +907,9 @@ static void zram_slot_free_notify(struct
zram = bdev->bd_disk->private_data;
meta = zram->meta;
@@ -120,9 +121,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -73,6 +73,9 @@ enum zram_pageflags {
- struct zram_table_entry {
- unsigned long handle;
+@@ -76,6 +76,9 @@ struct zram_table_entry {
+ unsigned long element;
+ };
unsigned long value;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ spinlock_t lock;
diff --git a/patches/drivers-net-8139-disable-irq-nosync.patch b/patches/drivers-net-8139-disable-irq-nosync.patch
index 842d45fcd1db..82cd2f3a0f0b 100644
--- a/patches/drivers-net-8139-disable-irq-nosync.patch
+++ b/patches/drivers-net-8139-disable-irq-nosync.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
-@@ -2233,7 +2233,7 @@ static void rtl8139_poll_controller(stru
+@@ -2223,7 +2223,7 @@ static void rtl8139_poll_controller(stru
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
diff --git a/patches/drivers-net-vortex-fix-locking-issues.patch b/patches/drivers-net-vortex-fix-locking-issues.patch
index 543333ebb59a..54eb60b0543d 100644
--- a/patches/drivers-net-vortex-fix-locking-issues.patch
+++ b/patches/drivers-net-vortex-fix-locking-issues.patch
@@ -31,7 +31,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#endif
-@@ -1910,12 +1910,12 @@ static void vortex_tx_timeout(struct net
+@@ -1908,12 +1908,12 @@ static void vortex_tx_timeout(struct net
* Block interrupts because vortex_interrupt does a bare spin_lock()
*/
unsigned long flags;
diff --git a/patches/drivers-random-reduce-preempt-disabled-region.patch b/patches/drivers-random-reduce-preempt-disabled-region.patch
index c41b980a45ec..6cfd0048eae5 100644
--- a/patches/drivers-random-reduce-preempt-disabled-region.patch
+++ b/patches/drivers-random-reduce-preempt-disabled-region.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1028,8 +1028,6 @@ static void add_timer_randomness(struct
+@@ -1017,8 +1017,6 @@ static void add_timer_randomness(struct
} sample;
long delta, delta2, delta3;
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
sample.jiffies = jiffies;
sample.cycles = random_get_entropy();
sample.num = num;
-@@ -1070,7 +1068,6 @@ static void add_timer_randomness(struct
+@@ -1059,7 +1057,6 @@ static void add_timer_randomness(struct
*/
credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}
diff --git a/patches/drivers-tty-fix-omap-lock-crap.patch b/patches/drivers-tty-fix-omap-lock-crap.patch
index b6a54199b9e9..5a66912fe84b 100644
--- a/patches/drivers-tty-fix-omap-lock-crap.patch
+++ b/patches/drivers-tty-fix-omap-lock-crap.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
-@@ -1257,13 +1257,10 @@ serial_omap_console_write(struct console
+@@ -1312,13 +1312,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(up->dev);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the IER then disable the interrupts
-@@ -1292,8 +1289,7 @@ serial_omap_console_write(struct console
+@@ -1347,8 +1344,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
diff --git a/patches/drivers-tty-pl011-irq-disable-madness.patch b/patches/drivers-tty-pl011-irq-disable-madness.patch
index 1f0748cbd370..76d6db60c33f 100644
--- a/patches/drivers-tty-pl011-irq-disable-madness.patch
+++ b/patches/drivers-tty-pl011-irq-disable-madness.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
-@@ -2194,13 +2194,19 @@ pl011_console_write(struct console *co,
+@@ -2222,13 +2222,19 @@ pl011_console_write(struct console *co,
clk_enable(uap->clk);
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the CR then disable the interrupts
-@@ -2224,8 +2230,7 @@ pl011_console_write(struct console *co,
+@@ -2254,8 +2260,7 @@ pl011_console_write(struct console *co,
pl011_write(old_cr, uap, REG_CR);
if (locked)
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index b8c2ead205c2..92d15f6af596 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -42,14 +42,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int zcomp_compress(struct zcomp_strm *zstrm,
-@@ -174,6 +181,7 @@ static int __zcomp_cpu_notifier(struct z
- pr_err("Can't allocate a compression stream\n");
- return NOTIFY_BAD;
- }
-+ spin_lock_init(&zstrm->zcomp_lock);
- *per_cpu_ptr(comp->stream, cpu) = zstrm;
- break;
- case CPU_DEAD:
+@@ -173,6 +180,7 @@ int zcomp_cpu_up_prepare(unsigned int cp
+ pr_err("Can't allocate a compression stream\n");
+ return -ENOMEM;
+ }
++ spin_lock_init(&zstrm->zcomp_lock);
+ *per_cpu_ptr(comp->stream, cpu) = zstrm;
+ return 0;
+ }
--- a/drivers/block/zram/zcomp.h
+++ b/drivers/block/zram/zcomp.h
@@ -14,6 +14,7 @@ struct zcomp_strm {
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -577,6 +577,7 @@ static int zram_decompress_page(struct z
+@@ -512,6 +512,7 @@ static int zram_decompress_page(struct z
struct zram_meta *meta = zram->meta;
unsigned long handle;
unsigned int size;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
zram_lock_table(&meta->table[index]);
handle = meta->table[index].handle;
-@@ -588,16 +589,15 @@ static int zram_decompress_page(struct z
+@@ -523,16 +524,15 @@ static int zram_decompress_page(struct z
return 0;
}
diff --git a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
index bf041ca33200..91240516fae1 100644
--- a/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
+++ b/patches/drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
-@@ -1489,7 +1489,9 @@ execbuf_submit(struct i915_execbuffer_pa
+@@ -1445,7 +1445,9 @@ execbuf_submit(struct i915_execbuffer_pa
if (ret)
return ret;
diff --git a/patches/drm-i915-init-spinlock-properly-on-RT.patch b/patches/drm-i915-init-spinlock-properly-on-RT.patch
new file mode 100644
index 000000000000..a0fbb615dd78
--- /dev/null
+++ b/patches/drm-i915-init-spinlock-properly-on-RT.patch
@@ -0,0 +1,26 @@
+From: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+Date: Mon, 29 May 2017 15:33:52 +0200
+Subject: [PATCH] drm/i915: init spinlock properly on -RT
+
+The lock init is open-coded, so it needs to be fixed up…
+
+Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+---
+ drivers/gpu/drm/i915/i915_gem_timeline.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/gpu/drm/i915/i915_gem_timeline.c
++++ b/drivers/gpu/drm/i915/i915_gem_timeline.c
+@@ -50,7 +50,12 @@ static int __i915_gem_timeline_init(stru
+ tl->fence_context = fences++;
+ tl->common = timeline;
+ #ifdef CONFIG_DEBUG_SPINLOCK
++# ifdef CONFIG_PREEMPT_RT_FULL
++ rt_mutex_init(&tl->lock.lock);
++ __rt_spin_lock_init(&tl->lock, lockname, lockclass);
++# else
+ __raw_spin_lock_init(&tl->lock.rlock, lockname, lockclass);
++# endif
+ #else
+ spin_lock_init(&tl->lock);
+ #endif
diff --git a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index abc871e3196b..2f8e9da43b15 100644
--- a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -812,6 +812,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -867,6 +867,7 @@ static int i915_get_crtc_scanoutpos(stru
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -863,6 +864,7 @@ static int i915_get_crtc_scanoutpos(stru
+@@ -918,6 +919,7 @@ static int i915_get_crtc_scanoutpos(stru
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index a0cd8a8e48f2..59eaf2091ed7 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#include <linux/swork.h>
#include <asm/kmap_types.h>
- #include <asm/uaccess.h>
+ #include <linux/uaccess.h>
@@ -115,7 +116,7 @@ struct kioctx {
struct page **ring_pages;
long nr_pages;
diff --git a/patches/fs-block-rt-support.patch b/patches/fs-block-rt-support.patch
index cd957c2c68a4..d52294701d18 100644
--- a/patches/fs-block-rt-support.patch
+++ b/patches/fs-block-rt-support.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -233,7 +233,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
+@@ -214,7 +214,7 @@ EXPORT_SYMBOL(blk_start_queue_async);
**/
void blk_start_queue(struct request_queue *q)
{
diff --git a/patches/fs-dcache-include-wait.h.patch b/patches/fs-dcache-include-wait.h.patch
deleted file mode 100644
index ed35f8edddba..000000000000
--- a/patches/fs-dcache-include-wait.h.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 14 Sep 2016 11:55:23 +0200
-Subject: fs/dcache: include wait.h
-
-Since commit d9171b934526 ("parallel lookups machinery, part 4 (and
-last)") dcache.h is using but does not include wait.h. It works as long
-as it is included somehow earlier and fails otherwise.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/dcache.h | 1 +
- 1 file changed, 1 insertion(+)
-
---- a/include/linux/dcache.h
-+++ b/include/linux/dcache.h
-@@ -11,6 +11,7 @@
- #include <linux/rcupdate.h>
- #include <linux/lockref.h>
- #include <linux/stringhash.h>
-+#include <linux/wait.h>
-
- struct path;
- struct vfsmount;
diff --git a/patches/fs-dcache-init-in_lookup_hashtable.patch b/patches/fs-dcache-init-in_lookup_hashtable.patch
index 269cf504a89b..2ff9152cd748 100644
--- a/patches/fs-dcache-init-in_lookup_hashtable.patch
+++ b/patches/fs-dcache-init-in_lookup_hashtable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -3604,6 +3604,11 @@ EXPORT_SYMBOL(d_genocide);
+@@ -3610,6 +3610,11 @@ EXPORT_SYMBOL(d_genocide);
void __init vfs_caches_init_early(void)
{
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index d2162306d92c..0140bb1aec0e 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
-@@ -31,6 +31,7 @@
+@@ -32,6 +32,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto repeat;
}
}
-@@ -2324,7 +2336,7 @@ void d_delete(struct dentry * dentry)
+@@ -2330,7 +2342,7 @@ void d_delete(struct dentry * dentry)
if (dentry->d_lockref.count == 1) {
if (!spin_trylock(&inode->i_lock)) {
spin_unlock(&dentry->d_lock);
@@ -94,9 +94,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/namei.h>
+#include <linux/delay.h>
#include <linux/security.h>
+ #include <linux/cred.h>
#include <linux/idr.h>
- #include <linux/init.h> /* init_rootfs */
-@@ -358,7 +359,7 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -357,7 +358,7 @@ int __mnt_want_write(struct vfsmount *m)
smp_mb();
while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
preempt_enable();
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index 576f3167d6fb..554060950cae 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/dcache.c
+++ b/fs/dcache.c
-@@ -2396,21 +2396,24 @@ static inline void end_dir_add(struct in
+@@ -2402,21 +2402,24 @@ static inline void end_dir_add(struct in
static void d_wait_lookup(struct dentry *dentry)
{
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned int hash = name->hash;
struct hlist_bl_head *b = in_lookup_hash(parent, hash);
-@@ -2519,7 +2522,7 @@ void __d_lookup_done(struct dentry *dent
+@@ -2525,7 +2528,7 @@ void __d_lookup_done(struct dentry *dent
hlist_bl_lock(b);
dentry->d_flags &= ~DCACHE_PAR_LOOKUP;
__hlist_bl_del(&dentry->d_u.d_in_lookup_hash);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1626,7 +1626,7 @@ static struct dentry *lookup_slow(const
+@@ -1628,7 +1628,7 @@ static struct dentry *lookup_slow(const
{
struct dentry *dentry = ERR_PTR(-ENOENT), *old;
struct inode *inode = dir->d_inode;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
inode_lock_shared(inode);
/* Don't go there if it's already dead */
-@@ -3083,7 +3083,7 @@ static int lookup_open(struct nameidata
+@@ -3069,7 +3069,7 @@ static int lookup_open(struct nameidata
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -ENOENT;
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -485,7 +485,7 @@ static
+@@ -491,7 +491,7 @@ static
void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -120,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct dentry *dentry;
struct dentry *alias;
struct inode *dir = d_inode(parent);
-@@ -1487,7 +1487,7 @@ int nfs_atomic_open(struct inode *dir, s
+@@ -1493,7 +1493,7 @@ int nfs_atomic_open(struct inode *dir, s
struct file *file, unsigned open_flags,
umode_t mode, int *opened)
{
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1834,7 +1834,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1836,7 +1836,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -632,7 +632,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -665,7 +665,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1490,7 +1490,7 @@ struct nfs_unlinkdata {
+@@ -1513,7 +1513,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
diff --git a/patches/fs-namespace-preemption-fix.patch b/patches/fs-namespace-preemption-fix.patch
index b9434a1f0fd0..8f022a6c6d40 100644
--- a/patches/fs-namespace-preemption-fix.patch
+++ b/patches/fs-namespace-preemption-fix.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/namespace.c
+++ b/fs/namespace.c
-@@ -356,8 +356,11 @@ int __mnt_want_write(struct vfsmount *m)
+@@ -355,8 +355,11 @@ int __mnt_want_write(struct vfsmount *m)
* incremented count after it has set MNT_WRITE_HOLD.
*/
smp_mb();
diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index 32560c308179..2bd5cff05e62 100644
--- a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
-@@ -1802,7 +1802,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1813,7 +1813,11 @@ int nfs_rmdir(struct inode *dir, struct
trace_nfs_rmdir_enter(dir, dentry);
if (d_really_is_positive(dentry)) {
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
error = NFS_PROTO(dir)->rmdir(dir, &dentry->d_name);
/* Ensure the VFS deletes this inode */
switch (error) {
-@@ -1812,7 +1816,11 @@ int nfs_rmdir(struct inode *dir, struct
+@@ -1823,7 +1827,11 @@ int nfs_rmdir(struct inode *dir, struct
case -ENOENT:
nfs_dentry_handle_enoent(dentry);
}
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_nfs_rmdir_exit(dir, dentry, error);
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
-@@ -1957,7 +1957,11 @@ static void init_once(void *foo)
+@@ -1984,7 +1984,11 @@ static void init_once(void *foo)
nfsi->nrequests = 0;
nfsi->commit_info.ncommit = 0;
atomic_set(&nfsi->commit_info.rpcs_out, 0);
@@ -124,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* point dentry is definitely not a root, so we won't need
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
-@@ -165,7 +165,11 @@ struct nfs_inode {
+@@ -161,7 +161,11 @@ struct nfs_inode {
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
diff --git a/patches/fs-ntfs-disable-interrupt-non-rt.patch b/patches/fs-ntfs-disable-interrupt-non-rt.patch
index 811c199e5922..0363487b0bd8 100644
--- a/patches/fs-ntfs-disable-interrupt-non-rt.patch
+++ b/patches/fs-ntfs-disable-interrupt-non-rt.patch
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
-@@ -92,13 +92,13 @@ static void ntfs_end_buffer_async_read(s
+@@ -93,13 +93,13 @@ static void ntfs_end_buffer_async_read(s
ofs = 0;
if (file_ofs < init_size)
ofs = init_size - file_ofs;
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
clear_buffer_uptodate(bh);
-@@ -143,13 +143,13 @@ static void ntfs_end_buffer_async_read(s
+@@ -144,13 +144,13 @@ static void ntfs_end_buffer_async_read(s
recs = PAGE_SIZE / rec_size;
/* Should have been verified before we got here... */
BUG_ON(!recs);
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index e30f00d5fee9..f3e951daf43a 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/buffer.c
+++ b/fs/buffer.c
-@@ -301,8 +301,7 @@ static void end_buffer_async_read(struct
+@@ -303,8 +303,7 @@ static void end_buffer_async_read(struct
* decide that the page is now completely done.
*/
first = page_buffers(page);
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -315,8 +314,7 @@ static void end_buffer_async_read(struct
+@@ -317,8 +316,7 @@ static void end_buffer_async_read(struct
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors and they are all
-@@ -328,9 +326,7 @@ static void end_buffer_async_read(struct
+@@ -330,9 +328,7 @@ static void end_buffer_async_read(struct
return;
still_busy:
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -358,8 +354,7 @@ void end_buffer_async_write(struct buffe
+@@ -360,8 +356,7 @@ void end_buffer_async_write(struct buffe
}
first = page_buffers(page);
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_write(bh);
unlock_buffer(bh);
-@@ -371,15 +366,12 @@ void end_buffer_async_write(struct buffe
+@@ -373,15 +368,12 @@ void end_buffer_async_write(struct buffe
}
tmp = tmp->b_this_page;
}
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3383,6 +3375,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3426,6 +3418,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
recalc_bh_state();
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
-@@ -107,8 +107,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
first = page_buffers(page);
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_buffer_async_read(bh);
unlock_buffer(bh);
tmp = bh;
-@@ -123,8 +122,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -124,8 +123,7 @@ static void ntfs_end_buffer_async_read(s
}
tmp = tmp->b_this_page;
} while (tmp != bh);
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If none of the buffers had errors then we can set the page uptodate,
* but we first have to perform the post read mst fixups, if the
-@@ -159,9 +157,7 @@ static void ntfs_end_buffer_async_read(s
+@@ -160,9 +158,7 @@ static void ntfs_end_buffer_async_read(s
unlock_page(page);
return;
still_busy:
diff --git a/patches/ftrace-Fix-trace-header-alignment.patch b/patches/ftrace-Fix-trace-header-alignment.patch
index 20a019ec397c..670b3d292a75 100644
--- a/patches/ftrace-Fix-trace-header-alignment.patch
+++ b/patches/ftrace-Fix-trace-header-alignment.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2896,17 +2896,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3113,17 +3113,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2935,11 +2935,11 @@ static void print_func_help_header_irq(s
+@@ -3152,11 +3152,11 @@ static void print_func_help_header_irq(s
"# |/ _-----=> need-resched_lazy\n"
"# || / _---=> hardirq/softirq\n"
"# ||| / _--=> preempt-depth\n"
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index d5f6d263ef6e..3236c2a37461 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -56,6 +56,8 @@ struct trace_entry {
+@@ -61,6 +61,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
@@ -23,8 +23,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1909,6 +1909,8 @@ tracing_generic_entry_update(struct trac
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+@@ -1946,6 +1946,8 @@ tracing_generic_entry_update(struct trac
+ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
+
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -2897,9 +2899,10 @@ static void print_lat_help_header(struct
+@@ -3114,9 +3116,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -432,6 +432,11 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -484,6 +484,11 @@ int trace_print_lat_fmt(struct trace_seq
else
trace_seq_putc(s, '.');
diff --git a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index d0443c81bdaa..70e3601e1aa8 100644
--- a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -909,7 +909,9 @@ void exit_pi_state_list(struct task_stru
+@@ -911,7 +911,9 @@ void exit_pi_state_list(struct task_stru
* task still owns the PI-state:
*/
if (head->next != next) {
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch
index 40dc114e9f45..a6f5311b201b 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/futex-requeue-pi-fix.patch
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -135,7 +135,8 @@ static void fixup_rt_mutex_waiters(struc
+@@ -137,7 +137,8 @@ static void fixup_rt_mutex_waiters(struc
static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
{
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1712,6 +1713,35 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1722,6 +1723,35 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
diff --git a/patches/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch b/patches/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
index 21b716ce5196..476d45149fb2 100644
--- a/patches/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
+++ b/patches/futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1775,12 +1775,14 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1785,12 +1785,14 @@ int rt_mutex_wait_proxy_lock(struct rt_m
int ret;
raw_spin_lock_irq(&lock->wait_lock);
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
-@@ -1812,15 +1814,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1822,15 +1824,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
raw_spin_lock_irq(&lock->wait_lock);
/*
diff --git a/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch b/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
index 66da85792465..9ff5501968c5 100644
--- a/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
+++ b/patches/futex-rtmutex-Cure-RT-double-blocking-issue.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2388,6 +2388,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2408,6 +2408,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -34,11 +34,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
raw_spin_lock_irq(&lock->wait_lock);
-@@ -2399,6 +2400,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2419,6 +2420,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
-+
+ /*
+ * RT has a problem here when the wait got interrupted by a timeout
+ * or a signal. task->pi_blocked_on is still set. The task must
@@ -56,6 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ tsk->pi_blocked_on = NULL;
+ raw_spin_unlock(&tsk->pi_lock);
+ }
++
raw_spin_unlock_irq(&lock->wait_lock);
return ret;
diff --git a/patches/futex-workaround-migrate_disable-enable-in-different.patch b/patches/futex-workaround-migrate_disable-enable-in-different.patch
index b73de813e85d..ea63415d1897 100644
--- a/patches/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/patches/futex-workaround-migrate_disable-enable-in-different.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2669,9 +2669,18 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2671,9 +2671,18 @@ static int futex_lock_pi(u32 __user *uad
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret) {
if (ret == 1)
-@@ -2815,10 +2824,21 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2817,10 +2826,21 @@ static int futex_unlock_pi(u32 __user *u
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
diff --git a/patches/genirq-disable-irqpoll-on-rt.patch b/patches/genirq-disable-irqpoll-on-rt.patch
index 4b0751e6c219..9aa92515d4d2 100644
--- a/patches/genirq-disable-irqpoll-on-rt.patch
+++ b/patches/genirq-disable-irqpoll-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
-@@ -442,6 +442,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -454,6 +458,10 @@ module_param(irqfixup, int, 0644);
+@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index c2d12a021a69..320dd408f8f5 100644
--- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -9,25 +9,10 @@ This patch uses swork_queue() instead.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/scsi/qla2xxx/qla_isr.c | 4 +++
- include/linux/interrupt.h | 6 +++++
- kernel/irq/manage.c | 43 ++++++++++++++++++++++++++++++++++++++---
- 3 files changed, 50 insertions(+), 3 deletions(-)
+ include/linux/interrupt.h | 6 ++++++
+ kernel/irq/manage.c | 43 ++++++++++++++++++++++++++++++++++++++++---
+ 2 files changed, 46 insertions(+), 3 deletions(-)
---- a/drivers/scsi/qla2xxx/qla_isr.c
-+++ b/drivers/scsi/qla2xxx/qla_isr.c
-@@ -3125,7 +3125,11 @@ qla24xx_enable_msix(struct qla_hw_data *
- * kref_put().
- */
- kref_get(&qentry->irq_notify.kref);
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ swork_queue(&qentry->irq_notify.swork);
-+#else
- schedule_work(&qentry->irq_notify.work);
-+#endif
- }
-
- /*
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -14,6 +14,7 @@
@@ -60,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -235,7 +235,12 @@ int irq_set_affinity_locked(struct irq_d
+@@ -237,7 +237,12 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@@ -73,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -273,10 +278,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -275,10 +280,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -85,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -298,6 +301,35 @@ static void irq_affinity_notify(struct w
+@@ -300,6 +303,35 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
@@ -121,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -326,7 +358,12 @@ irq_set_affinity_notifier(unsigned int i
+@@ -328,7 +360,12 @@ irq_set_affinity_notifier(unsigned int i
if (notify) {
notify->irq = irq;
kref_init(&notify->kref);
diff --git a/patches/genirq-force-threading.patch b/patches/genirq-force-threading.patch
index 022c3f69daaf..0ee64c401f32 100644
--- a/patches/genirq-force-threading.patch
+++ b/patches/genirq-force-threading.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -406,9 +406,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -418,9 +418,13 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifndef __ARCH_SET_SOFTIRQ_PENDING
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -22,6 +22,7 @@
+@@ -24,6 +24,7 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__read_mostly bool force_irqthreads;
static int __init setup_forced_irqthreads(char *arg)
-@@ -30,6 +31,7 @@ static int __init setup_forced_irqthread
+@@ -32,6 +33,7 @@ static int __init setup_forced_irqthread
return 0;
}
early_param("threadirqs", setup_forced_irqthreads);
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 91cace952f61..59de23114e20 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2111,7 +2111,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2113,7 +2113,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/gpu_don_t_check_for_the_lock_owner.patch b/patches/gpu_don_t_check_for_the_lock_owner.patch
deleted file mode 100644
index 123656cbd367..000000000000
--- a/patches/gpu_don_t_check_for_the_lock_owner.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: gpu: don't check for the lock owner.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/i915_gem_shrinker.c | 2 +-
- drivers/gpu/drm/msm/msm_gem_shrinker.c | 2 +-
- 2 files changed, 2 insertions(+), 2 deletions(-)
-
---- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
-+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
-@@ -40,7 +40,7 @@ static bool mutex_is_locked_by(struct mu
- if (!mutex_is_locked(mutex))
- return false;
-
--#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)
-+#if (defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
---- a/drivers/gpu/drm/msm/msm_gem_shrinker.c
-+++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c
-@@ -23,7 +23,7 @@ static bool mutex_is_locked_by(struct mu
- if (!mutex_is_locked(mutex))
- return false;
-
--#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
-+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE)
- return mutex->owner == task;
- #else
- /* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
index fca9ea6b34c8..a81e4bceaf09 100644
--- a/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
+++ b/patches/hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -418,7 +418,7 @@ static int sync_unplug_thread(void *data
+@@ -413,7 +413,7 @@ static int sync_unplug_thread(void *data
* we don't want any more work on this CPU.
*/
current->flags &= ~PF_NO_SETAFFINITY;
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 4ac72d0a4a88..778db0d943df 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -12,43 +12,35 @@ tasks on the cpu which should be brought down.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/cpu.h | 7 +--
+ include/linux/cpu.h | 5 ++
kernel/cpu.c | 118 ++++++++++++++++++++++++++++++++++++++++++++++++++++
- 2 files changed, 122 insertions(+), 3 deletions(-)
+ kernel/sched/core.c | 4 +
+ 3 files changed, 127 insertions(+)
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -173,9 +173,6 @@ static inline void cpu_notifier_register
- #endif /* CONFIG_SMP */
- extern struct bus_type cpu_subsys;
-
--static inline void pin_current_cpu(void) { }
--static inline void unpin_current_cpu(void) { }
--
- #ifdef CONFIG_HOTPLUG_CPU
- /* Stop CPUs going up and down. */
-
-@@ -185,6 +182,8 @@ extern void get_online_cpus(void);
- extern void put_online_cpus(void);
- extern void cpu_hotplug_disable(void);
+@@ -109,6 +109,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
+ void clear_tasks_mm_cpumask(int cpu);
+ int cpu_down(unsigned int cpu);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
- #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
- #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri)
- #define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
-@@ -202,6 +201,8 @@ static inline void cpu_hotplug_done(void
+
+ #else /* CONFIG_HOTPLUG_CPU */
+
+@@ -118,6 +120,9 @@ static inline void cpu_hotplug_done(void
#define put_online_cpus() do { } while (0)
#define cpu_hotplug_disable() do { } while (0)
#define cpu_hotplug_enable() do { } while (0)
-+static inline void pin_current_cpu(void) { }
-+static inline void unpin_current_cpu(void) { }
- #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
- /* These aren't inline functions due to a GCC bug. */
++static inline void pin_current_cpu(void) { }
++static inline void unpin_current_cpu(void) { }
++
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+ #ifdef CONFIG_PM_SLEEP_SMP
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -239,6 +239,100 @@ static struct {
+@@ -234,6 +234,100 @@ static struct {
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -149,16 +141,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void get_online_cpus(void)
{
-@@ -877,6 +971,8 @@ static int __ref _cpu_down(unsigned int
+@@ -766,6 +860,8 @@ static int __ref _cpu_down(unsigned int
+ {
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
- bool hasdied = false;
+ int mycpu;
+ cpumask_var_t cpumask;
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -884,7 +980,27 @@ static int __ref _cpu_down(unsigned int
+@@ -773,7 +869,27 @@ static int __ref _cpu_down(unsigned int
if (!cpu_present(cpu))
return -EINVAL;
@@ -186,12 +178,39 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpuhp_tasks_frozen = tasks_frozen;
-@@ -923,6 +1039,8 @@ static int __ref _cpu_down(unsigned int
+@@ -811,6 +927,8 @@ static int __ref _cpu_down(unsigned int
+ }
- hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
+ cpu_unplug_done(cpu);
+out_cancel:
cpu_hotplug_done();
- /* This post dead nonsense must die */
- if (!ret && hasdied)
+ return ret;
+ }
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -7471,6 +7471,7 @@ void migrate_disable(void)
+ /* get_online_cpus(); */
+
+ preempt_disable();
++ pin_current_cpu();
+ p->migrate_disable = 1;
+
+ p->cpus_ptr = cpumask_of(smp_processor_id());
+@@ -7535,13 +7536,16 @@ void migrate_enable(void)
+ arg.task = p;
+ arg.dest_cpu = dest_cpu;
+
++ unpin_current_cpu();
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+ /* put_online_cpus(); */
++
+ return;
+ }
+ }
++ unpin_current_cpu();
+ /* put_online_cpus(); */
+ preempt_enable();
+ }
diff --git a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
index 3035be2850bb..1a649d432304 100644
--- a/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
+++ b/patches/hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -318,7 +318,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -313,7 +313,7 @@ static int cpu_unplug_begin(unsigned int
struct task_struct *tsk;
init_completion(&hp->synced);
diff --git a/patches/hotplug-use-migrate-disable.patch b/patches/hotplug-use-migrate-disable.patch
index 790234315e76..501094419116 100644
--- a/patches/hotplug-use-migrate-disable.patch
+++ b/patches/hotplug-use-migrate-disable.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -988,14 +988,13 @@ static int __ref _cpu_down(unsigned int
+@@ -877,14 +877,13 @@ static int __ref _cpu_down(unsigned int
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
@@ -29,11 +29,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
cpu_hotplug_begin();
ret = cpu_unplug_begin(cpu);
-@@ -1044,6 +1043,7 @@ static int __ref _cpu_down(unsigned int
+@@ -932,6 +931,7 @@ static int __ref _cpu_down(unsigned int
cpu_unplug_done(cpu);
out_cancel:
cpu_hotplug_done();
+ migrate_enable();
- /* This post dead nonsense must die */
- if (!ret && hasdied)
- cpu_notify_nofail(CPU_POST_DEAD, cpu);
+ return ret;
+ }
+
diff --git a/patches/hrtimer-enfore-64byte-alignment.patch b/patches/hrtimer-enfore-64byte-alignment.patch
index c76193ac2c83..e6ad9f9136c1 100644
--- a/patches/hrtimer-enfore-64byte-alignment.patch
+++ b/patches/hrtimer-enfore-64byte-alignment.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -127,11 +127,7 @@ struct hrtimer_sleeper {
+@@ -116,11 +116,7 @@ struct hrtimer_sleeper {
struct task_struct *task;
};
diff --git a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
index 56d7c4fd2058..e3b4e25e1e5a 100644
--- a/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
+++ b/patches/hrtimer-fixup-hrtimer-callback-changes-for-preempt-r.patch
@@ -15,10 +15,10 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
include/linux/hrtimer.h | 7 ++
kernel/sched/core.c | 1
kernel/sched/rt.c | 1
- kernel/time/hrtimer.c | 144 ++++++++++++++++++++++++++++++++++++++++++++---
+ kernel/time/hrtimer.c | 143 ++++++++++++++++++++++++++++++++++++++++++++---
kernel/time/tick-sched.c | 1
kernel/watchdog.c | 1
- 6 files changed, 146 insertions(+), 9 deletions(-)
+ 6 files changed, 145 insertions(+), 9 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -30,8 +30,8 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ * @irqsafe: timer can run in hardirq context
* @praecox: timer expiry time if expired at the time of programming
* @is_rel: Set if the timer was armed relative
- * @start_pid: timer statistics field to store the pid of the task which
-@@ -104,6 +106,8 @@ struct hrtimer {
+ *
+@@ -98,6 +100,8 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
@@ -40,7 +40,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
ktime_t praecox;
#endif
-@@ -136,6 +140,7 @@ struct hrtimer_sleeper {
+@@ -125,6 +129,7 @@ struct hrtimer_sleeper {
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @active: red black tree root node for the active timers
@@ -48,7 +48,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
* @get_time: function to retrieve the current time of the clock
* @offset: offset of this clock to the monotonic base
*/
-@@ -144,6 +149,7 @@ struct hrtimer_clock_base {
+@@ -133,6 +138,7 @@ struct hrtimer_clock_base {
int index;
clockid_t clockid;
struct timerqueue_head active;
@@ -56,7 +56,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
ktime_t (*get_time)(void);
ktime_t offset;
} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
-@@ -187,6 +193,7 @@ struct hrtimer_cpu_base {
+@@ -176,6 +182,7 @@ struct hrtimer_cpu_base {
raw_spinlock_t lock;
seqcount_t seq;
struct hrtimer *running;
@@ -66,7 +66,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
unsigned int clock_was_set_seq;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -345,6 +345,7 @@ static void init_rq_hrtick(struct rq *rq
+@@ -352,6 +352,7 @@ static void init_rq_hrtick(struct rq *rq
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
rq->hrtick_timer.function = hrtick;
@@ -76,7 +76,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
static inline void hrtick_clear(struct rq *rq)
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -47,6 +47,7 @@ void init_rt_bandwidth(struct rt_bandwid
+@@ -48,6 +48,7 @@ void init_rt_bandwidth(struct rt_bandwid
hrtimer_init(&rt_b->rt_period_timer,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -100,7 +100,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }
-@@ -873,7 +870,7 @@ void hrtimer_wait_for_timer(const struct
+@@ -845,7 +842,7 @@ void hrtimer_wait_for_timer(const struct
{
struct hrtimer_clock_base *base = timer->base;
@@ -109,7 +109,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
wait_event(base->cpu_base->wait,
!(hrtimer_callback_running(timer)));
}
-@@ -923,6 +920,11 @@ static void __remove_hrtimer(struct hrti
+@@ -895,6 +892,11 @@ static void __remove_hrtimer(struct hrti
if (!(state & HRTIMER_STATE_ENQUEUED))
return;
@@ -121,15 +121,15 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);
-@@ -1163,6 +1165,7 @@ static void __hrtimer_init(struct hrtime
+@@ -1144,6 +1146,7 @@ static void __hrtimer_init(struct hrtime
base = hrtimer_clockid_to_base(clock_id);
timer->base = &cpu_base->clock_base[base];
+ INIT_LIST_HEAD(&timer->cb_entry);
timerqueue_init(&timer->node);
+ }
- #ifdef CONFIG_TIMER_STATS
-@@ -1203,6 +1206,7 @@ bool hrtimer_active(const struct hrtimer
+@@ -1178,6 +1181,7 @@ bool hrtimer_active(const struct hrtimer
seq = raw_read_seqcount_begin(&cpu_base->seq);
if (timer->state != HRTIMER_STATE_INACTIVE ||
@@ -137,7 +137,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
cpu_base->running == timer)
return true;
-@@ -1301,12 +1305,112 @@ static void __run_hrtimer(struct hrtimer
+@@ -1275,12 +1279,111 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -204,7 +204,6 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+ raw_write_seqcount_barrier(&cpu_base->seq);
+
+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-+ timer_stats_account_hrtimer(timer);
+ fn = timer->function;
+
+ raw_spin_unlock_irq(&cpu_base->lock);
@@ -250,8 +249,8 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
for (; active; base++, active >>= 1) {
struct timerqueue_node *node;
-@@ -1346,9 +1450,14 @@ static void __hrtimer_run_queues(struct
- if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+@@ -1320,9 +1423,14 @@ static void __hrtimer_run_queues(struct
+ if (basenow < hrtimer_get_softexpires_tv64(timer))
break;
- __run_hrtimer(cpu_base, base, timer, &basenow);
@@ -266,7 +265,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
#ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1490,8 +1599,6 @@ void hrtimer_run_queues(void)
+@@ -1464,8 +1572,6 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -275,7 +274,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
/*
-@@ -1513,6 +1620,7 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1487,6 +1593,7 @@ static enum hrtimer_restart hrtimer_wake
void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
sl->timer.function = hrtimer_wakeup;
@@ -283,7 +282,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
-@@ -1647,6 +1755,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1621,6 +1728,7 @@ int hrtimers_prepare_cpu(unsigned int cp
for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
cpu_base->clock_base[i].cpu_base = cpu_base;
timerqueue_init_head(&cpu_base->clock_base[i].active);
@@ -291,7 +290,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
}
cpu_base->cpu = cpu;
-@@ -1723,9 +1832,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
+@@ -1697,9 +1805,26 @@ int hrtimers_dead_cpu(unsigned int scpu)
#endif /* CONFIG_HOTPLUG_CPU */
@@ -320,7 +319,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/**
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -1198,6 +1198,7 @@ void tick_setup_sched_timer(void)
+@@ -1197,6 +1197,7 @@ void tick_setup_sched_timer(void)
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -330,7 +329,7 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
/* Get the next period (per-CPU) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -522,6 +522,7 @@ static void watchdog_enable(unsigned int
+@@ -384,6 +384,7 @@ static void watchdog_enable(unsigned int
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer->function = watchdog_timer_fn;
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 67e796f4c060..5475441f76c6 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -9,15 +9,23 @@ Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/hrtimer.h | 12 +++++++++++-
+ include/linux/hrtimer.h | 13 ++++++++++++-
kernel/time/hrtimer.c | 33 ++++++++++++++++++++++++++++++++-
kernel/time/itimer.c | 1 +
kernel/time/posix-timers.c | 33 +++++++++++++++++++++++++++++++++
- 4 files changed, 77 insertions(+), 2 deletions(-)
+ 4 files changed, 78 insertions(+), 2 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -207,6 +207,9 @@ struct hrtimer_cpu_base {
+@@ -22,6 +22,7 @@
+ #include <linux/percpu.h>
+ #include <linux/timer.h>
+ #include <linux/timerqueue.h>
++#include <linux/wait.h>
+
+ struct hrtimer_clock_base;
+ struct hrtimer_cpu_base;
+@@ -195,6 +196,9 @@ struct hrtimer_cpu_base {
unsigned int nr_hangs;
unsigned int max_hang_time;
#endif
@@ -27,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
-@@ -416,6 +419,13 @@ static inline void hrtimer_restart(struc
+@@ -404,6 +408,13 @@ static inline void hrtimer_restart(struc
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -41,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-@@ -440,7 +450,7 @@ static inline int hrtimer_is_queued(stru
+@@ -428,7 +439,7 @@ static inline int hrtimer_is_queued(stru
* Helper function to check, whether the timer is running the callback
* function
*/
@@ -52,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -856,6 +856,32 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -828,6 +828,32 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -85,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1073,7 +1099,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1042,7 +1068,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -94,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1464,6 +1490,8 @@ void hrtimer_run_queues(void)
+@@ -1438,6 +1464,8 @@ void hrtimer_run_queues(void)
now = hrtimer_update_base(cpu_base);
__hrtimer_run_queues(cpu_base, now);
raw_spin_unlock(&cpu_base->lock);
@@ -103,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1623,6 +1651,9 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1597,6 +1625,9 @@ int hrtimers_prepare_cpu(unsigned int cp
cpu_base->cpu = cpu;
hrtimer_init_hres(cpu_base);
@@ -115,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
-@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
+@@ -195,6 +195,7 @@ int do_setitimer(int which, struct itime
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
@@ -125,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
expires = timeval_to_ktime(value->it_value);
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
-@@ -828,6 +828,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
+@@ -829,6 +829,20 @@ SYSCALL_DEFINE1(timer_getoverrun, timer_
return overrun;
}
@@ -146,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Set a POSIX.1b interval timer. */
/* timr->it_lock is taken. */
static int
-@@ -905,6 +919,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -906,6 +920,7 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
if (!timr)
return -EINVAL;
@@ -154,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kc = clockid_to_kclock(timr->it_clock);
if (WARN_ON_ONCE(!kc || !kc->timer_set))
error = -EINVAL;
-@@ -913,9 +928,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
+@@ -914,9 +929,12 @@ SYSCALL_DEFINE4(timer_settime, timer_t,
unlock_timer(timr, flag);
if (error == TIMER_RETRY) {
@@ -167,7 +175,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (old_setting && !error &&
copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -953,10 +971,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
+@@ -954,10 +972,15 @@ SYSCALL_DEFINE1(timer_delete, timer_t, t
if (!timer)
return -EINVAL;
@@ -183,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock(&current->sighand->siglock);
list_del(&timer->list);
-@@ -982,8 +1005,18 @@ static void itimer_delete(struct k_itime
+@@ -983,8 +1006,18 @@ static void itimer_delete(struct k_itime
retry_delete:
spin_lock_irqsave(&timer->it_lock, flags);
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
index 898a270e27f0..c32694faab8a 100644
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
@@ -18,8 +18,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -12131,7 +12131,7 @@ void intel_check_page_flip(struct drm_i9
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+@@ -12113,7 +12113,7 @@ void intel_check_page_flip(struct drm_i9
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
struct intel_flip_work *work;
- WARN_ON(!in_interrupt());
diff --git a/patches/ide-use-nort-local-irq-variants.patch b/patches/ide-use-nort-local-irq-variants.patch
index 7bbc795cca9b..974d1c716018 100644
--- a/patches/ide-use-nort-local-irq-variants.patch
+++ b/patches/ide-use-nort-local-irq-variants.patch
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
-@@ -659,7 +659,7 @@ void ide_timer_expiry (unsigned long dat
+@@ -660,7 +660,7 @@ void ide_timer_expiry (unsigned long dat
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
/* local CPU only, as if we were handling an interrupt */
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef DEBUG
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
-@@ -250,7 +250,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+@@ -251,7 +251,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
page_is_high = PageHighMem(page);
if (page_is_high)
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
buf = kmap_atomic(page) + offset;
-@@ -271,7 +271,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
+@@ -272,7 +272,7 @@ void ide_pio_bytes(ide_drive_t *drive, s
kunmap_atomic(buf);
if (page_is_high)
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
len -= nr_bytes;
}
-@@ -414,7 +414,7 @@ static ide_startstop_t pre_task_out_intr
+@@ -415,7 +415,7 @@ static ide_startstop_t pre_task_out_intr
}
if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
diff --git a/patches/idr-use-local-lock-for-protection.patch b/patches/idr-use-local-lock-for-protection.patch
deleted file mode 100644
index 32dc36c69e92..000000000000
--- a/patches/idr-use-local-lock-for-protection.patch
+++ /dev/null
@@ -1,123 +0,0 @@
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Tue, 14 Jul 2015 14:26:34 +0200
-Subject: idr: Use local lock instead of preempt enable/disable
-
-We need to protect the per cpu variable and prevent migration.
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- include/linux/idr.h | 4 ++++
- lib/idr.c | 43 +++++++++++++++++++++++++++++++++++++------
- 2 files changed, 41 insertions(+), 6 deletions(-)
-
---- a/include/linux/idr.h
-+++ b/include/linux/idr.h
-@@ -95,10 +95,14 @@ bool idr_is_empty(struct idr *idp);
- * Each idr_preload() should be matched with an invocation of this
- * function. See idr_preload() for details.
- */
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void idr_preload_end(void);
-+#else
- static inline void idr_preload_end(void)
- {
- preempt_enable();
- }
-+#endif
-
- /**
- * idr_find - return pointer for given id
---- a/lib/idr.c
-+++ b/lib/idr.c
-@@ -30,6 +30,7 @@
- #include <linux/idr.h>
- #include <linux/spinlock.h>
- #include <linux/percpu.h>
-+#include <linux/locallock.h>
-
- #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1)
- #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT)
-@@ -45,6 +46,37 @@ static DEFINE_PER_CPU(struct idr_layer *
- static DEFINE_PER_CPU(int, idr_preload_cnt);
- static DEFINE_SPINLOCK(simple_ida_lock);
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static DEFINE_LOCAL_IRQ_LOCK(idr_lock);
-+
-+static inline void idr_preload_lock(void)
-+{
-+ local_lock(idr_lock);
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+ local_unlock(idr_lock);
-+}
-+
-+void idr_preload_end(void)
-+{
-+ idr_preload_unlock();
-+}
-+EXPORT_SYMBOL(idr_preload_end);
-+#else
-+static inline void idr_preload_lock(void)
-+{
-+ preempt_disable();
-+}
-+
-+static inline void idr_preload_unlock(void)
-+{
-+ preempt_enable();
-+}
-+#endif
-+
-+
- /* the maximum ID which can be allocated given idr->layers */
- static int idr_max(int layers)
- {
-@@ -115,14 +147,14 @@ static struct idr_layer *idr_layer_alloc
- * context. See idr_preload() for details.
- */
- if (!in_interrupt()) {
-- preempt_disable();
-+ idr_preload_lock();
- new = __this_cpu_read(idr_preload_head);
- if (new) {
- __this_cpu_write(idr_preload_head, new->ary[0]);
- __this_cpu_dec(idr_preload_cnt);
- new->ary[0] = NULL;
- }
-- preempt_enable();
-+ idr_preload_unlock();
- if (new)
- return new;
- }
-@@ -366,7 +398,6 @@ static void idr_fill_slot(struct idr *id
- idr_mark_full(pa, id);
- }
-
--
- /**
- * idr_preload - preload for idr_alloc()
- * @gfp_mask: allocation mask to use for preloading
-@@ -401,7 +432,7 @@ void idr_preload(gfp_t gfp_mask)
- WARN_ON_ONCE(in_interrupt());
- might_sleep_if(gfpflags_allow_blocking(gfp_mask));
-
-- preempt_disable();
-+ idr_preload_lock();
-
- /*
- * idr_alloc() is likely to succeed w/o full idr_layer buffer and
-@@ -413,9 +444,9 @@ void idr_preload(gfp_t gfp_mask)
- while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) {
- struct idr_layer *new;
-
-- preempt_enable();
-+ idr_preload_unlock();
- new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
-- preempt_disable();
-+ idr_preload_lock();
- if (!new)
- break;
-
diff --git a/patches/introduce_migrate_disable_cpu_light.patch b/patches/introduce_migrate_disable_cpu_light.patch
index d57ee426654b..5eda023568c6 100644
--- a/patches/introduce_migrate_disable_cpu_light.patch
+++ b/patches/introduce_migrate_disable_cpu_light.patch
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int nr_cpus_allowed;
cpumask_t cpus_allowed;
-@@ -1997,14 +2003,6 @@ static inline struct vm_struct *task_sta
+@@ -1991,14 +1997,6 @@ static inline struct vm_struct *task_sta
}
#endif
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TNF_MIGRATED 0x01
#define TNF_NO_GROUP 0x02
#define TNF_SHARED 0x04
-@@ -3522,6 +3520,31 @@ static inline void set_task_cpu(struct t
+@@ -3516,6 +3514,31 @@ static inline void set_task_cpu(struct t
#endif /* CONFIG_SMP */
diff --git a/patches/iommu-amd--Use-WARN_ON_NORT.patch b/patches/iommu-amd--Use-WARN_ON_NORT.patch
index 6cc705c25c66..177838406a15 100644
--- a/patches/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/patches/iommu-amd--Use-WARN_ON_NORT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -1923,10 +1923,10 @@ static int __attach_device(struct iommu_
+@@ -1929,10 +1929,10 @@ static int __attach_device(struct iommu_
int ret;
/*
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* lock domain */
spin_lock(&domain->lock);
-@@ -2094,10 +2094,10 @@ static void __detach_device(struct iommu
+@@ -2100,10 +2100,10 @@ static void __detach_device(struct iommu
struct protection_domain *domain;
/*
diff --git a/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch b/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
index 8b459e50a97a..47f9642a0ade 100644
--- a/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
+++ b/patches/iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool iova_rcache_insert(struct iova_domain *iovad,
unsigned long pfn,
-@@ -420,10 +421,8 @@ alloc_iova_fast(struct iova_domain *iova
+@@ -419,10 +420,8 @@ alloc_iova_fast(struct iova_domain *iova
/* Try replenishing IOVAs by flushing rcache. */
flushed_rcache = true;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto retry;
}
-@@ -751,7 +750,7 @@ static bool __iova_rcache_insert(struct
+@@ -750,7 +749,7 @@ static bool __iova_rcache_insert(struct
bool can_insert = false;
unsigned long flags;
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_full(cpu_rcache->loaded)) {
-@@ -781,7 +780,6 @@ static bool __iova_rcache_insert(struct
+@@ -780,7 +779,6 @@ static bool __iova_rcache_insert(struct
iova_magazine_push(cpu_rcache->loaded, iova_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (mag_to_free) {
iova_magazine_free_pfns(mag_to_free, iovad);
-@@ -815,7 +813,7 @@ static unsigned long __iova_rcache_get(s
+@@ -814,7 +812,7 @@ static unsigned long __iova_rcache_get(s
bool has_pfn = false;
unsigned long flags;
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock_irqsave(&cpu_rcache->lock, flags);
if (!iova_magazine_empty(cpu_rcache->loaded)) {
-@@ -837,7 +835,6 @@ static unsigned long __iova_rcache_get(s
+@@ -836,7 +834,6 @@ static unsigned long __iova_rcache_get(s
iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn);
spin_unlock_irqrestore(&cpu_rcache->lock, flags);
diff --git a/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
index 84f2aac616bd..8276ad7a54f0 100644
--- a/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
+++ b/patches/iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
-@@ -479,7 +479,7 @@ struct deferred_flush_data {
+@@ -480,7 +480,7 @@ struct deferred_flush_data {
struct deferred_flush_table *tables;
};
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* bitmap for indexing intel_iommus */
static int g_num_of_iommus;
-@@ -3719,10 +3719,8 @@ static void add_unmap(struct dmar_domain
+@@ -3720,10 +3720,8 @@ static void add_unmap(struct dmar_domain
struct intel_iommu *iommu;
struct deferred_flush_entry *entry;
struct deferred_flush_data *flush_data;
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Flush all CPUs' entries to avoid deferring too much. If
* this becomes a bottleneck, can just flush us, and rely on
-@@ -3755,8 +3753,6 @@ static void add_unmap(struct dmar_domain
+@@ -3756,8 +3754,6 @@ static void add_unmap(struct dmar_domain
}
flush_data->size++;
spin_unlock_irqrestore(&flush_data->lock, flags);
diff --git a/patches/ipc-sem-rework-semaphore-wakeups.patch b/patches/ipc-sem-rework-semaphore-wakeups.patch
deleted file mode 100644
index 91daf21867af..000000000000
--- a/patches/ipc-sem-rework-semaphore-wakeups.patch
+++ /dev/null
@@ -1,69 +0,0 @@
-Subject: ipc/sem: Rework semaphore wakeups
-From: Peter Zijlstra <peterz@infradead.org>
-Date: Wed, 14 Sep 2011 11:57:04 +0200
-
-Current sysv sems have a weird ass wakeup scheme that involves keeping
-preemption disabled over a potential O(n^2) loop and busy waiting on
-that on other CPUs.
-
-Kill this and simply wake the task directly from under the sem_lock.
-
-This was discovered by a migrate_disable() debug feature that
-disallows:
-
- spin_lock();
- preempt_disable();
- spin_unlock()
- preempt_enable();
-
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Suggested-by: Thomas Gleixner <tglx@linutronix.de>
-Reported-by: Mike Galbraith <efault@gmx.de>
-Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
-Cc: Manfred Spraul <manfred@colorfullife.com>
-Link: http://lkml.kernel.org/r/1315994224.5040.1.camel@twins
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
----
- ipc/sem.c | 10 ++++++++++
- 1 file changed, 10 insertions(+)
-
---- a/ipc/sem.c
-+++ b/ipc/sem.c
-@@ -712,6 +712,13 @@ static int perform_atomic_semop(struct s
- static void wake_up_sem_queue_prepare(struct list_head *pt,
- struct sem_queue *q, int error)
- {
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *p = q->sleeper;
-+ get_task_struct(p);
-+ q->status = error;
-+ wake_up_process(p);
-+ put_task_struct(p);
-+#else
- if (list_empty(pt)) {
- /*
- * Hold preempt off so that we don't get preempted and have the
-@@ -723,6 +730,7 @@ static void wake_up_sem_queue_prepare(st
- q->pid = error;
-
- list_add_tail(&q->list, pt);
-+#endif
- }
-
- /**
-@@ -736,6 +744,7 @@ static void wake_up_sem_queue_prepare(st
- */
- static void wake_up_sem_queue_do(struct list_head *pt)
- {
-+#ifndef CONFIG_PREEMPT_RT_BASE
- struct sem_queue *q, *t;
- int did_something;
-
-@@ -748,6 +757,7 @@ static void wake_up_sem_queue_do(struct
- }
- if (did_something)
- preempt_enable();
-+#endif
- }
-
- static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index d7e678836393..5ba4ad58d1d1 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -881,7 +881,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -883,7 +883,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1338,6 +1346,9 @@ static int
+@@ -1340,6 +1348,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
diff --git a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
index 33b7c138ec91..3301b02274f1 100644
--- a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -55,7 +55,7 @@ Cc: stable-rt@vger.kernel.org
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
+@@ -1604,7 +1604,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -64,7 +64,7 @@ Cc: stable-rt@vger.kernel.org
if (in_irq())
irq_work_tick();
#endif
-@@ -1684,9 +1684,7 @@ static __latent_entropy void run_timer_s
+@@ -1645,9 +1645,7 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index 7af377095b54..cdd39f20c119 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -103,6 +103,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -153,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* We start is dequeued state, because no RT tasks are queued */
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -220,6 +220,7 @@ static void nohz_full_kick_func(struct i
+@@ -224,6 +224,7 @@ static void nohz_full_kick_func(struct i
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_func,
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1644,7 +1644,7 @@ void update_process_times(int user_tick)
+@@ -1604,7 +1604,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (in_irq())
irq_work_tick();
#endif
-@@ -1684,6 +1684,10 @@ static __latent_entropy void run_timer_s
+@@ -1645,6 +1645,10 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index aa6a73639ac4..1f4ee1ad694b 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -36,7 +36,7 @@ config ARM
+@@ -42,7 +42,7 @@ config ARM
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
select HAVE_ARCH_HARDENED_USERCOPY
diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch
index 26325d6f706a..5c9ecdc7c8ac 100644
--- a/patches/kconfig-disable-a-few-options-rt.patch
+++ b/patches/kconfig-disable-a-few-options-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -9,6 +9,7 @@ config OPROFILE
+@@ -12,6 +12,7 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
diff --git a/patches/kernel-SRCU-provide-a-static-initializer.patch b/patches/kernel-SRCU-provide-a-static-initializer.patch
index 92b19ed986f3..e67e5b256565 100644
--- a/patches/kernel-SRCU-provide-a-static-initializer.patch
+++ b/patches/kernel-SRCU-provide-a-static-initializer.patch
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
RAW_NOTIFIER_INIT(name)
+#define _SRCU_NOTIFIER_HEAD(name, mod) \
-+ static DEFINE_PER_CPU(struct srcu_struct_array, \
++ static DEFINE_PER_CPU(struct srcu_array, \
+ name##_head_srcu_array); \
+ mod struct srcu_notifier_head name = \
+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -119,7 +119,7 @@ void process_srcu(struct work_struct *wo
*/
#define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
+ static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array)
#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
diff --git a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
index 06355cda6d67..a415094d8443 100644
--- a/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
+++ b/patches/kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -259,6 +259,7 @@ struct hotplug_pcp {
+@@ -254,6 +254,7 @@ struct hotplug_pcp {
int refcount;
int grab_lock;
struct completion synced;
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RT_FULL
/*
* Note, on PREEMPT_RT, the hotplug lock must save the state of
-@@ -362,6 +363,7 @@ static int sync_unplug_thread(void *data
+@@ -357,6 +358,7 @@ static int sync_unplug_thread(void *data
{
struct hotplug_pcp *hp = data;
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_disable();
hp->unplug = current;
wait_for_pinned_cpus(hp);
-@@ -427,6 +429,14 @@ static void __cpu_unplug_sync(struct hot
+@@ -422,6 +424,14 @@ static void __cpu_unplug_sync(struct hot
wait_for_completion(&hp->synced);
}
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Start the sync_unplug_thread on the target cpu and wait for it to
* complete.
-@@ -450,6 +460,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -445,6 +455,7 @@ static int cpu_unplug_begin(unsigned int
tell_sched_cpu_down_begin(cpu);
init_completion(&hp->synced);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
if (IS_ERR(hp->sync_tsk)) {
-@@ -465,8 +476,7 @@ static int cpu_unplug_begin(unsigned int
+@@ -460,8 +471,7 @@ static int cpu_unplug_begin(unsigned int
* wait for tasks that are going to enter these sections and
* we must not have them block.
*/
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -1062,6 +1072,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -961,6 +971,7 @@ static int takedown_cpu(unsigned int cpu
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
diff --git a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
index 0d01ea900edb..0e5f75631782 100644
--- a/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
+++ b/patches/kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
@@ -15,15 +15,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1166,6 +1166,7 @@ static int __ref _cpu_down(unsigned int
- bool hasdied = false;
+@@ -1055,6 +1055,7 @@ static int __ref _cpu_down(unsigned int
+ int prev_state, ret = 0;
int mycpu;
cpumask_var_t cpumask;
+ cpumask_var_t cpumask_org;
if (num_online_cpus() == 1)
return -EBUSY;
-@@ -1176,6 +1177,12 @@ static int __ref _cpu_down(unsigned int
+@@ -1065,6 +1066,12 @@ static int __ref _cpu_down(unsigned int
/* Move the downtaker off the unplug cpu */
if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
return -ENOMEM;
@@ -32,11 +32,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return -ENOMEM;
+ }
+
-+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
++ cpumask_copy(cpumask_org, &current->cpus_mask);
cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
set_cpus_allowed_ptr(current, cpumask);
free_cpumask_var(cpumask);
-@@ -1184,7 +1191,8 @@ static int __ref _cpu_down(unsigned int
+@@ -1073,7 +1080,8 @@ static int __ref _cpu_down(unsigned int
if (mycpu == cpu) {
printk(KERN_ERR "Yuck! Still on unplug CPU\n!");
migrate_enable();
@@ -46,10 +46,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_hotplug_begin();
-@@ -1238,6 +1246,9 @@ static int __ref _cpu_down(unsigned int
- /* This post dead nonsense must die */
- if (!ret && hasdied)
- cpu_notify_nofail(CPU_POST_DEAD, cpu);
+@@ -1123,6 +1131,9 @@ static int __ref _cpu_down(unsigned int
+ out_cancel:
+ cpu_hotplug_done();
+ migrate_enable();
+restore_cpus:
+ set_cpus_allowed_ptr(current, cpumask_org);
+ free_cpumask_var(cpumask_org);
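Reading aid, not part of the patch: the hunks above amount to a save / restrict / restore dance on the caller's CPU affinity around the hotplug operation. Under the assumption that a user-space analogy helps, the same pattern can be sketched with sched_getaffinity()/sched_setaffinity(); avoid_cpu and everything else here is an invented stand-in, not kernel code.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Illustration only: user-space model of the save / restrict / restore pattern. */
int main(void)
{
	cpu_set_t mask_org, mask;
	int avoid_cpu = 0;	/* stands in for the CPU being unplugged */

	/* Save the original affinity (cpumask_org in the hunks above). */
	if (sched_getaffinity(0, sizeof(mask_org), &mask_org))
		return 1;

	/* Restrict: keep running anywhere except avoid_cpu. */
	mask = mask_org;
	CPU_CLR(avoid_cpu, &mask);
	if (CPU_COUNT(&mask) == 0 ||
	    sched_setaffinity(0, sizeof(mask), &mask))
		return 1;

	printf("working away from CPU %d, now on CPU %d\n",
	       avoid_cpu, sched_getcpu());

	/* Restore the saved mask on the way out, as the patch now does. */
	sched_setaffinity(0, sizeof(mask_org), &mask_org);
	return 0;
}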
diff --git a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch b/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
deleted file mode 100644
index d5756a116d83..000000000000
--- a/patches/kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
+++ /dev/null
@@ -1,33 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 9 Feb 2016 18:18:01 +0100
-Subject: kernel: migrate_disable() do fastpath in atomic &
- irqs-off
-
-With interrupts off it makes no sense to do the long path since we can't
-leave the CPU anyway. Also we might end up in a recursion with lockdep.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/sched/core.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
---- a/kernel/sched/core.c
-+++ b/kernel/sched/core.c
-@@ -3299,7 +3299,7 @@ void migrate_disable(void)
- {
- struct task_struct *p = current;
-
-- if (in_atomic()) {
-+ if (in_atomic() || irqs_disabled()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic++;
- #endif
-@@ -3326,7 +3326,7 @@ void migrate_enable(void)
- {
- struct task_struct *p = current;
-
-- if (in_atomic()) {
-+ if (in_atomic() || irqs_disabled()) {
- #ifdef CONFIG_SCHED_DEBUG
- p->migrate_disable_atomic--;
- #endif
diff --git a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
index 03f935ccc658..44e7ac4418db 100644
--- a/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
+++ b/patches/kernel-perf-mark-perf_cpu_context-s-timer-as-irqsafe.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -1050,6 +1050,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1043,6 +1043,7 @@ static void __perf_mux_hrtimer_init(stru
raw_spin_lock_init(&cpuctx->hrtimer_lock);
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
timer->function = perf_mux_hrtimer_handler;
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index b89ee3470823..63a372184a3e 100644
--- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1628,6 +1628,11 @@ static void call_console_drivers(int lev
+@@ -1630,6 +1630,11 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
-@@ -2556,6 +2561,11 @@ void console_unblank(void)
+@@ -2357,6 +2362,11 @@ void console_unblank(void)
{
struct console *c;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
new file mode 100644
index 000000000000..2549d7294409
--- /dev/null
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -0,0 +1,760 @@
+From 866f2c8a7f0eec01a72cceeb73bab62eb3624694 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 4 Apr 2017 12:50:16 +0200
+Subject: [PATCH] kernel: sched: Provide a pointer to the valid CPU mask
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+In commit 4b53a3412d66 ("sched/core: Remove the tsk_nr_cpus_allowed()
+wrapper") the tsk_nr_cpus_allowed() wrapper was removed. There was not
+much difference in !RT but in RT we used this to implement
+migrate_disable(). Within a migrate_disable() section the CPU mask is
+restricted to a single CPU while the "normal" CPU mask remains untouched.
+
+As an alternative implementation Ingo suggested to use
+ struct task_struct {
+ const cpumask_t *cpus_ptr;
+ cpumask_t cpus_mask;
+ };
+with
+	t->cpus_ptr = &t->cpus_mask;
+
+In -RT we then can switch the cpus_ptr to
+	t->cpus_ptr = cpumask_of(task_cpu(p));
+
+in a migration disabled region. The rules are simple:
+- Code that 'uses' ->cpus_allowed would use the pointer.
+- Code that 'modifies' ->cpus_allowed would use the direct mask.
+
+While converting the existing users I tried to stick with the rules
+above, however… well, mostly CPUFREQ tries to temporarily switch the CPU
+mask to do something on a certain CPU and then switches the mask back to
+its original value. So in theory `cpus_ptr' could or should be used.
+However, if this were invoked in a migration-disabled region (which is
+not the case, because that would require something like
+preempt_disable(), and set_cpus_allowed_ptr() might sleep, so it can't
+be) then the "restore" part would restore the wrong mask. So it only
+looks strange and I go for the pointer…
+
+Some drivers copy the cpumask without cpumask_copy() and others use
+cpumask_copy() but without alloc_cpumask_var(). I did not fix those as
+part of this; that could be done as a follow-up…
+
+So is this the way we want it?
+Is the usage of `cpus_ptr' vs `cpus_mask' for the set + restore part
+(see cpufreq users) what we want? At some point it looks like they
+should use a different interface for what they are doing. I am not sure
+why switching to a certain CPU is important, but maybe it could be done
+via a workqueue from the CPUFREQ core (so we have a comment describing
+why we are doing this and a get_online_cpus() to ensure that the CPU
+does not go offline too early).
+
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Rafael J. Wysocki <rjw@rjwysocki.net>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
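Not part of the queued patch, only a reading aid: the "uses the pointer, modifies the mask" rule from the changelog above can be modelled in ordinary user-space C. Every name below (task_model, model_migrate_disable(), model_migrate_enable()) is invented for illustration, and cpu_set_t merely stands in for the kernel's cpumask_t.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

/* Invented stand-in for task_struct; cpu_set_t stands in for cpumask_t. */
struct task_model {
	const cpu_set_t *cpus_ptr;	/* code that 'uses' the mask reads this    */
	cpu_set_t cpus_mask;		/* code that 'modifies' the mask writes it */
};

/* Model of migrate_disable(): point the read-only view at a one-CPU mask. */
static void model_migrate_disable(struct task_model *t, const cpu_set_t *one_cpu)
{
	t->cpus_ptr = one_cpu;
}

/* Model of migrate_enable(): point the read-only view back at the full mask. */
static void model_migrate_enable(struct task_model *t)
{
	t->cpus_ptr = &t->cpus_mask;
}

int main(void)
{
	struct task_model t;
	cpu_set_t pinned;

	CPU_ZERO(&t.cpus_mask);
	CPU_SET(0, &t.cpus_mask);
	CPU_SET(1, &t.cpus_mask);
	t.cpus_ptr = &t.cpus_mask;	/* default: the pointer aliases the mask */

	CPU_ZERO(&pinned);
	CPU_SET(0, &pinned);		/* the "current CPU" in this model */

	model_migrate_disable(&t, &pinned);
	printf("migrate disabled: %d CPU(s) visible to 'uses' code\n",
	       CPU_COUNT(t.cpus_ptr));
	model_migrate_enable(&t);
	printf("migrate enabled:  %d CPU(s) visible to 'uses' code\n",
	       CPU_COUNT(t.cpus_ptr));
	return 0;
}

The point of the split is that a migrate_disable() section only swaps a pointer; the full mask stays intact, so migrate_enable() and set_cpus_allowed_ptr() never have to reconstruct it.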
+---
+ arch/ia64/kernel/mca.c | 2 -
+ arch/mips/include/asm/switch_to.h | 4 +-
+ arch/mips/kernel/mips-mt-fpaff.c | 2 -
+ arch/mips/kernel/traps.c | 6 ++--
+ arch/powerpc/platforms/cell/spufs/sched.c | 2 -
+ arch/tile/include/asm/setup.h | 2 -
+ arch/tile/kernel/hardwall.c | 10 +++---
+ drivers/infiniband/hw/hfi1/affinity.c | 6 ++--
+ drivers/infiniband/hw/hfi1/sdma.c | 3 --
+ drivers/infiniband/hw/qib/qib_file_ops.c | 7 ++--
+ fs/proc/array.c | 4 +-
+ include/linux/init_task.h | 3 +-
+ include/linux/sched.h | 5 ++-
+ kernel/cgroup/cpuset.c | 2 -
+ kernel/fork.c | 2 +
+ kernel/sched/core.c | 42 ++++++++++++++---------------
+ kernel/sched/cpudeadline.c | 4 +-
+ kernel/sched/cpupri.c | 4 +-
+ kernel/sched/deadline.c | 6 ++--
+ kernel/sched/fair.c | 28 +++++++++----------
+ kernel/sched/rt.c | 4 +-
+ kernel/trace/trace_hwlat.c | 2 -
+ lib/smp_processor_id.c | 2 -
+ samples/trace_events/trace-events-sample.c | 2 -
+ 24 files changed, 78 insertions(+), 76 deletions(-)
+
+--- a/arch/ia64/kernel/mca.c
++++ b/arch/ia64/kernel/mca.c
+@@ -1824,7 +1824,7 @@ format_mca_init_stack(void *mca_data, un
+ ti->cpu = cpu;
+ p->stack = ti;
+ p->state = TASK_UNINTERRUPTIBLE;
+- cpumask_set_cpu(cpu, &p->cpus_allowed);
++ cpumask_set_cpu(cpu, &p->cpus_mask);
+ INIT_LIST_HEAD(&p->tasks);
+ p->parent = p->real_parent = p->group_leader = p;
+ INIT_LIST_HEAD(&p->children);
+--- a/arch/mips/include/asm/switch_to.h
++++ b/arch/mips/include/asm/switch_to.h
+@@ -42,7 +42,7 @@ extern struct task_struct *ll_task;
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+- * isn't set), we undo the restriction on cpus_allowed.
++ * isn't set), we undo the restriction on cpus_mask.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+@@ -57,7 +57,7 @@ do { \
+ test_ti_thread_flag(__prev_ti, TIF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ clear_ti_thread_flag(__prev_ti, TIF_FPUBOUND); \
+- prev->cpus_allowed = prev->thread.user_cpus_allowed; \
++ prev->cpus_mask = prev->thread.user_cpus_allowed; \
+ } \
+ next->thread.emulated_fp = 0; \
+ } while(0)
+--- a/arch/mips/kernel/mips-mt-fpaff.c
++++ b/arch/mips/kernel/mips-mt-fpaff.c
+@@ -176,7 +176,7 @@ asmlinkage long mipsmt_sys_sched_getaffi
+ if (retval)
+ goto out_unlock;
+
+- cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
++ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
+
+ out_unlock:
+--- a/arch/mips/kernel/traps.c
++++ b/arch/mips/kernel/traps.c
+@@ -1191,12 +1191,12 @@ static void mt_ase_fp_affinity(void)
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+- if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
++ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+- = current->cpus_allowed;
+- cpumask_and(&tmask, &current->cpus_allowed,
++ = current->cpus_mask;
++ cpumask_and(&tmask, &current->cpus_mask,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+--- a/arch/powerpc/platforms/cell/spufs/sched.c
++++ b/arch/powerpc/platforms/cell/spufs/sched.c
+@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_
+ * runqueue. The context will be rescheduled on the proper node
+ * if it is timesliced or preempted.
+ */
+- cpumask_copy(&ctx->cpus_allowed, &current->cpus_allowed);
++ cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);
+
+ /* Save the current cpu id for spu interrupt routing. */
+ ctx->last_ran = raw_smp_processor_id();
+--- a/arch/tile/include/asm/setup.h
++++ b/arch/tile/include/asm/setup.h
+@@ -49,7 +49,7 @@ int hardwall_ipi_valid(int cpu);
+
+ /* Hook hardwall code into changes in affinity. */
+ #define arch_set_cpus_allowed(p, new_mask) do { \
+- if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
++ if (!cpumask_equal(p->cpus_ptr, new_mask)) \
+ hardwall_deactivate_all(p); \
+ } while (0)
+ #endif
+--- a/arch/tile/kernel/hardwall.c
++++ b/arch/tile/kernel/hardwall.c
+@@ -590,12 +590,12 @@ static int hardwall_activate(struct hard
+ * Get our affinity; if we're not bound to this tile uniquely,
+ * we can't access the network registers.
+ */
+- if (cpumask_weight(&p->cpus_allowed) != 1)
++ if (p->nr_cpus_allowed != 1)
+ return -EPERM;
+
+ /* Make sure we are bound to a cpu assigned to this resource. */
+ cpu = smp_processor_id();
+- BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
++ BUG_ON(cpumask_first(p->cpus_ptr) != cpu);
+ if (!cpumask_test_cpu(cpu, &info->cpumask))
+ return -EINVAL;
+
+@@ -621,17 +621,17 @@ static int hardwall_activate(struct hard
+ * Deactivate a task's hardwall. Must hold lock for hardwall_type.
+ * This method may be called from exit_thread(), so we don't want to
+ * rely on too many fields of struct task_struct still being valid.
+- * We assume the cpus_allowed, pid, and comm fields are still valid.
++ * We assume the nr_cpus_allowed, pid, and comm fields are still valid.
+ */
+ static void _hardwall_deactivate(struct hardwall_type *hwt,
+ struct task_struct *task)
+ {
+ struct thread_struct *ts = &task->thread;
+
+- if (cpumask_weight(&task->cpus_allowed) != 1) {
++ if (task->nr_cpus_allowed != 1) {
+ pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
+ task->pid, task->comm, hwt->name,
+- cpumask_weight(&task->cpus_allowed));
++ task->nr_cpus_allowed);
+ BUG();
+ }
+
+--- a/drivers/infiniband/hw/hfi1/affinity.c
++++ b/drivers/infiniband/hw/hfi1/affinity.c
+@@ -576,7 +576,7 @@ int hfi1_get_proc_affinity(int node)
+ struct hfi1_affinity_node *entry;
+ cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
+ const struct cpumask *node_mask,
+- *proc_mask = &current->cpus_allowed;
++ *proc_mask = current->cpus_ptr;
+ struct hfi1_affinity_node_list *affinity = &node_affinity;
+ struct cpu_mask_set *set = &affinity->proc;
+
+@@ -584,7 +584,7 @@ int hfi1_get_proc_affinity(int node)
+ * check whether process/context affinity has already
+ * been set
+ */
+- if (cpumask_weight(proc_mask) == 1) {
++ if (current->nr_cpus_allowed == 1) {
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
+@@ -595,7 +595,7 @@ int hfi1_get_proc_affinity(int node)
+ cpu = cpumask_first(proc_mask);
+ cpumask_set_cpu(cpu, &set->used);
+ goto done;
+- } else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
++ } else if (current->nr_cpus_allowed < cpumask_weight(&set->mask)) {
+ hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
+ current->pid, current->comm,
+ cpumask_pr_args(proc_mask));
+--- a/drivers/infiniband/hw/hfi1/sdma.c
++++ b/drivers/infiniband/hw/hfi1/sdma.c
+@@ -856,14 +856,13 @@ struct sdma_engine *sdma_select_user_eng
+ {
+ struct sdma_rht_node *rht_node;
+ struct sdma_engine *sde = NULL;
+- const struct cpumask *current_mask = &current->cpus_allowed;
+ unsigned long cpu_id;
+
+ /*
+ * To ensure that always the same sdma engine(s) will be
+ * selected make sure the process is pinned to this CPU only.
+ */
+- if (cpumask_weight(current_mask) != 1)
++ if (current->nr_cpus_allowed != 1)
+ goto out;
+
+ cpu_id = smp_processor_id();
+--- a/drivers/infiniband/hw/qib/qib_file_ops.c
++++ b/drivers/infiniband/hw/qib/qib_file_ops.c
+@@ -1163,7 +1163,7 @@ static unsigned int qib_poll(struct file
+ static void assign_ctxt_affinity(struct file *fp, struct qib_devdata *dd)
+ {
+ struct qib_filedata *fd = fp->private_data;
+- const unsigned int weight = cpumask_weight(&current->cpus_allowed);
++ const unsigned int weight = current->nr_cpus_allowed;
+ const struct cpumask *local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+ int local_cpu;
+
+@@ -1644,9 +1644,8 @@ static int qib_assign_ctxt(struct file *
+ ret = find_free_ctxt(i_minor - 1, fp, uinfo);
+ else {
+ int unit;
+- const unsigned int cpu = cpumask_first(&current->cpus_allowed);
+- const unsigned int weight =
+- cpumask_weight(&current->cpus_allowed);
++ const unsigned int cpu = cpumask_first(current->cpus_ptr);
++ const unsigned int weight = current->nr_cpus_allowed;
+
+ if (weight == 1 && !test_bit(cpu, qib_cpulist))
+ if (!find_hca(cpu, &unit) && unit >= 0)
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -364,9 +364,9 @@ static inline void task_context_switch_c
+ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
+ {
+ seq_printf(m, "Cpus_allowed:\t%*pb\n",
+- cpumask_pr_args(&task->cpus_allowed));
++ cpumask_pr_args(task->cpus_ptr));
+ seq_printf(m, "Cpus_allowed_list:\t%*pbl\n",
+- cpumask_pr_args(&task->cpus_allowed));
++ cpumask_pr_args(task->cpus_ptr));
+ }
+
+ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
+--- a/include/linux/init_task.h
++++ b/include/linux/init_task.h
+@@ -226,7 +226,8 @@ extern struct cred init_cred;
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = MAX_PRIO-20, \
+ .policy = SCHED_NORMAL, \
+- .cpus_allowed = CPU_MASK_ALL, \
++ .cpus_ptr = &tsk.cpus_mask, \
++ .cpus_mask = CPU_MASK_ALL, \
+ .nr_cpus_allowed= NR_CPUS, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -535,7 +535,8 @@ struct task_struct {
+
+ unsigned int policy;
+ int nr_cpus_allowed;
+- cpumask_t cpus_allowed;
++ const cpumask_t *cpus_ptr;
++ cpumask_t cpus_mask;
+
+ #ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+@@ -1224,7 +1225,7 @@ extern struct pid *cad_pid;
+ #define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+ #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
+ #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+-#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */
++#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
+ #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
+ #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+ #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
+--- a/kernel/cgroup/cpuset.c
++++ b/kernel/cgroup/cpuset.c
+@@ -2092,7 +2092,7 @@ static void cpuset_fork(struct task_stru
+ if (task_css_is_root(task, cpuset_cgrp_id))
+ return;
+
+- set_cpus_allowed_ptr(task, &current->cpus_allowed);
++ set_cpus_allowed_ptr(task, current->cpus_ptr);
+ task->mems_allowed = current->mems_allowed;
+ }
+
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -539,6 +539,8 @@ static struct task_struct *dup_task_stru
+ tsk->stack_canary = get_random_long();
+ #endif
+
++ if (orig->cpus_ptr == &orig->cpus_mask)
++ tsk->cpus_ptr = &tsk->cpus_mask;
+ /*
+ * One for us, one for whoever does the "release_task()" (usually
+ * parent)
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -986,7 +986,7 @@ static struct rq *__migrate_task(struct
+ return rq;
+
+ /* Affinity changed (again). */
+- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++ if (!cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+ return rq;
+
+ rq = move_queued_task(rq, p, dest_cpu);
+@@ -1012,7 +1012,7 @@ static int migration_cpu_stop(void *data
+ local_irq_disable();
+ /*
+ * We need to explicitly wake pending tasks before running
+- * __migrate_task() such that we will not miss enforcing cpus_allowed
++ * __migrate_task() such that we will not miss enforcing cpus_ptr
+ * during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
+ */
+ sched_ttwu_pending();
+@@ -1043,7 +1043,7 @@ static int migration_cpu_stop(void *data
+ */
+ void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
+ {
+- cpumask_copy(&p->cpus_allowed, new_mask);
++ cpumask_copy(&p->cpus_mask, new_mask);
+ p->nr_cpus_allowed = cpumask_weight(new_mask);
+ }
+
+@@ -1113,7 +1113,7 @@ static int __set_cpus_allowed_ptr(struct
+ goto out;
+ }
+
+- if (cpumask_equal(&p->cpus_allowed, new_mask))
++ if (cpumask_equal(p->cpus_ptr, new_mask))
+ goto out;
+
+ if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
+@@ -1264,10 +1264,10 @@ static int migrate_swap_stop(void *data)
+ if (task_cpu(arg->src_task) != arg->src_cpu)
+ goto unlock;
+
+- if (!cpumask_test_cpu(arg->dst_cpu, &arg->src_task->cpus_allowed))
++ if (!cpumask_test_cpu(arg->dst_cpu, arg->src_task->cpus_ptr))
+ goto unlock;
+
+- if (!cpumask_test_cpu(arg->src_cpu, &arg->dst_task->cpus_allowed))
++ if (!cpumask_test_cpu(arg->src_cpu, arg->dst_task->cpus_ptr))
+ goto unlock;
+
+ __migrate_swap_task(arg->src_task, arg->dst_cpu);
+@@ -1308,10 +1308,10 @@ int migrate_swap(struct task_struct *cur
+ if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
+ goto out;
+
+- if (!cpumask_test_cpu(arg.dst_cpu, &arg.src_task->cpus_allowed))
++ if (!cpumask_test_cpu(arg.dst_cpu, arg.src_task->cpus_ptr))
+ goto out;
+
+- if (!cpumask_test_cpu(arg.src_cpu, &arg.dst_task->cpus_allowed))
++ if (!cpumask_test_cpu(arg.src_cpu, arg.dst_task->cpus_ptr))
+ goto out;
+
+ trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
+@@ -1455,7 +1455,7 @@ void kick_process(struct task_struct *p)
+ EXPORT_SYMBOL_GPL(kick_process);
+
+ /*
+- * ->cpus_allowed is protected by both rq->lock and p->pi_lock
++ * ->cpus_ptr is protected by both rq->lock and p->pi_lock
+ *
+ * A few notes on cpu_active vs cpu_online:
+ *
+@@ -1495,14 +1495,14 @@ static int select_fallback_rq(int cpu, s
+ for_each_cpu(dest_cpu, nodemask) {
+ if (!cpu_active(dest_cpu))
+ continue;
+- if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
++ if (cpumask_test_cpu(dest_cpu, p->cpus_ptr))
+ return dest_cpu;
+ }
+ }
+
+ for (;;) {
+ /* Any allowed, online CPU? */
+- for_each_cpu(dest_cpu, &p->cpus_allowed) {
++ for_each_cpu(dest_cpu, p->cpus_ptr) {
+ if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
+ continue;
+ if (!cpu_online(dest_cpu))
+@@ -1547,7 +1547,7 @@ static int select_fallback_rq(int cpu, s
+ }
+
+ /*
+- * The caller (fork, wakeup) owns p->pi_lock, ->cpus_allowed is stable.
++ * The caller (fork, wakeup) owns p->pi_lock, ->cpus_ptr is stable.
+ */
+ static inline
+ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
+@@ -1557,11 +1557,11 @@ int select_task_rq(struct task_struct *p
+ if (p->nr_cpus_allowed > 1)
+ cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+ else
+- cpu = cpumask_any(&p->cpus_allowed);
++ cpu = cpumask_any(p->cpus_ptr);
+
+ /*
+ * In order not to call set_task_cpu() on a blocking task we need
+- * to rely on ttwu() to place the task on a valid ->cpus_allowed
++ * to rely on ttwu() to place the task on a valid ->cpus_ptr
+ * CPU.
+ *
+ * Since this is common to all placement strategies, this lives here.
+@@ -1569,7 +1569,7 @@ int select_task_rq(struct task_struct *p
+ * [ this allows ->select_task() to simply return task_cpu(p) and
+ * not worry about this generic constraint ]
+ */
+- if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) ||
++ if (unlikely(!cpumask_test_cpu(cpu, p->cpus_ptr) ||
+ !cpu_online(cpu)))
+ cpu = select_fallback_rq(task_cpu(p), p);
+
+@@ -2543,7 +2543,7 @@ void wake_up_new_task(struct task_struct
+ #ifdef CONFIG_SMP
+ /*
+ * Fork balancing, do it here and not earlier because:
+- * - cpus_allowed can change in the fork path
++ * - cpus_ptr can change in the fork path
+ * - any previously selected CPU might disappear through hotplug
+ *
+ * Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
+@@ -4315,7 +4315,7 @@ static int __sched_setscheduler(struct t
+ * the entire root_domain to become SCHED_DEADLINE. We
+ * will also fail if there's no bandwidth available.
+ */
+- if (!cpumask_subset(span, &p->cpus_allowed) ||
++ if (!cpumask_subset(span, p->cpus_ptr) ||
+ rq->rd->dl_bw.bw == 0) {
+ task_rq_unlock(rq, p, &rf);
+ return -EPERM;
+@@ -4909,7 +4909,7 @@ long sched_getaffinity(pid_t pid, struct
+ goto out_unlock;
+
+ raw_spin_lock_irqsave(&p->pi_lock, flags);
+- cpumask_and(mask, &p->cpus_allowed, cpu_active_mask);
++ cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+
+ out_unlock:
+@@ -5469,7 +5469,7 @@ int task_can_attach(struct task_struct *
+ * allowed nodes is unnecessary. Thus, cpusets are not
+ * applicable for such threads. This prevents checking for
+ * success of set_cpus_allowed_ptr() on all attached tasks
+- * before cpus_allowed may be changed.
++ * before cpus_mask may be changed.
+ */
+ if (p->flags & PF_NO_SETAFFINITY) {
+ ret = -EINVAL;
+@@ -5525,7 +5525,7 @@ int migrate_task_to(struct task_struct *
+ if (curr_cpu == target_cpu)
+ return 0;
+
+- if (!cpumask_test_cpu(target_cpu, &p->cpus_allowed))
++ if (!cpumask_test_cpu(target_cpu, p->cpus_ptr))
+ return -EINVAL;
+
+ /* TODO: This is not properly updating schedstats */
+@@ -5665,7 +5665,7 @@ static void migrate_tasks(struct rq *dea
+ next->sched_class->put_prev_task(rq, next);
+
+ /*
+- * Rules for changing task_struct::cpus_allowed are holding
++ * Rules for changing task_struct::cpus_mask are holding
+ * both pi_lock and rq->lock, such that holding either
+ * stabilizes the mask.
+ *
+--- a/kernel/sched/cpudeadline.c
++++ b/kernel/sched/cpudeadline.c
+@@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct
+ const struct sched_dl_entity *dl_se = &p->dl;
+
+ if (later_mask &&
+- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
++ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
+ best_cpu = cpumask_any(later_mask);
+ goto out;
+- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
++ } else if (cpumask_test_cpu(cpudl_maximum(cp), p->cpus_ptr) &&
+ dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+ best_cpu = cpudl_maximum(cp);
+ if (later_mask)
+--- a/kernel/sched/cpupri.c
++++ b/kernel/sched/cpupri.c
+@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struc
+ if (skip)
+ continue;
+
+- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
++ if (cpumask_any_and(p->cpus_ptr, vec->mask) >= nr_cpu_ids)
+ continue;
+
+ if (lowest_mask) {
+- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
++ cpumask_and(lowest_mask, p->cpus_ptr, vec->mask);
+
+ /*
+ * We have to ensure that we have at least one bit
+--- a/kernel/sched/deadline.c
++++ b/kernel/sched/deadline.c
+@@ -252,7 +252,7 @@ static struct rq *dl_task_offline_migrat
+ * If we cannot preempt any rq, fall back to pick any
+ * online cpu.
+ */
+- cpu = cpumask_any_and(cpu_active_mask, &p->cpus_allowed);
++ cpu = cpumask_any_and(cpu_active_mask, p->cpus_ptr);
+ if (cpu >= nr_cpu_ids) {
+ /*
+ * Fail to find any suitable cpu.
+@@ -1286,7 +1286,7 @@ static void set_curr_task_dl(struct rq *
+ static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
+ {
+ if (!task_running(rq, p) &&
+- cpumask_test_cpu(cpu, &p->cpus_allowed))
++ cpumask_test_cpu(cpu, p->cpus_ptr))
+ return 1;
+ return 0;
+ }
+@@ -1435,7 +1435,7 @@ static struct rq *find_lock_later_rq(str
+ /* Retry if something changed. */
+ if (double_lock_balance(rq, later_rq)) {
+ if (unlikely(task_rq(task) != rq ||
+- !cpumask_test_cpu(later_rq->cpu, &task->cpus_allowed) ||
++ !cpumask_test_cpu(later_rq->cpu, task->cpus_ptr) ||
+ task_running(rq, task) ||
+ !dl_task(task) ||
+ !task_on_rq_queued(task))) {
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -1553,7 +1553,7 @@ static void task_numa_compare(struct tas
+ */
+ if (cur) {
+ /* Skip this swap candidate if cannot move to the source cpu */
+- if (!cpumask_test_cpu(env->src_cpu, &cur->cpus_allowed))
++ if (!cpumask_test_cpu(env->src_cpu, cur->cpus_ptr))
+ goto unlock;
+
+ /*
+@@ -1663,7 +1663,7 @@ static void task_numa_find_cpu(struct ta
+
+ for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
+ /* Skip this CPU if the source task cannot migrate */
+- if (!cpumask_test_cpu(cpu, &env->p->cpus_allowed))
++ if (!cpumask_test_cpu(cpu, env->p->cpus_ptr))
+ continue;
+
+ env->dst_cpu = cpu;
+@@ -5460,7 +5460,7 @@ find_idlest_group(struct sched_domain *s
+
+ /* Skip over this group if it has no CPUs allowed */
+ if (!cpumask_intersects(sched_group_cpus(group),
+- &p->cpus_allowed))
++ p->cpus_ptr))
+ continue;
+
+ local_group = cpumask_test_cpu(this_cpu,
+@@ -5580,7 +5580,7 @@ find_idlest_cpu(struct sched_group *grou
+ return cpumask_first(sched_group_cpus(group));
+
+ /* Traverse only the allowed CPUs */
+- for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
++ for_each_cpu_and(i, sched_group_cpus(group), p->cpus_ptr) {
+ if (idle_cpu(i)) {
+ struct rq *rq = cpu_rq(i);
+ struct cpuidle_state *idle = idle_get_state(rq);
+@@ -5719,7 +5719,7 @@ static int select_idle_core(struct task_
+ if (!test_idle_cores(target, false))
+ return -1;
+
+- cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
++ cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
+
+ for_each_cpu_wrap(core, cpus, target, wrap) {
+ bool idle = true;
+@@ -5753,7 +5753,7 @@ static int select_idle_smt(struct task_s
+ return -1;
+
+ for_each_cpu(cpu, cpu_smt_mask(target)) {
+- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ continue;
+ if (idle_cpu(cpu))
+ return cpu;
+@@ -5805,7 +5805,7 @@ static int select_idle_cpu(struct task_s
+ time = local_clock();
+
+ for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+- if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
++ if (!cpumask_test_cpu(cpu, p->cpus_ptr))
+ continue;
+ if (idle_cpu(cpu))
+ break;
+@@ -5960,7 +5960,7 @@ select_task_rq_fair(struct task_struct *
+ if (sd_flag & SD_BALANCE_WAKE) {
+ record_wakee(p);
+ want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
+- && cpumask_test_cpu(cpu, &p->cpus_allowed);
++ && cpumask_test_cpu(cpu, p->cpus_ptr);
+ }
+
+ rcu_read_lock();
+@@ -6693,14 +6693,14 @@ int can_migrate_task(struct task_struct
+ /*
+ * We do not migrate tasks that are:
+ * 1) throttled_lb_pair, or
+- * 2) cannot be migrated to this CPU due to cpus_allowed, or
++ * 2) cannot be migrated to this CPU due to cpus_ptr, or
+ * 3) running (obviously), or
+ * 4) are cache-hot on their current CPU.
+ */
+ if (throttled_lb_pair(task_group(p), env->src_cpu, env->dst_cpu))
+ return 0;
+
+- if (!cpumask_test_cpu(env->dst_cpu, &p->cpus_allowed)) {
++ if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
+ int cpu;
+
+ schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
+@@ -6720,7 +6720,7 @@ int can_migrate_task(struct task_struct
+
+ /* Prevent to re-select dst_cpu via env's cpus */
+ for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
+- if (cpumask_test_cpu(cpu, &p->cpus_allowed)) {
++ if (cpumask_test_cpu(cpu, p->cpus_ptr)) {
+ env->flags |= LBF_DST_PINNED;
+ env->new_dst_cpu = cpu;
+ break;
+@@ -7254,7 +7254,7 @@ check_cpu_capacity(struct rq *rq, struct
+
+ /*
+ * Group imbalance indicates (and tries to solve) the problem where balancing
+- * groups is inadequate due to ->cpus_allowed constraints.
++ * groups is inadequate due to ->cpus_ptr constraints.
+ *
+ * Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
+ * cpumask covering 1 cpu of the first group and 3 cpus of the second group.
+@@ -7828,7 +7828,7 @@ static struct sched_group *find_busiest_
+ /*
+ * If the busiest group is imbalanced the below checks don't
+ * work because they assume all things are equal, which typically
+- * isn't true due to cpus_allowed constraints and the like.
++ * isn't true due to cpus_ptr constraints and the like.
+ */
+ if (busiest->group_type == group_imbalanced)
+ goto force_balance;
+@@ -8213,7 +8213,7 @@ static int load_balance(int this_cpu, st
+ * if the curr task on busiest cpu can't be
+ * moved to this_cpu
+ */
+- if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
++ if (!cpumask_test_cpu(this_cpu, busiest->curr->cpus_ptr)) {
+ raw_spin_unlock_irqrestore(&busiest->lock,
+ flags);
+ env.flags |= LBF_ALL_PINNED;
+--- a/kernel/sched/rt.c
++++ b/kernel/sched/rt.c
+@@ -1591,7 +1591,7 @@ static void put_prev_task_rt(struct rq *
+ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
+ {
+ if (!task_running(rq, p) &&
+- cpumask_test_cpu(cpu, &p->cpus_allowed))
++ cpumask_test_cpu(cpu, p->cpus_ptr))
+ return 1;
+ return 0;
+ }
+@@ -1726,7 +1726,7 @@ static struct rq *find_lock_lowest_rq(st
+ * Also make sure that it wasn't scheduled on its rq.
+ */
+ if (unlikely(task_rq(task) != rq ||
+- !cpumask_test_cpu(lowest_rq->cpu, &task->cpus_allowed) ||
++ !cpumask_test_cpu(lowest_rq->cpu, task->cpus_ptr) ||
+ task_running(rq, task) ||
+ !rt_task(task) ||
+ !task_on_rq_queued(task))) {
+--- a/kernel/trace/trace_hwlat.c
++++ b/kernel/trace/trace_hwlat.c
+@@ -279,7 +279,7 @@ static void move_to_next_cpu(void)
+ * of this thread, than stop migrating for the duration
+ * of the current test.
+ */
+- if (!cpumask_equal(current_mask, &current->cpus_allowed))
++ if (!cpumask_equal(current_mask, current->cpus_ptr))
+ goto disable;
+
+ get_online_cpus();
+--- a/lib/smp_processor_id.c
++++ b/lib/smp_processor_id.c
+@@ -22,7 +22,7 @@ notrace static unsigned int check_preemp
+ * Kernel threads bound to a single CPU can safely use
+ * smp_processor_id():
+ */
+- if (cpumask_equal(&current->cpus_allowed, cpumask_of(this_cpu)))
++ if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
+ goto out;
+
+ /*
+--- a/samples/trace_events/trace-events-sample.c
++++ b/samples/trace_events/trace-events-sample.c
+@@ -33,7 +33,7 @@ static void simple_thread_func(int cnt)
+
+ /* Silly tracepoints */
+ trace_foo_bar("hello", cnt, array, random_strings[len],
+- &current->cpus_allowed);
++ current->cpus_ptr);
+
+ trace_foo_with_template_simple("HELLO", cnt);
+
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 986d5e1b3639..ae5699f012b7 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -76,6 +76,7 @@
+@@ -87,6 +87,7 @@
#include <linux/compiler.h>
#include <linux/sysctl.h>
#include <linux/kcov.h>
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
-@@ -385,6 +386,15 @@ void __put_task_struct(struct task_struc
+@@ -398,6 +399,15 @@ void __put_task_struct(struct task_struc
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index ccc19d7a5bcf..76505b19bd58 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -20,8 +20,8 @@ Jason.
---
drivers/tty/serial/8250/8250_port.c | 3 +++
include/linux/kdb.h | 2 ++
- kernel/debug/kdb/kdb_io.c | 6 ++----
- 3 files changed, 7 insertions(+), 4 deletions(-)
+ kernel/debug/kdb/kdb_io.c | 2 ++
+ 3 files changed, 7 insertions(+)
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -33,7 +33,7 @@ Jason.
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
#include <linux/timer.h>
-@@ -3146,6 +3147,8 @@ void serial8250_console_write(struct uar
+@@ -3181,6 +3182,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;
@@ -62,32 +62,7 @@ Jason.
char *help, short minlen) { return 0; }
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
-@@ -554,7 +554,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- int linecount;
- int colcount;
- int logging, saved_loglevel = 0;
-- int saved_trap_printk;
- int got_printf_lock = 0;
- int retlen = 0;
- int fnd, len;
-@@ -565,8 +564,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- unsigned long uninitialized_var(flags);
-
- preempt_disable();
-- saved_trap_printk = kdb_trap_printk;
-- kdb_trap_printk = 0;
-
- /* Serialize kdb_printf if multiple cpus try to write at once.
- * But if any cpu goes recursive in kdb, just print the output,
-@@ -855,7 +852,6 @@ int vkdb_printf(enum kdb_msgsrc src, con
- } else {
- __release(kdb_printf_lock);
- }
-- kdb_trap_printk = saved_trap_printk;
- preempt_enable();
- return retlen;
- }
-@@ -865,9 +861,11 @@ int kdb_printf(const char *fmt, ...)
+@@ -854,9 +854,11 @@ int kdb_printf(const char *fmt, ...)
va_list ap;
int r;
diff --git a/patches/latency-hist.patch b/patches/latency-hist.patch
index 368b063db0d3..59c19381fc0f 100644
--- a/patches/latency-hist.patch
+++ b/patches/latency-hist.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
Documentation/trace/histograms.txt | 186 +++++
include/linux/hrtimer.h | 4
- include/linux/sched.h | 6
+ include/linux/sched.h | 7
include/trace/events/hist.h | 73 ++
include/trace/events/latency_hist.h | 29
kernel/time/hrtimer.c | 21
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kernel/trace/Makefile | 4
kernel/trace/latency_hist.c | 1178 ++++++++++++++++++++++++++++++++++++
kernel/trace/trace_irqsoff.c | 11
- 10 files changed, 1616 insertions(+)
+ 10 files changed, 1616 insertions(+), 1 deletion(-)
--- /dev/null
+++ b/Documentation/trace/histograms.txt
@@ -216,15 +216,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+These data are also reset when the wakeup histogram is reset.
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -87,6 +87,7 @@ enum hrtimer_restart {
+@@ -86,6 +86,7 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
+ * @praecox: timer expiry time if expired at the time of programming
* @is_rel: Set if the timer was armed relative
- * @start_pid: timer statistics field to store the pid of the task which
- * started the timer
-@@ -103,6 +104,9 @@ struct hrtimer {
+ *
+ * The hrtimer structure must be initialized by hrtimer_init()
+@@ -96,6 +97,9 @@ struct hrtimer {
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
u8 state;
@@ -232,14 +232,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ ktime_t praecox;
+#endif
u8 is_rel;
- #ifdef CONFIG_TIMER_STATS
- int start_pid;
+ };
+
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1924,6 +1924,12 @@ struct task_struct {
- /* bitmask and counter of trace recursion */
- unsigned long trace_recursion;
+@@ -1009,7 +1009,12 @@ struct task_struct {
+ /* Bitmask and counter of trace recursion: */
+ unsigned long trace_recursion;
#endif /* CONFIG_TRACING */
+-
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+ u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
@@ -247,8 +248,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+#endif
#ifdef CONFIG_KCOV
- /* Coverage collection mode enabled for this task (0 if disabled). */
- enum kcov_mode kcov_mode;
+ /* Coverage collection mode enabled for this task (0 if disabled): */
+ enum kcov_mode kcov_mode;
--- /dev/null
+++ b/include/trace/events/hist.h
@@ -0,0 +1,73 @@
@@ -359,18 +360,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif /* _LATENCY_HIST_H */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -53,6 +53,7 @@
- #include <asm/uaccess.h>
-
- #include <trace/events/timer.h>
+@@ -50,6 +50,7 @@
+ #include <linux/sched/nohz.h>
+ #include <linux/sched/debug.h>
+ #include <linux/timer.h>
+#include <trace/events/hist.h>
+ #include <linux/freezer.h>
- #include "tick-internal.h"
+ #include <linux/uaccess.h>
+@@ -960,7 +961,16 @@ void hrtimer_start_range_ns(struct hrtim
-@@ -991,7 +992,16 @@ void hrtimer_start_range_ns(struct hrtim
+ /* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-
- timer_stats_hrtimer_set_start_info(timer);
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+ {
+ ktime_t now = new_base->get_time();
@@ -384,7 +385,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
-@@ -1265,6 +1275,8 @@ static void __run_hrtimer(struct hrtimer
+@@ -1239,6 +1249,8 @@ static void __run_hrtimer(struct hrtimer
cpu_base->running = NULL;
}
@@ -393,7 +394,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
struct hrtimer_clock_base *base = cpu_base->clock_base;
-@@ -1284,6 +1296,15 @@ static void __hrtimer_run_queues(struct
+@@ -1258,6 +1270,15 @@ static void __hrtimer_run_queues(struct
timer = container_of(node, struct hrtimer, node);
@@ -411,7 +412,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* minimizing wakeups, not running timers at the
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
-@@ -182,6 +182,24 @@ config IRQSOFF_TRACER
+@@ -184,6 +184,24 @@ config IRQSOFF_TRACER
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -436,7 +437,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config PREEMPT_TRACER
bool "Preemption-off Latency Tracer"
default n
-@@ -206,6 +224,24 @@ config PREEMPT_TRACER
+@@ -208,6 +226,24 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
@@ -461,7 +462,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
-@@ -251,6 +287,74 @@ config HWLAT_TRACER
+@@ -253,6 +289,74 @@ config HWLAT_TRACER
file. Every time a latency is greater than tracing_thresh, it will
be recorded into the ring buffer.
@@ -659,7 +660,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ int current_prio;
+ long latency;
+ long timeroffset;
-+ cycle_t timestamp;
++ u64 timestamp;
+};
+#endif
+
@@ -710,7 +711,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+
+void notrace latency_hist(int latency_type, int cpu, long latency,
-+ long timeroffset, cycle_t stop,
++ long timeroffset, u64 stop,
+ struct task_struct *p)
+{
+ struct hist_data *my_hist;
@@ -1301,7 +1302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ int time_set = 0;
+
+ if (starthist) {
-+ cycle_t uninitialized_var(start);
++ u64 uninitialized_var(start);
+
+ if (!preempt_count() && !irqs_disabled())
+ return;
@@ -1337,12 +1338,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ }
+#endif
+ } else {
-+ cycle_t uninitialized_var(stop);
++ u64 uninitialized_var(stop);
+
+#ifdef CONFIG_INTERRUPT_OFF_HIST
+ if ((reason == IRQS_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_irqsoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_irqsoff_start, cpu);
++ u64 start = per_cpu(hist_irqsoff_start, cpu);
+
+ stop = ftrace_now(cpu);
+ time_set++;
@@ -1360,7 +1361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#ifdef CONFIG_PREEMPT_OFF_HIST
+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) &&
+ per_cpu(hist_preemptoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_preemptoff_start, cpu);
++ u64 start = per_cpu(hist_preemptoff_start, cpu);
+
+ if (!(time_set++))
+ stop = ftrace_now(cpu);
@@ -1379,7 +1380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if ((!per_cpu(hist_irqsoff_counting, cpu) ||
+ !per_cpu(hist_preemptoff_counting, cpu)) &&
+ per_cpu(hist_preemptirqsoff_counting, cpu)) {
-+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu);
++ u64 start = per_cpu(hist_preemptirqsoff_start, cpu);
+
+ if (!time_set)
+ stop = ftrace_now(cpu);
@@ -1466,7 +1467,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ unsigned long flags;
+ int cpu = task_cpu(next);
+ long latency;
-+ cycle_t stop;
++ u64 stop;
+ struct task_struct *cpu_wakeup_task;
+
+ raw_spin_lock_irqsave(&wakeup_lock, flags);
@@ -1536,9 +1537,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (latency_ns <= 0 && task != NULL && rt_task(task) &&
+ (task->prio < curr->prio ||
+ (task->prio == curr->prio &&
-+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
++ !cpumask_test_cpu(cpu, task->cpus_ptr)))) {
+ long latency;
-+ cycle_t now;
++ u64 now;
+
+ if (missed_timer_offsets_pid) {
+ if (likely(missed_timer_offsets_pid !=
@@ -1740,7 +1741,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "trace.h"
-@@ -424,11 +425,13 @@ void start_critical_timings(void)
+@@ -436,11 +437,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1754,7 +1755,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -438,6 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -450,6 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
@@ -1762,7 +1763,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -446,6 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -458,6 +462,7 @@ void time_hardirqs_off(unsigned long a0,
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
@@ -1770,7 +1771,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#else /* !CONFIG_PROVE_LOCKING */
-@@ -471,6 +476,7 @@ inline void print_irqtrace_events(struct
+@@ -483,6 +488,7 @@ inline void print_irqtrace_events(struct
*/
void trace_hardirqs_on(void)
{
@@ -1778,7 +1779,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -480,11 +486,13 @@ void trace_hardirqs_off(void)
+@@ -492,11 +498,13 @@ void trace_hardirqs_off(void)
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -1792,7 +1793,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
-@@ -494,6 +502,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+@@ -506,6 +514,7 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
{
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
@@ -1800,7 +1801,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-@@ -503,12 +512,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
+@@ -515,12 +524,14 @@ EXPORT_SYMBOL(trace_hardirqs_off_caller)
#ifdef CONFIG_PREEMPT_TRACER
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
diff --git a/patches/latencyhist-disable-jump-labels.patch b/patches/latencyhist-disable-jump-labels.patch
index 5e3e6e9e4c5e..ee5040cd36db 100644
--- a/patches/latencyhist-disable-jump-labels.patch
+++ b/patches/latencyhist-disable-jump-labels.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -52,6 +52,7 @@ config KPROBES
+@@ -55,6 +55,7 @@ config KPROBES
config JUMP_LABEL
bool "Optimize very unlikely/likely branches"
depends on HAVE_ARCH_JUMP_LABEL
diff --git a/patches/localversion.patch b/patches/localversion.patch
index bba4391fd1bd..a02382e6df70 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt21
++-rt1
diff --git a/patches/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch b/patches/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
index e19e0155e517..d6385cee687e 100644
--- a/patches/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
+++ b/patches/lockdep-Fix-compilation-error-for-CONFIG_MODULES-and.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/module.h
+++ b/include/linux/module.h
-@@ -664,6 +664,11 @@ static inline bool is_module_percpu_addr
+@@ -661,6 +661,11 @@ static inline bool is_module_percpu_addr
return false;
}
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
--- a/kernel/module.c
+++ b/kernel/module.c
-@@ -734,6 +734,11 @@ bool is_module_percpu_address(unsigned l
+@@ -739,6 +739,11 @@ bool is_module_percpu_address(unsigned l
return false;
}
diff --git a/patches/lockdep-Fix-per-cpu-static-objects.patch b/patches/lockdep-Fix-per-cpu-static-objects.patch
index ebf40585fca8..f79a7b7dd829 100644
--- a/patches/lockdep-Fix-per-cpu-static-objects.patch
+++ b/patches/lockdep-Fix-per-cpu-static-objects.patch
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -1240,6 +1240,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
+@@ -1125,6 +1125,8 @@ core_initcall(cpu_hotplug_pm_sync_init);
#endif /* CONFIG_PM_SLEEP_SMP */
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* CONFIG_SMP */
/* Boot processor state steps */
-@@ -1924,6 +1926,10 @@ void __init boot_cpu_init(void)
+@@ -1815,6 +1817,10 @@ void __init boot_cpu_init(void)
set_cpu_active(cpu, true);
set_cpu_present(cpu, true);
set_cpu_possible(cpu, true);
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/module.c
+++ b/kernel/module.c
-@@ -677,8 +677,12 @@ bool __is_module_percpu_address(unsigned
+@@ -682,8 +682,12 @@ bool __is_module_percpu_address(unsigned
void *va = (void *)addr;
if (va >= start && va < start + mod->percpu_size) {
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/mm/percpu.c
+++ b/mm/percpu.c
-@@ -1295,8 +1295,11 @@ bool __is_kernel_percpu_address(unsigned
+@@ -1296,8 +1296,11 @@ bool __is_kernel_percpu_address(unsigned
void *va = (void *)addr;
if (va >= start && va < start + static_size) {
diff --git a/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch b/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
index 51783c6aaa56..64a6f75e8665 100644
--- a/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
+++ b/patches/lockdep-Handle-statically-initialized-PER_CPU-locks-.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/module.h
+++ b/include/linux/module.h
-@@ -496,6 +496,7 @@ static inline int module_is_live(struct
+@@ -493,6 +493,7 @@ static inline int module_is_live(struct
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -658,6 +658,7 @@ look_up_lock_class(struct lockdep_map *l
+@@ -660,6 +660,7 @@ look_up_lock_class(struct lockdep_map *l
struct lockdep_subclass_key *key;
struct hlist_head *hash_head;
struct lock_class *class;
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) {
debug_locks_off();
-@@ -671,10 +672,23 @@ look_up_lock_class(struct lockdep_map *l
+@@ -673,10 +674,23 @@ look_up_lock_class(struct lockdep_map *l
/*
* Static locks do not have their class-keys yet - for them the key
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* NOTE: the class-key must be unique. For dynamic locks, a static
-@@ -706,7 +720,7 @@ look_up_lock_class(struct lockdep_map *l
+@@ -708,7 +722,7 @@ look_up_lock_class(struct lockdep_map *l
}
}
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -724,19 +738,18 @@ register_lock_class(struct lockdep_map *
+@@ -726,19 +740,18 @@ register_lock_class(struct lockdep_map *
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
class = look_up_lock_class(lock, subclass);
@@ -132,7 +132,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -3410,7 +3423,7 @@ static int match_held_lock(struct held_l
+@@ -3419,7 +3432,7 @@ static int match_held_lock(struct held_l
* Clearly if the lock hasn't been acquired _ever_, we're not
* holding it either, so report failure.
*/
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
/*
-@@ -4159,7 +4172,7 @@ void lockdep_reset_lock(struct lockdep_m
+@@ -4172,7 +4185,7 @@ void lockdep_reset_lock(struct lockdep_m
* If the class exists we look it up and zap it:
*/
class = look_up_lock_class(lock, j);
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/module.c
+++ b/kernel/module.c
-@@ -660,16 +660,7 @@ static void percpu_modcopy(struct module
+@@ -665,16 +665,7 @@ static void percpu_modcopy(struct module
memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}
@@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct module *mod;
unsigned int cpu;
-@@ -683,9 +674,11 @@ bool is_module_percpu_address(unsigned l
+@@ -688,9 +679,11 @@ bool is_module_percpu_address(unsigned l
continue;
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(mod->percpu, cpu);
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
return true;
}
-@@ -696,6 +689,20 @@ bool is_module_percpu_address(unsigned l
+@@ -701,6 +694,20 @@ bool is_module_percpu_address(unsigned l
return false;
}
@@ -207,7 +207,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void __percpu *mod_percpu(struct module *mod)
--- a/mm/percpu.c
+++ b/mm/percpu.c
-@@ -1283,18 +1283,7 @@ void free_percpu(void __percpu *ptr)
+@@ -1284,18 +1284,7 @@ void free_percpu(void __percpu *ptr)
}
EXPORT_SYMBOL_GPL(free_percpu);
@@ -227,7 +227,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
#ifdef CONFIG_SMP
const size_t static_size = __per_cpu_end - __per_cpu_start;
-@@ -1303,16 +1292,36 @@ bool is_kernel_percpu_address(unsigned l
+@@ -1304,16 +1293,36 @@ bool is_kernel_percpu_address(unsigned l
for_each_possible_cpu(cpu) {
void *start = per_cpu_ptr(base, cpu);
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index 413c64184f79..fc7e870bc411 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3702,6 +3702,7 @@ static void check_flags(unsigned long fl
+@@ -3715,6 +3715,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3716,6 +3717,7 @@ static void check_flags(unsigned long fl
+@@ -3729,6 +3730,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch b/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
deleted file mode 100644
index 0071ac7ce39c..000000000000
--- a/patches/locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 21 Nov 2016 19:26:15 +0100
-Subject: [PATCH] locking/percpu-rwsem: use swait for the wating writer
-
-Use struct swait_queue_head instead of wait_queue_head_t for the waiting
-writer. The swait implementation is smaller and lightweight compared to
-wait_queue_head_t.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/percpu-rwsem.h | 6 +++---
- kernel/locking/percpu-rwsem.c | 6 +++---
- 2 files changed, 6 insertions(+), 6 deletions(-)
-
---- a/include/linux/percpu-rwsem.h
-+++ b/include/linux/percpu-rwsem.h
-@@ -4,7 +4,7 @@
- #include <linux/atomic.h>
- #include <linux/rwsem.h>
- #include <linux/percpu.h>
--#include <linux/wait.h>
-+#include <linux/swait.h>
- #include <linux/rcu_sync.h>
- #include <linux/lockdep.h>
-
-@@ -12,7 +12,7 @@ struct percpu_rw_semaphore {
- struct rcu_sync rss;
- unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem;
-- wait_queue_head_t writer;
-+ struct swait_queue_head writer;
- int readers_block;
- };
-
-@@ -22,7 +22,7 @@ static struct percpu_rw_semaphore name =
- .rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC), \
- .read_count = &__percpu_rwsem_rc_##name, \
- .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
-- .writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
-+ .writer = __SWAIT_QUEUE_HEAD_INITIALIZER(name.writer), \
- }
-
- extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
---- a/kernel/locking/percpu-rwsem.c
-+++ b/kernel/locking/percpu-rwsem.c
-@@ -18,7 +18,7 @@ int __percpu_init_rwsem(struct percpu_rw
- /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
- rcu_sync_init(&sem->rss, RCU_SCHED_SYNC);
- __init_rwsem(&sem->rw_sem, name, rwsem_key);
-- init_waitqueue_head(&sem->writer);
-+ init_swait_queue_head(&sem->writer);
- sem->readers_block = 0;
- return 0;
- }
-@@ -103,7 +103,7 @@ void __percpu_up_read(struct percpu_rw_s
- __this_cpu_dec(*sem->read_count);
-
- /* Prod writer to recheck readers_active */
-- wake_up(&sem->writer);
-+ swake_up(&sem->writer);
- }
- EXPORT_SYMBOL_GPL(__percpu_up_read);
-
-@@ -160,7 +160,7 @@ void percpu_down_write(struct percpu_rw_
- */
-
- /* Wait for all now active readers to complete. */
-- wait_event(sem->writer, readers_active_check(sem));
-+ swait_event(sem->writer, readers_active_check(sem));
- }
- EXPORT_SYMBOL_GPL(percpu_down_write);
-
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index 16e023890d14..59fdfefc2ac8 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -1928,8 +1928,9 @@ static void raid_run_ops(struct stripe_h
+@@ -1986,8 +1986,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -1985,7 +1986,8 @@ static void raid_run_ops(struct stripe_h
+@@ -2043,7 +2044,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,15 +41,15 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp,
-@@ -6393,6 +6395,7 @@ static int raid456_cpu_up_prepare(unsign
- __func__, cpu);
+@@ -6664,6 +6666,7 @@ static int raid456_cpu_up_prepare(unsign
+ __func__, cpu);
return -ENOMEM;
}
+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
return 0;
}
-@@ -6403,7 +6406,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6674,7 +6677,6 @@ static int raid5_alloc_percpu(struct r5c
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
@@ -59,7 +59,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
conf->scribble_disks = max(conf->raid_disks,
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
-@@ -504,6 +504,7 @@ struct r5conf {
+@@ -643,6 +643,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 0ff1fe5302bc..34b86bdd04e4 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2516,7 +2516,7 @@ config MIPS_ASID_BITS_VARIABLE
+@@ -2520,7 +2520,7 @@ config MIPS_ASID_BITS_VARIABLE
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch b/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
index 3c210118ea66..a9faebd35840 100644
--- a/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
+++ b/patches/mm-backing-dev-don-t-disable-IRQs-in-wb_congested_pu.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
-@@ -457,9 +457,9 @@ void wb_congested_put(struct bdi_writeba
+@@ -459,9 +459,9 @@ void wb_congested_put(struct bdi_writeba
{
unsigned long flags;
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 817ae137493c..c40c2d08a062 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -11,13 +11,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/swap.h | 1 +
mm/compaction.c | 6 ++++--
- mm/page_alloc.c | 2 ++
+ mm/page_alloc.c | 3 ++-
mm/swap.c | 38 ++++++++++++++++++++++----------------
- 4 files changed, 29 insertions(+), 18 deletions(-)
+ 4 files changed, 29 insertions(+), 19 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
-@@ -294,6 +294,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -269,6 +269,7 @@ extern unsigned long nr_free_pagecache_p
/* linux/mm/swap.c */
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1593,10 +1593,12 @@ static enum compact_result compact_zone(
+@@ -1601,10 +1601,12 @@ static enum compact_result compact_zone(
block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
@@ -44,16 +44,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6594,7 +6594,9 @@ static int page_alloc_cpu_notify(struct
- int cpu = (unsigned long)hcpu;
+@@ -6787,8 +6787,9 @@ void __init free_area_init(unsigned long
- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-+ local_lock_irq_on(swapvec_lock, cpu);
- lru_add_drain_cpu(cpu);
-+ local_unlock_irq_on(swapvec_lock, cpu);
- drain_pages(cpu);
+ static int page_alloc_cpu_dead(unsigned int cpu)
+ {
+-
++ local_lock_irq_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
++ local_unlock_irq_on(swapvec_lock, cpu);
+ drain_pages(cpu);
- /*
+ /*
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -32,6 +32,7 @@
@@ -73,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This path almost never happens for VM activity - pages are normally
-@@ -240,11 +243,11 @@ void rotate_reclaimable_page(struct page
+@@ -242,11 +245,11 @@ void rotate_reclaimable_page(struct page
unsigned long flags;
get_page(page);
@@ -87,7 +88,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -294,12 +297,13 @@ void activate_page(struct page *page)
+@@ -296,12 +299,13 @@ void activate_page(struct page *page)
{
page = compound_head(page);
if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -103,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -326,7 +330,7 @@ void activate_page(struct page *page)
+@@ -328,7 +332,7 @@ void activate_page(struct page *page)
static void __lru_cache_activate_page(struct page *page)
{
@@ -112,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/*
-@@ -348,7 +352,7 @@ static void __lru_cache_activate_page(st
+@@ -350,7 +354,7 @@ static void __lru_cache_activate_page(st
}
}
@@ -121,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -390,12 +394,12 @@ EXPORT_SYMBOL(mark_page_accessed);
+@@ -392,12 +396,12 @@ EXPORT_SYMBOL(mark_page_accessed);
static void __lru_cache_add(struct page *page)
{
@@ -136,7 +137,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -593,9 +597,9 @@ void lru_add_drain_cpu(int cpu)
+@@ -595,9 +599,9 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -148,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -627,11 +631,12 @@ void deactivate_file_page(struct page *p
+@@ -629,11 +633,12 @@ void deactivate_file_page(struct page *p
return;
if (likely(get_page_unless_zero(page))) {
@@ -163,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -646,19 +651,20 @@ void deactivate_file_page(struct page *p
+@@ -648,19 +653,20 @@ void deactivate_file_page(struct page *p
void deactivate_page(struct page *page)
{
if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index b081c354eac1..e8de9f89cbbd 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1759,6 +1759,7 @@ choice
+@@ -1825,6 +1825,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1779,6 +1780,7 @@ config SLUB
+@@ -1845,6 +1846,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index f8d47f2f97a6..c2962f641ae6 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -426,7 +426,11 @@ static inline void slab_post_alloc_hook(
+@@ -465,7 +465,11 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1141,7 +1141,7 @@ static noinline int free_debug_processin
+@@ -1146,7 +1146,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1176,7 +1176,7 @@ static noinline int free_debug_processin
+@@ -1181,7 +1181,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1304,6 +1304,12 @@ static inline void dec_slabs_node(struct
+@@ -1309,6 +1309,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -58,31 +58,31 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1530,7 +1536,11 @@ static struct page *allocate_slab(struct
+@@ -1535,7 +1541,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (system_state == SYSTEM_RUNNING)
++ if (system_state > SYSTEM_BOOTING)
+#else
if (gfpflags_allow_blocking(flags))
+#endif
local_irq_enable();
flags |= s->allocflags;
-@@ -1605,7 +1615,11 @@ static struct page *allocate_slab(struct
+@@ -1610,7 +1620,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (system_state == SYSTEM_RUNNING)
++ if (system_state > SYSTEM_BOOTING)
+#else
if (gfpflags_allow_blocking(flags))
+#endif
local_irq_disable();
if (!page)
return NULL;
-@@ -1664,6 +1678,16 @@ static void __free_slab(struct kmem_cach
+@@ -1670,6 +1684,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1695,6 +1719,12 @@ static void free_slab(struct kmem_cache
+@@ -1701,6 +1725,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1802,7 +1832,7 @@ static void *get_partial_node(struct kme
+@@ -1808,7 +1838,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1827,7 +1857,7 @@ static void *get_partial_node(struct kme
+@@ -1833,7 +1863,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -2073,7 +2103,7 @@ static void deactivate_slab(struct kmem_
+@@ -2079,7 +2109,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2084,7 +2114,7 @@ static void deactivate_slab(struct kmem_
+@@ -2090,7 +2120,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2119,7 +2149,7 @@ static void deactivate_slab(struct kmem_
+@@ -2125,7 +2155,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2151,10 +2181,10 @@ static void unfreeze_partials(struct kme
+@@ -2157,10 +2187,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2183,7 +2213,7 @@ static void unfreeze_partials(struct kme
+@@ -2189,7 +2219,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2222,14 +2252,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2228,14 +2258,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2301,7 +2338,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2307,7 +2344,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2356,10 +2408,10 @@ static unsigned long count_partial(struc
+@@ -2362,10 +2414,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2497,8 +2549,10 @@ static inline void *get_freelist(struct
+@@ -2503,8 +2555,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *freelist;
struct page *page;
-@@ -2558,6 +2612,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2564,6 +2618,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return freelist;
new_slab:
-@@ -2589,7 +2650,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2595,7 +2656,7 @@ static void *___slab_alloc(struct kmem_c
deactivate_slab(s, page, get_freepointer(s, freelist));
c->page = NULL;
c->freelist = NULL;
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2601,6 +2662,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2607,6 +2668,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -280,7 +280,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2612,8 +2674,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2618,8 +2680,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return p;
}
-@@ -2799,7 +2862,7 @@ static void __slab_free(struct kmem_cach
+@@ -2805,7 +2868,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2831,7 +2894,7 @@ static void __slab_free(struct kmem_cach
+@@ -2837,7 +2900,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2873,7 +2936,7 @@ static void __slab_free(struct kmem_cach
+@@ -2879,7 +2942,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2888,7 +2951,7 @@ static void __slab_free(struct kmem_cach
+@@ -2894,7 +2957,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3093,6 +3156,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3099,6 +3162,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -3116,7 +3180,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3122,7 +3186,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -3128,6 +3192,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3134,6 +3198,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -352,7 +352,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3275,7 +3340,7 @@ static void
+@@ -3281,7 +3346,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3619,6 +3684,10 @@ static void list_slab_objects(struct kme
+@@ -3625,6 +3690,10 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -372,7 +372,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3639,6 +3708,7 @@ static void list_slab_objects(struct kme
+@@ -3645,6 +3714,7 @@ static void list_slab_objects(struct kme
slab_unlock(page);
kfree(map);
#endif
@@ -380,7 +380,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -3652,7 +3722,7 @@ static void free_partial(struct kmem_cac
+@@ -3658,7 +3728,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -389,7 +389,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3662,7 +3732,7 @@ static void free_partial(struct kmem_cac
+@@ -3668,7 +3738,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -398,7 +398,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3905,7 +3975,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3912,7 +3982,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3936,7 +4006,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3943,7 +4013,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -416,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4112,6 +4182,12 @@ void __init kmem_cache_init(void)
+@@ -4156,6 +4226,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -429,7 +429,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4320,7 +4396,7 @@ static int validate_slab_node(struct kme
+@@ -4364,7 +4440,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4342,7 +4418,7 @@ static int validate_slab_node(struct kme
+@@ -4386,7 +4462,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4530,12 +4606,12 @@ static int list_locations(struct kmem_ca
+@@ -4574,12 +4650,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 06f6ccc39466..59c4c0f0412a 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1792,7 +1792,7 @@ static void drain_all_stock(struct mem_c
+@@ -1782,7 +1782,7 @@ static void drain_all_stock(struct mem_c
return;
/* Notify other cpus that system-wide "drain" is running */
get_online_cpus();
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1809,7 +1809,7 @@ static void drain_all_stock(struct mem_c
+@@ -1799,7 +1799,7 @@ static void drain_all_stock(struct mem_c
schedule_work_on(cpu, &stock->work);
}
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index a4549a5955df..cddbaeaf4ff6 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -12,15 +12,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -67,6 +67,7 @@
+@@ -69,6 +69,7 @@
#include <net/sock.h>
#include <net/ip.h>
#include "slab.h"
+#include <linux/locallock.h>
- #include <asm/uaccess.h>
+ #include <linux/uaccess.h>
-@@ -92,6 +93,8 @@ int do_swap_account __read_mostly;
+@@ -94,6 +95,8 @@ int do_swap_account __read_mostly;
#define do_swap_account 0
#endif
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4555,12 +4558,12 @@ static int mem_cgroup_move_account(struc
+@@ -4535,12 +4538,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5435,10 +5438,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5422,10 +5425,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5494,14 +5497,14 @@ static void uncharge_batch(struct mem_cg
+@@ -5481,14 +5484,14 @@ static void uncharge_batch(struct mem_cg
memcg_oom_recover(memcg);
}
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(memcg))
css_put_many(&memcg->css, nr_pages);
-@@ -5850,6 +5853,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5838,6 +5841,7 @@ void mem_cgroup_swapout(struct page *pag
{
struct mem_cgroup *memcg, *swap_memcg;
unsigned short oldid;
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5890,12 +5894,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -5878,12 +5882,16 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for udpating the per-CPU variables.
*/
diff --git a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
index 2ff2b53ab10c..4c40b9559bb5 100644
--- a/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
+++ b/patches/mm-memcontrol-mem_cgroup_migrate-replace-another-loc.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -5659,10 +5659,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -5646,10 +5646,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 152606c2f649..7e3e0389a653 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -286,9 +286,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -291,9 +291,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags) \
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index ec723b03314a..f6adc0f79567 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1085,7 +1085,7 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1099,7 +1099,7 @@ static bool bulkfree_pcp_prepare(struct
#endif /* CONFIG_DEBUG_VM */
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1096,19 +1096,58 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1110,19 +1110,58 @@ static bool bulkfree_pcp_prepare(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (count) {
struct page *page;
struct list_head *list;
-@@ -1124,7 +1163,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1138,7 +1177,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -1132,27 +1171,12 @@ static void free_pcppages_bulk(struct zo
+@@ -1146,27 +1185,12 @@ static void free_pcppages_bulk(struct zo
batch_free = count;
do {
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -1161,7 +1185,9 @@ static void free_one_page(struct zone *z
+@@ -1175,7 +1199,9 @@ static void free_one_page(struct zone *z
int migratetype)
{
unsigned long nr_scanned;
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nr_scanned = node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED);
if (nr_scanned)
__mod_node_page_state(zone->zone_pgdat, NR_PAGES_SCANNED, -nr_scanned);
-@@ -1171,7 +1197,7 @@ static void free_one_page(struct zone *z
+@@ -1185,7 +1211,7 @@ static void free_one_page(struct zone *z
migratetype = get_pfnblock_migratetype(page, pfn);
}
__free_one_page(page, pfn, zone, order, migratetype);
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -2259,16 +2285,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2299,16 +2325,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -163,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2284,16 +2312,21 @@ static void drain_pages_zone(unsigned in
+@@ -2324,16 +2352,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2475,8 +2508,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2556,8 +2589,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index 7f9bca2c23a3..2e38cdb16267 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -12,20 +12,20 @@ Contains fixes from:
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- mm/page_alloc.c | 57 ++++++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 41 insertions(+), 16 deletions(-)
+ mm/page_alloc.c | 55 +++++++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 39 insertions(+), 16 deletions(-)
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -61,6 +61,7 @@
- #include <linux/page_ext.h>
+@@ -62,6 +62,7 @@
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
+ #include <linux/sched/mm.h>
+#include <linux/locallock.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -281,6 +282,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -286,6 +287,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1244,10 +1257,10 @@ static void __free_pages_ok(struct page
+@@ -1258,10 +1271,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2248,14 +2261,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2288,14 +2301,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2272,7 +2285,7 @@ static void drain_pages_zone(unsigned in
+@@ -2312,7 +2325,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2280,7 +2293,7 @@ static void drain_pages_zone(unsigned in
+@@ -2320,7 +2333,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,25 +92,47 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2366,8 +2379,17 @@ void drain_all_pages(struct zone *zone)
+@@ -2355,6 +2368,7 @@ void drain_local_pages(struct zone *zone
+ drain_pages(cpu);
+ }
+
++#ifndef CONFIG_PREEMPT_RT_BASE
+ static void drain_local_pages_wq(struct work_struct *work)
+ {
+ /*
+@@ -2368,6 +2382,7 @@ static void drain_local_pages_wq(struct
+ drain_local_pages(NULL);
+ preempt_enable();
+ }
++#endif
+
+ /*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
+@@ -2438,7 +2453,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
-+#ifndef CONFIG_PREEMPT_RT_BASE
- on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages,
- zone, 1);
-+#else
+-
++#ifdef CONFIG_PREEMPT_RT_BASE
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ if (zone)
+ drain_pages_zone(cpu, zone);
+ else
+ drain_pages(cpu);
+ }
++#else
+ for_each_cpu(cpu, &cpus_with_pcps) {
+ struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
+ INIT_WORK(work, drain_local_pages_wq);
+@@ -2446,6 +2468,7 @@ void drain_all_pages(struct zone *zone)
+ }
+ for_each_cpu(cpu, &cpus_with_pcps)
+ flush_work(per_cpu_ptr(&pcpu_drain, cpu));
+#endif
- }
- #ifdef CONFIG_HIBERNATION
-@@ -2427,7 +2449,7 @@ void free_hot_cold_page(struct page *pag
+ mutex_unlock(&pcpu_drain_mutex);
+ }
+@@ -2507,7 +2530,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -119,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2458,7 +2480,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2538,7 +2561,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -128,45 +150,42 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2600,7 +2622,7 @@ struct page *buffered_rmqueue(struct zon
- struct per_cpu_pages *pcp;
- struct list_head *list;
-
-- local_irq_save(flags);
-+ local_lock_irqsave(pa_lock, flags);
- do {
- pcp = &this_cpu_ptr(zone->pageset)->pcp;
- list = &pcp->lists[migratetype];
-@@ -2627,7 +2649,7 @@ struct page *buffered_rmqueue(struct zon
- * allocate greater than order-1 page units with __GFP_NOFAIL.
- */
- WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
-- spin_lock_irqsave(&zone->lock, flags);
-+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
-
- do {
- page = NULL;
-@@ -2639,22 +2661,24 @@ struct page *buffered_rmqueue(struct zon
- if (!page)
- page = __rmqueue(zone, order, migratetype);
- } while (page && check_new_pages(page, order));
-- spin_unlock(&zone->lock);
-- if (!page)
-+ if (!page) {
-+ spin_unlock(&zone->lock);
- goto failed;
-+ }
- __mod_zone_freepage_state(zone, -(1 << order),
- get_pcppage_migratetype(page));
-+ spin_unlock(&zone->lock);
+@@ -2695,7 +2718,7 @@ static struct page *rmqueue_pcplist(stru
+ struct page *page;
+ unsigned long flags;
+
+- local_irq_save(flags);
++ local_lock_irqsave(pa_lock, flags);
+ pcp = &this_cpu_ptr(zone->pageset)->pcp;
+ list = &pcp->lists[migratetype];
+ page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
+@@ -2703,7 +2726,7 @@ static struct page *rmqueue_pcplist(stru
+ __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+ zone_statistics(preferred_zone, zone);
}
+- local_irq_restore(flags);
++ local_unlock_irqrestore(pa_lock, flags);
+ return page;
+ }
+
+@@ -2730,7 +2753,7 @@ struct page *rmqueue(struct zone *prefer
+ * allocate greater than order-1 page units with __GFP_NOFAIL.
+ */
+ WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
+- spin_lock_irqsave(&zone->lock, flags);
++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags);
+
+ do {
+ page = NULL;
+@@ -2750,14 +2773,14 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
+ zone_statistics(preferred_zone, zone);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pa_lock, flags);
- VM_BUG_ON_PAGE(bad_range(zone, page), page);
+ out:
+ VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
return page;
failed:
@@ -175,15 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -6558,6 +6582,7 @@ static int page_alloc_cpu_notify(struct
- void __init page_alloc_init(void)
- {
- hotcpu_notifier(page_alloc_cpu_notify, 0);
-+ local_irq_lock_init(pa_lock);
- }
-
- /*
-@@ -7386,7 +7411,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7591,7 +7614,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -192,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7395,7 +7420,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7600,7 +7623,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
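
Every hunk in the page-allocator patch above applies the same conversion: the local_irq_save()/local_irq_restore() pair (or the spin_lock_irqsave() on zone->lock) that protects the per-CPU pageset becomes pa_lock, so the section stays exclusive per CPU but may sleep and be preempted on PREEMPT_RT. A minimal sketch of that conversion, assuming the locallock API from linux/locallock.h as used in the hunks; the per-CPU counter and demo_inc() are made-up placeholders, not part of the patch:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_count);   /* stand-in for the per-CPU pageset */
static DEFINE_LOCAL_IRQ_LOCK(demo_lock);            /* irq-off on !RT, per-CPU spinlock on RT */

static void demo_inc(void)
{
        unsigned long flags;

        /* was: local_irq_save(flags); */
        local_lock_irqsave(demo_lock, flags);
        this_cpu_inc(demo_count);
        /* was: local_irq_restore(flags); */
        local_unlock_irqrestore(demo_lock, flags);
}
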
diff --git a/patches/mm-perform-lru_add_drain_all-remotely.patch b/patches/mm-perform-lru_add_drain_all-remotely.patch
index 63193f823abf..10f37c213381 100644
--- a/patches/mm-perform-lru_add_drain_all-remotely.patch
+++ b/patches/mm-perform-lru_add_drain_all-remotely.patch
@@ -19,12 +19,12 @@ Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Luiz Capitulino <lcapitulino@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- mm/swap.c | 42 ++++++++++++++++++++++++++++++++----------
- 1 file changed, 32 insertions(+), 10 deletions(-)
+ mm/swap.c | 37 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 30 insertions(+), 7 deletions(-)
--- a/mm/swap.c
+++ b/mm/swap.c
-@@ -597,9 +597,15 @@ void lru_add_drain_cpu(int cpu)
+@@ -599,9 +599,15 @@ void lru_add_drain_cpu(int cpu)
unsigned long flags;
/* No harm done if a racing interrupt already did this */
@@ -40,41 +40,33 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
-@@ -667,12 +673,15 @@ void lru_add_drain(void)
+@@ -669,6 +675,16 @@ void lru_add_drain(void)
local_unlock_cpu(swapvec_lock);
}
--static void lru_add_drain_per_cpu(struct work_struct *dummy)
+#ifdef CONFIG_PREEMPT_RT_BASE
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
- {
-- lru_add_drain();
++{
+ local_lock_on(swapvec_lock, cpu);
+ lru_add_drain_cpu(cpu);
+ local_unlock_on(swapvec_lock, cpu);
- }
-
--static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
++}
++
+#else
++
+ static void lru_add_drain_per_cpu(struct work_struct *dummy)
+ {
+ lru_add_drain();
+@@ -676,6 +692,16 @@ static void lru_add_drain_per_cpu(struct
- /*
- * lru_add_drain_wq is used to do lru_add_drain_all() from a WQ_MEM_RECLAIM
-@@ -692,6 +701,22 @@ static int __init lru_init(void)
- }
- early_initcall(lru_init);
+ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
-+static void lru_add_drain_per_cpu(struct work_struct *dummy)
-+{
-+ lru_add_drain();
-+}
-+
-+static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
+static inline void remote_lru_add_drain(int cpu, struct cpumask *has_work)
+{
+ struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
+
+ INIT_WORK(work, lru_add_drain_per_cpu);
-+ queue_work_on(cpu, lru_add_drain_wq, work);
++ queue_work_on(cpu, mm_percpu_wq, work);
+ cpumask_set_cpu(cpu, has_work);
+}
+#endif
@@ -82,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void lru_add_drain_all(void)
{
static DEFINE_MUTEX(lock);
-@@ -703,21 +728,18 @@ void lru_add_drain_all(void)
+@@ -694,21 +720,18 @@ void lru_add_drain_all(void)
cpumask_clear(&has_work);
for_each_online_cpu(cpu) {
@@ -94,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
- need_activate_page_drain(cpu)) {
- INIT_WORK(work, lru_add_drain_per_cpu);
-- queue_work_on(cpu, lru_add_drain_wq, work);
+- queue_work_on(cpu, mm_percpu_wq, work);
- cpumask_set_cpu(cpu, &has_work);
- }
+ need_activate_page_drain(cpu))
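
The RT branch introduced above relies on a property that plain preemption-based protection lacks: on RT, swapvec_lock is a real per-CPU lock, so it can be acquired on behalf of another CPU and the pagevecs drained in place rather than queueing a work item on every CPU. A sketch of that idiom, reusing swapvec_lock and lru_add_drain_cpu() from the hunks above as if it lived in mm/swap.c; demo_drain_remote() itself is invented for illustration:

#include <linux/locallock.h>
#include <linux/swap.h>          /* lru_add_drain_cpu() */

static void demo_drain_remote(int cpu)
{
        /* RT only: take CPU @cpu's local lock from the current CPU */
        local_lock_on(swapvec_lock, cpu);
        lru_add_drain_cpu(cpu);
        local_unlock_on(swapvec_lock, cpu);
}
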
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index 0cbc888b9faf..6c54732a3a66 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -1017,12 +1017,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1022,12 +1022,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
@@ -53,15 +53,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
up_read(&old_mm->mmap_sem);
--- a/mm/mmu_context.c
+++ b/mm/mmu_context.c
-@@ -23,6 +23,7 @@ void use_mm(struct mm_struct *mm)
+@@ -25,6 +25,7 @@ void use_mm(struct mm_struct *mm)
struct task_struct *tsk = current;
task_lock(tsk);
+ preempt_disable_rt();
active_mm = tsk->active_mm;
if (active_mm != mm) {
- atomic_inc(&mm->mm_count);
-@@ -30,6 +31,7 @@ void use_mm(struct mm_struct *mm)
+ mmgrab(mm);
+@@ -32,6 +33,7 @@ void use_mm(struct mm_struct *mm)
}
tsk->mm = mm;
switch_mm(active_mm, mm, tsk);
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 0b521f4b9a92..e6825847fe62 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -30,7 +30,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
-@@ -35,6 +35,7 @@
+@@ -37,6 +37,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/kdebug.h>
@@ -38,7 +38,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#include <asm/pgtable.h>
#include <asm/ldt.h>
-@@ -195,6 +196,35 @@ start_thread(struct pt_regs *regs, unsig
+@@ -196,6 +197,35 @@ start_thread(struct pt_regs *regs, unsig
}
EXPORT_SYMBOL_GPL(start_thread);
@@ -221,26 +221,26 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -26,6 +26,7 @@ struct sched_param {
- #include <linux/nodemask.h>
- #include <linux/mm_types.h>
- #include <linux/preempt.h>
+@@ -26,6 +26,7 @@
+ #include <linux/signal_types.h>
+ #include <linux/mm_types_task.h>
+ #include <linux/task_io_accounting.h>
+#include <asm/kmap_types.h>
- #include <asm/page.h>
- #include <asm/ptrace.h>
-@@ -1985,6 +1986,12 @@ struct task_struct {
- int softirq_nestcnt;
- unsigned int softirqs_raised;
+ /* task_struct member predeclarations (sorted alphabetically): */
+ struct audit_context;
+@@ -1062,6 +1063,12 @@ struct task_struct {
+ int softirq_nestcnt;
+ unsigned int softirqs_raised;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
-+ int kmap_idx;
-+ pte_t kmap_pte[KM_TYPE_NR];
++ int kmap_idx;
++ pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
+ unsigned long task_state_change;
#endif
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
diff --git a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
index 3990c550b7c6..2072310b61fb 100644
--- a/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
+++ b/patches/mm-scatterlist-dont-disable-irqs-on-RT.patch
@@ -2,15 +2,13 @@ From: Thomas Gleixner <tglx@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:34 -0500
Subject: mm/scatterlist: Do not disable irqs on RT
-The local_irq_save() is not only used to get things done "fast" but
-also to ensure that in case of SG_MITER_ATOMIC we are in "atomic"
-context for kmap_atomic(). For -RT it is enough to keep pagefault
-disabled (which is currently handled by kmap_atomic()).
+For -RT it is enough to keep pagefault disabled (which is currently handled by
+kmap_atomic()).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- lib/scatterlist.c | 6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
+ lib/scatterlist.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -23,21 +21,3 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
kunmap_atomic(miter->addr);
} else
kunmap(miter->page);
-@@ -664,7 +664,7 @@ size_t sg_copy_buffer(struct scatterlist
- if (!sg_miter_skip(&miter, skip))
- return false;
-
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
-
- while (sg_miter_next(&miter) && offset < buflen) {
- unsigned int len;
-@@ -681,7 +681,7 @@ size_t sg_copy_buffer(struct scatterlist
-
- sg_miter_stop(&miter);
-
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- return offset;
- }
- EXPORT_SYMBOL(sg_copy_buffer);
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index d43b7bda9481..16c743428f61 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -845,7 +845,7 @@ static void *new_vmap_block(unsigned int
+@@ -855,7 +855,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -888,11 +888,12 @@ static void *new_vmap_block(unsigned int
+@@ -898,11 +898,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -961,6 +962,7 @@ static void *vb_alloc(unsigned long size
+@@ -971,6 +972,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -975,7 +977,8 @@ static void *vb_alloc(unsigned long size
+@@ -985,7 +987,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -998,7 +1001,7 @@ static void *vb_alloc(unsigned long size
+@@ -1008,7 +1011,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index f8a0f682f839..8d6ea6c2e3b0 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -10,10 +10,10 @@ so I catch users of it which will be introduced later.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/swap.h | 4 +++-
- mm/filemap.c | 13 +++++++++----
- mm/truncate.c | 7 +++++--
- mm/workingset.c | 23 ++++++++++++-----------
- 4 files changed, 29 insertions(+), 18 deletions(-)
+ mm/filemap.c | 9 +++++++--
+ mm/truncate.c | 4 +++-
+ mm/workingset.c | 31 ++++++++++++++++---------------
+ 4 files changed, 29 insertions(+), 19 deletions(-)
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -25,129 +25,159 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/page.h>
struct notifier_block;
-@@ -247,7 +248,8 @@ struct swap_info_struct {
+@@ -254,7 +255,8 @@ struct swap_info_struct {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
--extern struct list_lru workingset_shadow_nodes;
-+extern struct list_lru __workingset_shadow_nodes;
-+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+-void workingset_update_node(struct radix_tree_node *node, void *private);
++void __workingset_update_node(struct radix_tree_node *node, void *private);
++DECLARE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
- static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
- {
+ /* linux/mm/page_alloc.c */
+ extern unsigned long totalram_pages;
--- a/mm/filemap.c
+++ b/mm/filemap.c
-@@ -159,9 +159,12 @@ static int page_cache_tree_insert(struct
- * node->private_list is protected by
- * mapping->tree_lock.
- */
-- if (!list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes,
-+ if (!list_empty(&node->private_list)) {
-+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
-+ local_unlock(workingset_shadow_lock);
-+ }
+@@ -110,6 +110,7 @@
+ * ->i_mmap_rwsem
+ * ->tasklist_lock (memory_failure, collect_procs_ao)
+ */
++DECLARE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
+
+ static int page_cache_tree_insert(struct address_space *mapping,
+ struct page *page, void **shadowp)
+@@ -142,8 +143,10 @@ static int page_cache_tree_insert(struct
+ true);
+ }
}
++ local_lock(shadow_nodes_lock);
+ __radix_tree_replace(&mapping->page_tree, node, slot, page,
+- workingset_update_node, mapping);
++ __workingset_update_node, mapping);
++ local_unlock(shadow_nodes_lock);
+ mapping->nrpages++;
return 0;
}
-@@ -217,8 +220,10 @@ static void page_cache_tree_delete(struc
- if (!dax_mapping(mapping) && !workingset_node_pages(node) &&
- list_empty(&node->private_list)) {
- node->private_data = mapping;
-- list_lru_add(&workingset_shadow_nodes,
-- &node->private_list);
-+ local_lock(workingset_shadow_lock);
-+ list_lru_add(&__workingset_shadow_nodes,
-+ &node->private_list);
-+ local_unlock(workingset_shadow_lock);
- }
+@@ -160,6 +163,7 @@ static void page_cache_tree_delete(struc
+ VM_BUG_ON_PAGE(PageTail(page), page);
+ VM_BUG_ON_PAGE(nr != 1 && shadow, page);
+
++ local_lock(shadow_nodes_lock);
+ for (i = 0; i < nr; i++) {
+ struct radix_tree_node *node;
+ void **slot;
+@@ -171,8 +175,9 @@ static void page_cache_tree_delete(struc
+
+ radix_tree_clear_tags(&mapping->page_tree, node, slot);
+ __radix_tree_replace(&mapping->page_tree, node, slot, shadow,
+- workingset_update_node, mapping);
++ __workingset_update_node, mapping);
}
++ local_unlock(shadow_nodes_lock);
+ if (shadow) {
+ mapping->nrexceptional += nr;
--- a/mm/truncate.c
+++ b/mm/truncate.c
-@@ -62,9 +62,12 @@ static void clear_exceptional_entry(stru
- * protected by mapping->tree_lock.
- */
- if (!workingset_node_shadows(node) &&
-- !list_empty(&node->private_list))
-- list_lru_del(&workingset_shadow_nodes,
-+ !list_empty(&node->private_list)) {
-+ local_lock(workingset_shadow_lock);
-+ list_lru_del(&__workingset_shadow_nodes,
- &node->private_list);
-+ local_unlock(workingset_shadow_lock);
-+ }
- __radix_tree_delete_node(&mapping->page_tree, node);
+@@ -41,8 +41,10 @@ static void clear_shadow_entry(struct ad
+ goto unlock;
+ if (*slot != entry)
+ goto unlock;
++ local_lock(shadow_nodes_lock);
+ __radix_tree_replace(&mapping->page_tree, node, slot, NULL,
+- workingset_update_node, mapping);
++ __workingset_update_node, mapping);
++ local_unlock(shadow_nodes_lock);
+ mapping->nrexceptional--;
unlock:
spin_unlock_irq(&mapping->tree_lock);
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -334,7 +334,8 @@ void workingset_activation(struct page *
+@@ -339,9 +339,10 @@ void workingset_activation(struct page *
* point where they would still be useful.
*/
--struct list_lru workingset_shadow_nodes;
-+struct list_lru __workingset_shadow_nodes;
-+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
+-static struct list_lru shadow_nodes;
++static struct list_lru __shadow_nodes;
++DEFINE_LOCAL_IRQ_LOCK(shadow_nodes_lock);
- static unsigned long count_shadow_nodes(struct shrinker *shrinker,
- struct shrink_control *sc)
-@@ -344,9 +345,9 @@ static unsigned long count_shadow_nodes(
- unsigned long pages;
+-void workingset_update_node(struct radix_tree_node *node, void *private)
++void __workingset_update_node(struct radix_tree_node *node, void *private)
+ {
+ struct address_space *mapping = private;
+
+@@ -359,10 +360,10 @@ void workingset_update_node(struct radix
+ */
+ if (node->count && node->count == node->exceptional) {
+ if (list_empty(&node->private_list))
+- list_lru_add(&shadow_nodes, &node->private_list);
++ list_lru_add(&__shadow_nodes, &node->private_list);
+ } else {
+ if (!list_empty(&node->private_list))
+- list_lru_del(&shadow_nodes, &node->private_list);
++ list_lru_del(&__shadow_nodes, &node->private_list);
+ }
+ }
+
+@@ -374,9 +375,9 @@ static unsigned long count_shadow_nodes(
+ unsigned long cache;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- local_irq_disable();
-- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc);
+- nodes = list_lru_shrink_count(&shadow_nodes, sc);
- local_irq_enable();
-+ local_lock_irq(workingset_shadow_lock);
-+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc);
-+ local_unlock_irq(workingset_shadow_lock);
++ local_lock_irq(shadow_nodes_lock);
++ nodes = list_lru_shrink_count(&__shadow_nodes, sc);
++ local_unlock_irq(shadow_nodes_lock);
+
+ /*
+ * Approximate a reasonable limit for the radix tree nodes
+@@ -478,15 +479,15 @@ static enum lru_status shadow_lru_isolat
+ mem_cgroup_inc_page_stat(virt_to_page(node),
+ MEMCG_WORKINGSET_NODERECLAIM);
+ __radix_tree_delete_node(&mapping->page_tree, node,
+- workingset_update_node, mapping);
++ __workingset_update_node, mapping);
- if (sc->memcg) {
- pages = mem_cgroup_node_nr_lru_pages(sc->memcg, sc->nid,
-@@ -438,9 +439,9 @@ static enum lru_status shadow_lru_isolat
+ out_invalid:
spin_unlock(&mapping->tree_lock);
ret = LRU_REMOVED_RETRY;
out:
- local_irq_enable();
-+ local_unlock_irq(workingset_shadow_lock);
++ local_unlock_irq(shadow_nodes_lock);
cond_resched();
- local_irq_disable();
-+ local_lock_irq(workingset_shadow_lock);
++ local_lock_irq(shadow_nodes_lock);
spin_lock(lru_lock);
return ret;
}
-@@ -451,10 +452,10 @@ static unsigned long scan_shadow_nodes(s
+@@ -497,9 +498,9 @@ static unsigned long scan_shadow_nodes(s
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
- local_irq_disable();
-- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
-+ local_lock_irq(workingset_shadow_lock);
-+ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc,
- shadow_lru_isolate, NULL);
+- ret = list_lru_shrink_walk(&shadow_nodes, sc, shadow_lru_isolate, NULL);
- local_irq_enable();
-+ local_unlock_irq(workingset_shadow_lock);
++ local_lock_irq(shadow_nodes_lock);
++ ret = list_lru_shrink_walk(&__shadow_nodes, sc, shadow_lru_isolate, NULL);
++ local_unlock_irq(shadow_nodes_lock);
return ret;
}
-@@ -492,7 +493,7 @@ static int __init workingset_init(void)
+@@ -537,7 +538,7 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
-- ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
-+ ret = __list_lru_init(&__workingset_shadow_nodes, true, &shadow_nodes_key);
+- ret = __list_lru_init(&shadow_nodes, true, &shadow_nodes_key);
++ ret = __list_lru_init(&__shadow_nodes, true, &shadow_nodes_key);
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -500,7 +501,7 @@ static int __init workingset_init(void)
+@@ -545,7 +546,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
-- list_lru_destroy(&workingset_shadow_nodes);
-+ list_lru_destroy(&__workingset_shadow_nodes);
+- list_lru_destroy(&shadow_nodes);
++ list_lru_destroy(&__shadow_nodes);
err:
return ret;
}
diff --git a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 9b329559fc87..8c42f6781770 100644
--- a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
-@@ -327,7 +341,7 @@ static void SetZsPageMovable(struct zs_p
+@@ -323,7 +337,7 @@ static void SetZsPageMovable(struct zs_p
static int create_cache(struct zs_pool *pool)
{
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
0, 0, NULL);
if (!pool->handle_cachep)
return 1;
-@@ -351,10 +365,27 @@ static void destroy_cache(struct zs_pool
+@@ -347,10 +361,27 @@ static void destroy_cache(struct zs_pool
static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
kmem_cache_free(pool->handle_cachep, (void *)handle);
-@@ -373,12 +404,18 @@ static void cache_free_zspage(struct zs_
+@@ -369,12 +400,18 @@ static void cache_free_zspage(struct zs_
static void record_obj(unsigned long handle, unsigned long obj)
{
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* zpool driver */
-@@ -467,6 +504,7 @@ MODULE_ALIAS("zpool-zsmalloc");
+@@ -463,6 +500,7 @@ MODULE_ALIAS("zpool-zsmalloc");
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool is_zspage_isolated(struct zspage *zspage)
{
-@@ -902,7 +940,13 @@ static unsigned long location_to_obj(str
+@@ -898,7 +936,13 @@ static unsigned long location_to_obj(str
static unsigned long handle_to_obj(unsigned long handle)
{
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -916,22 +960,46 @@ static unsigned long obj_to_head(struct
+@@ -912,22 +956,46 @@ static unsigned long obj_to_head(struct
static inline int testpin_tag(unsigned long handle)
{
@@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void reset_page(struct page *page)
-@@ -1423,7 +1491,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -1376,7 +1444,7 @@ void *zs_map_object(struct zs_pool *pool
class = pool->size_class[class_idx];
off = (class->size * obj_idx) & ~PAGE_MASK;
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1477,7 +1545,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1430,7 +1498,7 @@ void zs_unmap_object(struct zs_pool *poo
__zs_unmap_object(area, pages, off, class->size);
}
diff --git a/patches/mmci-remove-bogus-irq-save.patch b/patches/mmci-remove-bogus-irq-save.patch
index 058493ddb583..6ee92924ebe8 100644
--- a/patches/mmci-remove-bogus-irq-save.patch
+++ b/patches/mmci-remove-bogus-irq-save.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
-@@ -1147,15 +1147,12 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1204,15 +1204,12 @@ static irqreturn_t mmci_pio_irq(int irq,
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
void __iomem *base = host->base;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
unsigned int remain, len;
char *buffer;
-@@ -1195,8 +1192,6 @@ static irqreturn_t mmci_pio_irq(int irq,
+@@ -1252,8 +1249,6 @@ static irqreturn_t mmci_pio_irq(int irq,
sg_miter_stop(sg_miter);
diff --git a/patches/mutex-no-spin-on-rt.patch b/patches/mutex-no-spin-on-rt.patch
index e5308f6dd3df..6f7ca0e2e89e 100644
--- a/patches/mutex-no-spin-on-rt.patch
+++ b/patches/mutex-no-spin-on-rt.patch
@@ -16,8 +16,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config MUTEX_SPIN_ON_OWNER
def_bool y
-- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
-+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
+- depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW
++ depends on SMP && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
config RWSEM_SPIN_ON_OWNER
def_bool y
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 0e0cdc3d0646..3a1979885423 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -396,7 +396,19 @@ typedef enum rx_handler_result rx_handle
+@@ -409,7 +409,19 @@ typedef enum rx_handler_result rx_handle
typedef rx_handler_result_t rx_handler_func_t(struct sk_buff **pskb);
void __napi_schedule(struct napi_struct *n);
@@ -50,19 +50,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -4938,6 +4938,7 @@ void __napi_schedule(struct napi_struct
+@@ -4961,6 +4961,7 @@ bool napi_schedule_prep(struct napi_stru
}
- EXPORT_SYMBOL(__napi_schedule);
+ EXPORT_SYMBOL(napi_schedule_prep);
+#ifndef CONFIG_PREEMPT_RT_FULL
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -4949,6 +4950,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -4972,6 +4973,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
+#endif
- void __napi_complete(struct napi_struct *n)
+ bool napi_complete_done(struct napi_struct *n, int work_done)
{
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index fd46e559bcd9..2ebf380ccc0d 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
-@@ -33,11 +34,11 @@ int gnet_stats_start_copy_compat(struct
+@@ -35,11 +36,11 @@ int gnet_stats_start_copy_compat(struct
spinlock_t *lock, struct gnet_dump *d,
int padattr);
@@ -62,23 +62,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-@@ -55,14 +56,14 @@ int gen_new_estimator(struct gnet_stats_
+@@ -56,13 +57,13 @@ int gen_new_estimator(struct gnet_stats_
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
- seqcount_t *running, struct nlattr *opt);
+ net_seqlock_t *running, struct nlattr *opt);
- void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
- struct gnet_stats_rate_est64 *rate_est);
+ void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **ptr,
spinlock_t *stats_lock,
- seqcount_t *running, struct nlattr *opt);
+ net_seqlock_t *running, struct nlattr *opt);
- bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
- const struct gnet_stats_rate_est64 *rate_est);
- #endif
+ bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
+ bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
+ struct gnet_stats_rate_est64 *sample);
--- /dev/null
+++ b/include/net/net_seq_lock.h
@@ -0,0 +1,15 @@
@@ -168,33 +167,33 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/gen_estimator.c
+++ b/net/core/gen_estimator.c
-@@ -84,7 +84,7 @@ struct gen_estimator
+@@ -46,7 +46,7 @@
+ struct net_rate_estimator {
struct gnet_stats_basic_packed *bstats;
- struct gnet_stats_rate_est64 *rate_est;
spinlock_t *stats_lock;
- seqcount_t *running;
+ net_seqlock_t *running;
- int ewma_log;
- u32 last_packets;
- unsigned long avpps;
-@@ -213,7 +213,7 @@ int gen_new_estimator(struct gnet_stats_
+ struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+ u8 ewma_log;
+ u8 intvl_log; /* period : (250ms << intvl_log) */
+@@ -128,7 +128,7 @@ int gen_new_estimator(struct gnet_stats_
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
- seqcount_t *running,
+ net_seqlock_t *running,
struct nlattr *opt)
{
- struct gen_estimator *est;
-@@ -309,7 +309,7 @@ int gen_replace_estimator(struct gnet_st
+ struct gnet_estimator *parm = nla_data(opt);
+@@ -217,7 +217,7 @@ int gen_replace_estimator(struct gnet_st
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
- struct gnet_stats_rate_est64 *rate_est,
+ struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
- seqcount_t *running, struct nlattr *opt)
+ net_seqlock_t *running, struct nlattr *opt)
{
- gen_kill_estimator(bstats, rate_est);
- return gen_new_estimator(bstats, cpu_bstats, rate_est, stats_lock, running, opt);
+ return gen_new_estimator(bstats, cpu_bstats, rate_est,
+ stats_lock, running, opt);
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -130,7 +130,7 @@ static void
@@ -230,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *b)
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
-@@ -981,7 +981,7 @@ static struct Qdisc *qdisc_create(struct
+@@ -980,7 +980,7 @@ static struct Qdisc *qdisc_create(struct
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
diff --git a/patches/net-add-a-lock-around-icmp_sk.patch b/patches/net-add-a-lock-around-icmp_sk.patch
index 5734d88ef3da..523f6fbe4352 100644
--- a/patches/net-add-a-lock-around-icmp_sk.patch
+++ b/patches/net-add-a-lock-around-icmp_sk.patch
@@ -9,8 +9,8 @@ here a local lock. No crash has been observed, this is just a precaution.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- net/ipv4/icmp.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
+ net/ipv4/icmp.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -31,42 +31,35 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct sock *icmp_sk(struct net *net)
{
return *this_cpu_ptr(net->ipv4.icmp_sk);
-@@ -215,12 +218,14 @@ static inline struct sock *icmp_xmit_loc
+@@ -417,6 +420,7 @@ static void icmp_reply(struct icmp_bxm *
+ /* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
-
+ local_lock(icmp_sk_lock);
- sk = icmp_sk(net);
- if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
- /* This can happen if the output path signals a
- * dst_link_failure() for an outgoing ICMP packet.
- */
-+ local_unlock(icmp_sk_lock);
- local_bh_enable();
- return NULL;
- }
-@@ -230,6 +235,7 @@ static inline struct sock *icmp_xmit_loc
- static inline void icmp_xmit_unlock(struct sock *sk)
- {
- spin_unlock_bh(&sk->sk_lock.slock);
+ /* global icmp_msgs_per_sec */
+ if (!icmpv4_global_allow(net, type, code))
+@@ -461,6 +465,7 @@ static void icmp_reply(struct icmp_bxm *
+ out_unlock:
+ icmp_xmit_unlock(sk);
+ out_bh_enable:
+ local_unlock(icmp_sk_lock);
+ local_bh_enable();
}
- int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
-@@ -358,6 +364,7 @@ static void icmp_push_reply(struct icmp_
- struct sock *sk;
- struct sk_buff *skb;
+@@ -673,6 +678,7 @@ void icmp_send(struct sk_buff *skb_in, i
+ /* Needed by both icmp_global_allow and icmp_xmit_lock */
+ local_bh_disable();
+ local_lock(icmp_sk_lock);
- sk = icmp_sk(dev_net((*rt)->dst.dev));
- if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
- icmp_param->data_len+icmp_param->head_len,
-@@ -380,6 +387,7 @@ static void icmp_push_reply(struct icmp_
- skb->ip_summed = CHECKSUM_NONE;
- ip_push_pending_frames(sk, fl4);
- }
+
+ /* Check global sysctl_icmp_msgs_per_sec ratelimit */
+ if (!icmpv4_global_allow(net, type, code))
+@@ -757,6 +763,7 @@ void icmp_send(struct sk_buff *skb_in, i
+ out_unlock:
+ icmp_xmit_unlock(sk);
+ out_bh_enable:
+ local_unlock(icmp_sk_lock);
+ local_bh_enable();
+ out:;
}
-
- /*
diff --git a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 75e5d32538e2..4d66b9506f4c 100644
--- a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -37,8 +37,8 @@ This brings back the old locks.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- net/ipv4/tcp_ipv4.c | 7 +++++++
- 1 file changed, 7 insertions(+)
+ net/ipv4/tcp_ipv4.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <net/net_namespace.h>
#include <net/icmp.h>
-@@ -568,6 +569,7 @@ void tcp_v4_send_check(struct sock *sk,
+@@ -583,6 +584,7 @@ void tcp_v4_send_check(struct sock *sk,
}
EXPORT_SYMBOL(tcp_v4_send_check);
@@ -58,16 +58,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This routine will send an RST to the other tcp.
*
-@@ -695,6 +697,8 @@ static void tcp_v4_send_reset(const stru
- offsetof(struct inet_timewait_sock, tw_bound_dev_if));
+@@ -711,6 +713,7 @@ static void tcp_v4_send_reset(const stru
arg.tos = ip_hdr(skb)->tos;
-+
+ arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
+ local_lock(tcp_sk_lock);
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -704,6 +708,7 @@ static void tcp_v4_send_reset(const stru
+@@ -720,6 +723,7 @@ static void tcp_v4_send_reset(const stru
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
local_bh_enable();
@@ -75,15 +74,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_TCP_MD5SIG
out:
-@@ -779,6 +784,7 @@ static void tcp_v4_send_ack(struct net *
- if (oif)
+@@ -797,6 +801,7 @@ static void tcp_v4_send_ack(const struct
arg.bound_dev_if = oif;
arg.tos = tos;
+ arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
+ local_lock(tcp_sk_lock);
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -787,6 +793,7 @@ static void tcp_v4_send_ack(struct net *
+@@ -805,6 +810,7 @@ static void tcp_v4_send_ack(const struct
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
local_bh_enable();
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index a8609207de3d..13693942050a 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -360,6 +361,7 @@ struct napi_alloc_cache {
+@@ -359,6 +360,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -27,20 +27,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -367,10 +369,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
- local_irq_save(flags);
+ local_lock_irqsave(netdev_alloc_lock, flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, fragsz, gfp_mask);
+ data = page_frag_alloc(nc, fragsz, gfp_mask);
- local_irq_restore(flags);
+ local_unlock_irqrestore(netdev_alloc_lock, flags);
return data;
}
-@@ -438,13 +440,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ local_lock_irqsave(netdev_alloc_lock, flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, len, gfp_mask);
+ data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
- local_irq_restore(flags);
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 33dfee0edfc4..2084dbdb06ff 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8046,7 +8046,7 @@ static int dev_cpu_callback(struct notif
+@@ -8093,7 +8093,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index bf948beeb63e..d5cf0e7c1ce0 100644
--- a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -362,6 +362,7 @@ struct napi_alloc_cache {
+@@ -361,6 +361,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -391,9 +392,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -33,15 +33,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct napi_alloc_cache *nc;
+ void *data;
-- return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+- return page_frag_alloc(&nc->page, fragsz, gfp_mask);
+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
-+ data = __alloc_page_frag(&nc->page, fragsz, gfp_mask);
++ data = page_frag_alloc(&nc->page, fragsz, gfp_mask);
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
+ return data;
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -487,9 +492,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
@@ -53,18 +53,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -507,7 +513,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
- data = __alloc_page_frag(&nc->page, len, gfp_mask);
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
+ pfmemalloc = nc->page.pfmemalloc;
+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache);
if (unlikely(!data))
return NULL;
-@@ -518,7 +527,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
}
/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb->pfmemalloc = 1;
skb->head_frag = 1;
-@@ -762,23 +771,26 @@ EXPORT_SYMBOL(consume_skb);
+@@ -761,23 +770,26 @@ EXPORT_SYMBOL(consume_skb);
void __kfree_skb_flush(void)
{
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
-@@ -793,6 +805,7 @@ static inline void _kfree_skb_defer(stru
+@@ -792,6 +804,7 @@ static inline void _kfree_skb_defer(stru
nc->skb_cache);
nc->skb_count = 0;
}
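
For per-CPU structures that used to be reached through this_cpu_ptr() under BH protection, the patch above switches to the get_locked_var()/put_locked_var() pair: the accessor takes the local lock, hands back the current CPU's instance, and keeps the task pinned to it until the matching put. A small sketch of that pattern with the same API; the demo_cache structure and demo_bump() are placeholders, not part of the patch:

#include <linux/locallock.h>
#include <linux/percpu.h>

struct demo_cache {
        unsigned int count;
};

static DEFINE_PER_CPU(struct demo_cache, demo_cache);
static DEFINE_LOCAL_IRQ_LOCK(demo_cache_lock);

static unsigned int demo_bump(void)
{
        struct demo_cache *c;
        unsigned int ret;

        /* was: c = this_cpu_ptr(&demo_cache); relying on BH-off for exclusion */
        c = &get_locked_var(demo_cache_lock, demo_cache);
        ret = ++c->count;
        put_locked_var(demo_cache_lock, demo_cache);

        return ret;
}
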
diff --git a/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
new file mode 100644
index 000000000000..553ab5e14cbf
--- /dev/null
+++ b/patches/net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
@@ -0,0 +1,27 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Mon, 22 May 2017 21:08:08 +0200
+Subject: net/core: remove explicit do_softirq() from busy_poll_stop()
+
+Since commit 217f69743681 ("net: busy-poll: allow preemption in
+sk_busy_loop()") there is an explicit do_softirq() invocation after
+local_bh_enable() has been invoked.
+I don't understand why we need this because local_bh_enable() will
+invoke do_softirq() once the softirq counter reaches zero and we have
+softirq-related work pending.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/core/dev.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -5059,8 +5059,6 @@ static void busy_poll_stop(struct napi_s
+ if (rc == BUSY_POLL_BUDGET)
+ __napi_schedule(napi);
+ local_bh_enable();
+- if (local_softirq_pending())
+- do_softirq();
+ }
+
+ bool sk_busy_loop(struct sock *sk, int nonblock)
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index ff17e1ab50ba..46e8b749e597 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3102,7 +3102,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3077,7 +3077,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index e66f42d473a2..ca650f4b9b03 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -16,15 +16,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
-@@ -4,6 +4,7 @@
-
+@@ -5,6 +5,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
+ #include <linux/netfilter.h>
+#include <linux/locallock.h>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
-@@ -300,6 +301,8 @@ void xt_free_table_info(struct xt_table_
+@@ -337,6 +338,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -320,6 +323,9 @@ static inline unsigned int xt_write_recs
+@@ -357,6 +360,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -350,6 +356,7 @@ static inline void xt_write_recseq_end(u
+@@ -387,6 +393,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index c731a8f7a695..bf05acbd9e98 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -190,6 +190,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -189,6 +189,7 @@ static unsigned int napi_gen_id = NR_CPU
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -888,7 +889,8 @@ int netdev_get_name(struct net *net, cha
+@@ -889,7 +890,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1157,20 +1159,17 @@ int dev_change_name(struct net_device *d
+@@ -1158,20 +1160,17 @@ int dev_change_name(struct net_device *d
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1183,11 +1182,12 @@ int dev_change_name(struct net_device *d
+@@ -1184,11 +1183,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1208,7 +1208,8 @@ int dev_change_name(struct net_device *d
+@@ -1209,7 +1209,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1221,6 +1222,11 @@ int dev_change_name(struct net_device *d
+@@ -1222,6 +1223,11 @@ int dev_change_name(struct net_device *d
}
return err;
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 6c6125c6d2af..7b8f4e800c50 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2463,14 +2463,53 @@ void netdev_freemem(struct net_device *d
+@@ -2427,14 +2427,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -80,19 +80,19 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1988,6 +1988,9 @@ struct task_struct {
+@@ -1065,6 +1065,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
+ unsigned long task_state_change;
#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ int xmit_recursion;
++ int xmit_recursion;
+#endif
- int pagefault_disabled;
+ int pagefault_disabled;
#ifdef CONFIG_MMU
- struct task_struct *oom_reaper_list;
+ struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3165,8 +3165,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3140,8 +3140,10 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* dev_loopback_xmit - loop back @skb
-@@ -3400,8 +3402,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3381,8 +3383,7 @@ static int __dev_queue_xmit(struct sk_bu
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3411,9 +3412,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3392,9 +3393,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
--- a/net/core/filter.c
+++ b/net/core/filter.c
-@@ -1645,7 +1645,7 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1652,7 +1652,7 @@ static inline int __bpf_tx_skb(struct ne
{
int ret;
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
-@@ -1653,9 +1653,9 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1660,9 +1660,9 @@ static inline int __bpf_tx_skb(struct ne
skb->dev = dev;
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 87ae1b25f18f..562d7f1052a7 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2499,12 +2499,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2541,12 +2541,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 3ae0f474db36..e2d3b322dad6 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -496,6 +496,14 @@ extern void thread_do_softirq(void);
+@@ -508,6 +508,14 @@ extern void thread_do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void raise_softirq(unsigned int nr);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -685,6 +685,27 @@ void __raise_softirq_irqoff(unsigned int
+@@ -686,6 +686,27 @@ void __raise_softirq_irqoff(unsigned int
}
/*
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5279,7 +5279,7 @@ static __latent_entropy void net_rx_acti
+@@ -5367,7 +5367,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
@@ -75,4 +75,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
- }
+ out:
diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch
index a5ff10f48b1a..9c583c082b0a 100644
--- a/patches/net-use-cpu-chill.patch
+++ b/patches/net-use-cpu-chill.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -694,7 +695,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -702,7 +703,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -956,7 +957,7 @@ static void prb_retire_current_block(str
+@@ -964,7 +965,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
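
Both hunks above target the same situation: a loop that spins until another context clears blk_fill_in_prog. On RT the owner of that flag may have been preempted by the spinning task, so this patch (net-use-cpu-chill.patch) swaps the busy-wait primitive for cpu_chill(), which sleeps briefly instead of burning the CPU. A sketch of the idiom, assuming cpu_chill() is available through linux/delay.h as the rest of this queue provides it; demo_busy and demo_wait() are invented:

#include <linux/atomic.h>
#include <linux/delay.h>         /* cpu_chill() on RT */

static atomic_t demo_busy = ATOMIC_INIT(1);

static void demo_wait(void)
{
        while (atomic_read(&demo_busy)) {
                /* was: cpu_relax(); spinning could starve the preempted owner on RT */
                cpu_chill();
        }
}
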
diff --git a/patches/net-wireless-warn-nort.patch b/patches/net-wireless-warn-nort.patch
index ad48bc535240..603bf54f6904 100644
--- a/patches/net-wireless-warn-nort.patch
+++ b/patches/net-wireless-warn-nort.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
-@@ -4230,7 +4230,7 @@ void ieee80211_rx_napi(struct ieee80211_
+@@ -4224,7 +4224,7 @@ void ieee80211_rx_napi(struct ieee80211_
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
diff --git a/patches/net_disable_NET_RX_BUSY_POLL.patch b/patches/net_disable_NET_RX_BUSY_POLL.patch
new file mode 100644
index 000000000000..9a030b337efe
--- /dev/null
+++ b/patches/net_disable_NET_RX_BUSY_POLL.patch
@@ -0,0 +1,28 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Sat, 27 May 2017 19:02:06 +0200
+Subject: net/core: disable NET_RX_BUSY_POLL
+
+sk_busy_loop() does preempt_disable() followed by a few operations which can
+take sleeping locks and may take a long time.
+I _think_ that we could use preempt_disable_nort() (in sk_busy_loop()) instead,
+but after a successful cmpxchg(&napi->state, …) we would gain the resource
+and could be scheduled out. At this point nobody knows who (which context) owns
+it and so it could take a while until the state is released and napi_poll()
+could be invoked again.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ net/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/Kconfig
++++ b/net/Kconfig
+@@ -277,7 +277,7 @@ config CGROUP_NET_CLASSID
+
+ config NET_RX_BUSY_POLL
+ bool
+- default y
++ default y if !PREEMPT_RT_FULL
+
+ config BQL
+ bool
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 3eda2adab467..356cf401d281 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -148,6 +148,13 @@ static void exit_to_usermode_loop(struct
+@@ -149,6 +149,13 @@ static void exit_to_usermode_loop(struct
if (cached_flags & _TIF_NEED_RESCHED)
schedule();
@@ -76,20 +76,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1700,6 +1700,10 @@ struct task_struct {
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
- struct sigpending pending;
+@@ -760,6 +760,10 @@ struct task_struct {
+ /* Restored if set_restore_sigmask() was used: */
+ sigset_t saved_sigmask;
+ struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /* TODO: move me into ->restart_block ? */
-+ struct siginfo forced_info;
++ struct siginfo forced_info;
+#endif
-
- unsigned long sas_ss_sp;
- size_t sas_ss_size;
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ unsigned int sas_ss_flags;
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1216,8 +1216,8 @@ int do_send_sig_info(int sig, struct sig
+@@ -1227,8 +1227,8 @@ int do_send_sig_info(int sig, struct sig
* We don't want to have recursive SIGSEGV's etc, for example,
* that is why we also clear SIGNAL_UNKILLABLE.
*/
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long int flags;
int ret, blocked, ignored;
-@@ -1242,6 +1242,39 @@ force_sig_info(int sig, struct siginfo *
+@@ -1253,6 +1253,39 @@ force_sig_info(int sig, struct siginfo *
return ret;
}
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index c21c7d0160ab..71e256b060f0 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -482,9 +482,11 @@ static u64 oops_id;
+@@ -481,9 +481,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index d870ca29a7bb..cd3ec8bb3f7a 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -343,11 +343,7 @@ static inline int rcu_preempt_depth(void
+@@ -303,11 +303,7 @@ static inline int rcu_preempt_depth(void
/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_cpu_starting(unsigned int cpu);
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -260,7 +260,14 @@ void rcu_sched_qs(void)
+@@ -262,7 +262,14 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -62,15 +62,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -28,6 +28,7 @@
- #include <linux/gfp.h>
+@@ -29,6 +29,7 @@
#include <linux/oom.h>
+ #include <linux/sched/debug.h>
#include <linux/smpboot.h>
+#include <linux/jiffies.h>
+ #include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"
- #ifdef CONFIG_RCU_BOOST
-@@ -1244,7 +1245,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1246,7 +1247,7 @@ static void rcu_prepare_kthreads(int cpu
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1261,7 +1262,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1263,7 +1264,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
? 0 : rcu_cpu_has_callbacks(NULL);
}
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1357,6 +1360,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1359,6 +1362,8 @@ static bool __maybe_unused rcu_try_advan
return cbs_ready;
}
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1402,6 +1407,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1404,6 +1409,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
diff --git a/patches/pci-access-use-__wake_up_all_locked.patch b/patches/pci-access-use-__wake_up_all_locked.patch
deleted file mode 100644
index 15fa74557ae0..000000000000
--- a/patches/pci-access-use-__wake_up_all_locked.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Subject: pci: Use __wake_up_all_locked in pci_unblock_user_cfg_access()
-From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 01 Dec 2011 00:07:16 +0100
-
-The waitqueue is protected by the pci_lock, so we can just avoid to
-lock the waitqueue lock itself. That prevents the
-might_sleep()/scheduling while atomic problem on RT
-
-Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
----
- drivers/pci/access.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/pci/access.c
-+++ b/drivers/pci/access.c
-@@ -672,7 +672,7 @@ void pci_cfg_access_unlock(struct pci_de
- WARN_ON(!dev->block_cfg_access);
-
- dev->block_cfg_access = 0;
-- wake_up_all(&pci_cfg_wait);
-+ wake_up_all_locked(&pci_cfg_wait);
- raw_spin_unlock_irqrestore(&pci_lock, flags);
- }
- EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
diff --git a/patches/percpu_ida-use-locklocks.patch b/patches/percpu_ida-use-locklocks.patch
index c5edf437a4d0..9e4a3c38c79c 100644
--- a/patches/percpu_ida-use-locklocks.patch
+++ b/patches/percpu_ida-use-locklocks.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
-@@ -26,6 +26,9 @@
+@@ -27,6 +27,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/percpu_ida.h>
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct percpu_ida_cpu {
/*
-@@ -148,13 +151,13 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -149,13 +152,13 @@ int percpu_ida_alloc(struct percpu_ida *
unsigned long flags;
int tag;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return tag;
}
-@@ -173,6 +176,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -174,6 +177,7 @@ int percpu_ida_alloc(struct percpu_ida *
if (!tags->nr_free)
alloc_global_tags(pool, tags);
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!tags->nr_free)
steal_tags(pool, tags);
-@@ -184,7 +188,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -185,7 +189,7 @@ int percpu_ida_alloc(struct percpu_ida *
}
spin_unlock(&pool->lock);
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (tag >= 0 || state == TASK_RUNNING)
break;
-@@ -196,7 +200,7 @@ int percpu_ida_alloc(struct percpu_ida *
+@@ -197,7 +201,7 @@ int percpu_ida_alloc(struct percpu_ida *
schedule();
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tags = this_cpu_ptr(pool->tag_cpu);
}
if (state != TASK_RUNNING)
-@@ -221,7 +225,7 @@ void percpu_ida_free(struct percpu_ida *
+@@ -222,7 +226,7 @@ void percpu_ida_free(struct percpu_ida *
BUG_ON(tag >= pool->nr_tags);
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
tags = this_cpu_ptr(pool->tag_cpu);
spin_lock(&tags->lock);
-@@ -253,7 +257,7 @@ void percpu_ida_free(struct percpu_ida *
+@@ -254,7 +258,7 @@ void percpu_ida_free(struct percpu_ida *
spin_unlock(&pool->lock);
}
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(percpu_ida_free);
-@@ -345,7 +349,7 @@ int percpu_ida_for_each_free(struct perc
+@@ -346,7 +350,7 @@ int percpu_ida_for_each_free(struct perc
struct percpu_ida_cpu *remote;
unsigned cpu, i, err = 0;
@@ -90,7 +90,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_possible_cpu(cpu) {
remote = per_cpu_ptr(pool->tag_cpu, cpu);
spin_lock(&remote->lock);
-@@ -367,7 +371,7 @@ int percpu_ida_for_each_free(struct perc
+@@ -368,7 +372,7 @@ int percpu_ida_for_each_free(struct perc
}
spin_unlock(&pool->lock);
out:
diff --git a/patches/perf-make-swevent-hrtimer-irqsafe.patch b/patches/perf-make-swevent-hrtimer-irqsafe.patch
index 038bfcd4098a..5633d77d886a 100644
--- a/patches/perf-make-swevent-hrtimer-irqsafe.patch
+++ b/patches/perf-make-swevent-hrtimer-irqsafe.patch
@@ -58,7 +58,7 @@ Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -8363,6 +8363,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8495,6 +8495,7 @@ static void perf_swevent_init_hrtimer(st
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch
index bf2e31c9c219..85b033777069 100644
--- a/patches/peter_zijlstra-frob-rcu.patch
+++ b/patches/peter_zijlstra-frob-rcu.patch
@@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -426,7 +426,7 @@ void rcu_read_unlock_special(struct task
+@@ -428,7 +428,7 @@ void rcu_read_unlock_special(struct task
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/patches/peterz-srcu-crypto-chain.patch b/patches/peterz-srcu-crypto-chain.patch
index b91875c0a3c9..b612cb52ccb3 100644
--- a/patches/peterz-srcu-crypto-chain.patch
+++ b/patches/peterz-srcu-crypto-chain.patch
@@ -171,7 +171,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PROC_FS
void __init crypto_init_proc(void);
-@@ -146,7 +146,7 @@ static inline int crypto_is_moribund(str
+@@ -143,7 +143,7 @@ static inline int crypto_is_moribund(str
static inline void crypto_notify(unsigned long val, void *v)
{
diff --git a/patches/pid.h-include-atomic.h.patch b/patches/pid.h-include-atomic.h.patch
index ffb10e10d4d9..8803ac8cd388 100644
--- a/patches/pid.h-include-atomic.h.patch
+++ b/patches/pid.h-include-atomic.h.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
- #include <linux/rcupdate.h>
+ #include <linux/rculist.h>
+#include <linux/atomic.h>
enum pid_type
diff --git a/patches/pinctrl-qcom-Use-raw-spinlock-variants.patch b/patches/pinctrl-qcom-Use-raw-spinlock-variants.patch
deleted file mode 100644
index be2933bdd23d..000000000000
--- a/patches/pinctrl-qcom-Use-raw-spinlock-variants.patch
+++ /dev/null
@@ -1,252 +0,0 @@
-From: Julia Cartwright <julia@ni.com>
-Date: Fri, 20 Jan 2017 10:13:47 -0600
-Subject: [PATCH] pinctrl: qcom: Use raw spinlock variants
-
-The MSM pinctrl driver currently implements an irq_chip for handling
-GPIO interrupts; due to how irq_chip handling is done, it's necessary
-for the irq_chip methods to be invoked from hardirq context, even on a
-a real-time kernel. Because the spinlock_t type becomes a "sleeping"
-spinlock w/ RT kernels, it is not suitable to be used with irq_chips.
-
-A quick audit of the operations under the lock reveal that they do only
-minimal, bounded work, and are therefore safe to do under a raw
-spinlock.
-
-On real-time kernels, this fixes an OOPs which looks like the following,
-as reported by Brian Wrenn:
-
- kernel BUG at kernel/locking/rtmutex.c:1014!
- Internal error: Oops - BUG: 0 [#1] PREEMPT SMP
- Modules linked in: spidev_irq(O) smsc75xx wcn36xx [last unloaded: spidev]
- CPU: 0 PID: 1163 Comm: irq/144-mmc0 Tainted: G W O 4.4.9-linaro-lt-qcom #1
- PC is at rt_spin_lock_slowlock+0x80/0x2d8
- LR is at rt_spin_lock_slowlock+0x68/0x2d8
- [..]
- Call trace:
- rt_spin_lock_slowlock
- rt_spin_lock
- msm_gpio_irq_ack
- handle_edge_irq
- generic_handle_irq
- msm_gpio_irq_handler
- generic_handle_irq
- __handle_domain_irq
- gic_handle_irq
-
-Cc: stable-rt@vger.kernel.org
-Cc: Bjorn Andersson <bjorn.andersson@linaro.org>
-Reported-by: Brian Wrenn <dcbrianw@gmail.com>
-Tested-by: Brian Wrenn <dcbrianw@gmail.com>
-Signed-off-by: Julia Cartwright <julia@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/pinctrl/qcom/pinctrl-msm.c | 48 ++++++++++++++++++-------------------
- 1 file changed, 24 insertions(+), 24 deletions(-)
-
---- a/drivers/pinctrl/qcom/pinctrl-msm.c
-+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
-@@ -61,7 +61,7 @@ struct msm_pinctrl {
- struct notifier_block restart_nb;
- int irq;
-
-- spinlock_t lock;
-+ raw_spinlock_t lock;
-
- DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
- DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
-@@ -153,14 +153,14 @@ static int msm_pinmux_set_mux(struct pin
- if (WARN_ON(i == g->nfuncs))
- return -EINVAL;
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~mask;
- val |= i << g->mux_bit;
- writel(val, pctrl->regs + g->ctl_reg);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
-@@ -323,14 +323,14 @@ static int msm_config_group_set(struct p
- break;
- case PIN_CONFIG_OUTPUT:
- /* set output value */
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->io_reg);
- if (arg)
- val |= BIT(g->out_bit);
- else
- val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- /* enable output */
- arg = 1;
-@@ -351,12 +351,12 @@ static int msm_config_group_set(struct p
- return -EINVAL;
- }
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~(mask << bit);
- val |= arg << bit;
- writel(val, pctrl->regs + g->ctl_reg);
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- return 0;
-@@ -384,13 +384,13 @@ static int msm_gpio_direction_input(stru
-
- g = &pctrl->soc->groups[offset];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->ctl_reg);
- val &= ~BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
-@@ -404,7 +404,7 @@ static int msm_gpio_direction_output(str
-
- g = &pctrl->soc->groups[offset];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->io_reg);
- if (value)
-@@ -417,7 +417,7 @@ static int msm_gpio_direction_output(str
- val |= BIT(g->oe_bit);
- writel(val, pctrl->regs + g->ctl_reg);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
-@@ -443,7 +443,7 @@ static void msm_gpio_set(struct gpio_chi
-
- g = &pctrl->soc->groups[offset];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->io_reg);
- if (value)
-@@ -452,7 +452,7 @@ static void msm_gpio_set(struct gpio_chi
- val &= ~BIT(g->out_bit);
- writel(val, pctrl->regs + g->io_reg);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- #ifdef CONFIG_DEBUG_FS
-@@ -571,7 +571,7 @@ static void msm_gpio_irq_mask(struct irq
-
- g = &pctrl->soc->groups[d->hwirq];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_cfg_reg);
- val &= ~BIT(g->intr_enable_bit);
-@@ -579,7 +579,7 @@ static void msm_gpio_irq_mask(struct irq
-
- clear_bit(d->hwirq, pctrl->enabled_irqs);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static void msm_gpio_irq_unmask(struct irq_data *d)
-@@ -592,7 +592,7 @@ static void msm_gpio_irq_unmask(struct i
-
- g = &pctrl->soc->groups[d->hwirq];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_cfg_reg);
- val |= BIT(g->intr_enable_bit);
-@@ -600,7 +600,7 @@ static void msm_gpio_irq_unmask(struct i
-
- set_bit(d->hwirq, pctrl->enabled_irqs);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static void msm_gpio_irq_ack(struct irq_data *d)
-@@ -613,7 +613,7 @@ static void msm_gpio_irq_ack(struct irq_
-
- g = &pctrl->soc->groups[d->hwirq];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- val = readl(pctrl->regs + g->intr_status_reg);
- if (g->intr_ack_high)
-@@ -625,7 +625,7 @@ static void msm_gpio_irq_ack(struct irq_
- if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
- }
-
- static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type)
-@@ -638,7 +638,7 @@ static int msm_gpio_irq_set_type(struct
-
- g = &pctrl->soc->groups[d->hwirq];
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- /*
- * For hw without possibility of detecting both edges
-@@ -712,7 +712,7 @@ static int msm_gpio_irq_set_type(struct
- if (test_bit(d->hwirq, pctrl->dual_edge_irqs))
- msm_gpio_update_dual_edge_pos(pctrl, g, d);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
- irq_set_handler_locked(d, handle_level_irq);
-@@ -728,11 +728,11 @@ static int msm_gpio_irq_set_wake(struct
- struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
- unsigned long flags;
-
-- spin_lock_irqsave(&pctrl->lock, flags);
-+ raw_spin_lock_irqsave(&pctrl->lock, flags);
-
- irq_set_irq_wake(pctrl->irq, on);
-
-- spin_unlock_irqrestore(&pctrl->lock, flags);
-+ raw_spin_unlock_irqrestore(&pctrl->lock, flags);
-
- return 0;
- }
-@@ -878,7 +878,7 @@ int msm_pinctrl_probe(struct platform_de
- pctrl->soc = soc_data;
- pctrl->chip = msm_gpio_template;
-
-- spin_lock_init(&pctrl->lock);
-+ raw_spin_lock_init(&pctrl->lock);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pctrl->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index c337982847ea..de3a89ac5491 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -12,37 +12,36 @@ Documentation/sysrq.txt for details.
Signed-off-by: Carsten Emde <C.Emde@osadl.org>
---
- Documentation/sysrq.txt | 11 +++++++++--
- include/net/netns/ipv4.h | 1 +
- net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++
- net/ipv4/sysctl_net_ipv4.c | 7 +++++++
- 4 files changed, 47 insertions(+), 2 deletions(-)
+ Documentation/admin-guide/sysrq.rst | 12 ++++++++++++
+ include/net/netns/ipv4.h | 1 +
+ net/ipv4/icmp.c | 30 ++++++++++++++++++++++++++++++
+ net/ipv4/sysctl_net_ipv4.c | 7 +++++++
+ 4 files changed, 50 insertions(+)
---- a/Documentation/sysrq.txt
-+++ b/Documentation/sysrq.txt
-@@ -59,10 +59,17 @@ On PowerPC - Press 'ALT - Print Screen (
- On other - If you know of the key combos for other architectures, please
- let me know so I can add them to this section.
+--- a/Documentation/admin-guide/sysrq.rst
++++ b/Documentation/admin-guide/sysrq.rst
+@@ -77,6 +77,18 @@ On all
--On all - write a character to /proc/sysrq-trigger. e.g.:
--
-+On all - write a character to /proc/sysrq-trigger, e.g.:
echo t > /proc/sysrq-trigger
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+ Send an ICMP echo request with this pattern plus the particular
-+ SysRq command key. Example:
-+ # ping -c1 -s57 -p0102030468
-+ will trigger the SysRq-H (help) command.
++On all
++ Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.::
+
++ echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
+
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b' - Will immediately reboot the system without syncing or unmounting
++ Send an ICMP echo request with this pattern plus the particular
++ SysRq command key. Example::
++
++ ping -c1 -s57 -p0102030468
++
++ will trigger the SysRq-H (help) command.
++
+ What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
-@@ -69,6 +69,7 @@ struct netns_ipv4 {
+@@ -79,6 +79,7 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
@@ -60,7 +59,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
-@@ -899,6 +900,30 @@ static bool icmp_redirect(struct sk_buff
+@@ -927,6 +928,30 @@ static bool icmp_redirect(struct sk_buff
}
/*
@@ -91,7 +90,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
* Handle ICMP_ECHO ("ping") requests.
*
* RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo
-@@ -925,6 +950,11 @@ static bool icmp_echo(struct sk_buff *sk
+@@ -953,6 +978,11 @@ static bool icmp_echo(struct sk_buff *sk
icmp_param.data_len = skb->len;
icmp_param.head_len = sizeof(struct icmphdr);
icmp_reply(&icmp_param, skb);
@@ -105,7 +104,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
return true;
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -681,6 +681,13 @@ static struct ctl_table ipv4_net_table[]
+@@ -687,6 +687,13 @@ static struct ctl_table ipv4_net_table[]
.proc_handler = proc_dointvec
},
{
diff --git a/patches/posix-timers-no-broadcast.patch b/patches/posix-timers-no-broadcast.patch
index 287b8f083d48..581dcb2885b6 100644
--- a/patches/posix-timers-no-broadcast.patch
+++ b/patches/posix-timers-no-broadcast.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
-@@ -506,6 +506,7 @@ static enum hrtimer_restart posix_timer_
+@@ -507,6 +507,7 @@ static enum hrtimer_restart posix_timer_
static struct pid *good_sigevent(sigevent_t * event)
{
struct task_struct *rtn = current->group_leader;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
(!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -514,7 +515,8 @@ static struct pid *good_sigevent(sigeven
+@@ -515,7 +516,8 @@ static struct pid *good_sigevent(sigeven
return NULL;
if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index 2207f3b078c4..284179e10cb2 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -14,12 +14,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/init_task.h | 7 +
include/linux/sched.h | 3
kernel/fork.c | 3
- kernel/time/posix-cpu-timers.c | 193 ++++++++++++++++++++++++++++++++++++++++-
- 4 files changed, 202 insertions(+), 4 deletions(-)
+ kernel/time/posix-cpu-timers.c | 157 +++++++++++++++++++++++++++++++++++++++--
+ 4 files changed, 166 insertions(+), 4 deletions(-)
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -150,6 +150,12 @@ extern struct task_group root_task_group
+@@ -167,6 +167,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -32,8 +32,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \
-@@ -251,6 +257,7 @@ extern struct task_group root_task_group
- .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+@@ -269,6 +275,7 @@ extern struct cred init_cred;
+ INIT_CPU_TIMERS(tsk) \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
+ INIT_TIMER_LIST \
@@ -42,19 +42,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1664,6 +1664,9 @@ struct task_struct {
-
- struct task_cputime cputime_expires;
- struct list_head cpu_timers[3];
+@@ -710,6 +710,9 @@ struct task_struct {
+ #ifdef CONFIG_POSIX_TIMERS
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct task_struct *posix_timer_list;
++ struct task_struct *posix_timer_list;
+#endif
+ #endif
- /* process credentials */
- const struct cred __rcu *ptracer_cred; /* Tracer's credentials at attach */
+ /* Process credentials: */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1427,6 +1427,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1451,6 +1451,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -66,15 +66,26 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
tsk->cputime_expires.sched_exp = 0;
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
-@@ -3,6 +3,7 @@
+@@ -2,8 +2,10 @@
+ * Implement CPU time clocks for the POSIX clock interface.
*/
- #include <linux/sched.h>
++#include <uapi/linux/sched/types.h>
+ #include <linux/sched/signal.h>
+ #include <linux/sched/cputime.h>
+#include <linux/sched/rt.h>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
-@@ -620,7 +621,7 @@ static int posix_cpu_timer_set(struct k_
+@@ -12,6 +14,7 @@
+ #include <trace/events/timer.h>
+ #include <linux/tick.h>
+ #include <linux/workqueue.h>
++#include <linux/smpboot.h>
+
+ /*
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+@@ -590,7 +593,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
@@ -83,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1064,7 +1065,7 @@ void posix_cpu_timer_schedule(struct k_i
+@@ -1014,7 +1017,7 @@ void posix_cpu_timer_schedule(struct k_i
/*
* Now re-arm for the new expiry time.
*/
@@ -92,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arm_timer(timer);
unlock_task_sighand(p, &flags);
-@@ -1153,13 +1154,13 @@ static inline int fastpath_timer_check(s
+@@ -1103,13 +1106,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -108,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
-@@ -1213,6 +1214,190 @@ void run_posix_cpu_timers(struct task_st
+@@ -1163,6 +1166,152 @@ void run_posix_cpu_timers(struct task_st
}
}
@@ -117,63 +128,42 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/cpu.h>
+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
++DEFINE_PER_CPU(bool, posix_timer_th_active);
+
-+static int posix_cpu_timers_thread(void *data)
++static void posix_cpu_kthread_fn(unsigned int cpu)
+{
-+ int cpu = (long)data;
++ struct task_struct *tsk = NULL;
++ struct task_struct *next = NULL;
+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
++ BUG_ON(per_cpu(posix_timer_task, cpu) != current);
+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
++ /* grab task list */
++ raw_local_irq_disable();
++ tsk = per_cpu(posix_timer_tasklist, cpu);
++ per_cpu(posix_timer_tasklist, cpu) = NULL;
++ raw_local_irq_enable();
+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
-+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
-+
-+ /* its possible the list is empty, just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
++	/* it's possible the list is empty, just return */
++ if (!tsk)
++ return;
+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
++ /* Process task list */
++ while (1) {
++ /* save next */
++ next = tsk->posix_timer_list;
+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
++ /* run the task timers, clear its ptr and
++ * unreference it
++ */
++ __run_posix_cpu_timers(tsk);
++ tsk->posix_timer_list = NULL;
++ put_task_struct(tsk);
+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
++ /* check if this is the last on the list */
++ if (next == tsk)
++ break;
++ tsk = next;
+ }
-+ return 0;
-+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
+}
+
+static inline int __fastpath_timer_check(struct task_struct *tsk)
@@ -193,12 +183,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+void run_posix_cpu_timers(struct task_struct *tsk)
+{
-+ unsigned long cpu = smp_processor_id();
++ unsigned int cpu = smp_processor_id();
+ struct task_struct *tasklist;
+
+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
++
++ if (per_cpu(posix_timer_th_active, cpu) != true)
+ return;
++
+ /* get per-cpu references */
+ tasklist = per_cpu(posix_timer_tasklist, cpu);
+
@@ -220,72 +212,53 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ }
+}
+
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary migration thread for the new CPU.
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
++static int posix_cpu_kthread_should_run(unsigned int cpu)
+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
++ return __this_cpu_read(posix_timer_tasklist) != NULL;
++}
+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, &param);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+ /* Strictly unneccessary, as first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task, cpu),
-+ cpumask_any(cpu_online_mask));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+#endif
-+ }
-+ return NOTIFY_OK;
++static void posix_cpu_kthread_park(unsigned int cpu)
++{
++ this_cpu_write(posix_timer_th_active, false);
++}
++
++static void posix_cpu_kthread_unpark(unsigned int cpu)
++{
++ this_cpu_write(posix_timer_th_active, true);
+}
+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
++static void posix_cpu_kthread_setup(unsigned int cpu)
++{
++ struct sched_param sp;
++
++ sp.sched_priority = MAX_RT_PRIO - 1;
++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
++ posix_cpu_kthread_unpark(cpu);
++}
++
++static struct smp_hotplug_thread posix_cpu_thread = {
++ .store = &posix_timer_task,
++ .thread_should_run = posix_cpu_kthread_should_run,
++ .thread_fn = posix_cpu_kthread_fn,
++ .thread_comm = "posixcputmr/%u",
++ .setup = posix_cpu_kthread_setup,
++ .park = posix_cpu_kthread_park,
++ .unpark = posix_cpu_kthread_unpark,
+};
+
+static int __init posix_cpu_thread_init(void)
+{
-+ void *hcpu = (void *)(long)smp_processor_id();
+ /* Start one for boot CPU. */
+ unsigned long cpu;
++ int ret;
+
+ /* init the per-cpu posix_timer_tasklets */
+ for_each_possible_cpu(cpu)
+ per_cpu(posix_timer_tasklist, cpu) = NULL;
+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
++ ret = smpboot_register_percpu_thread(&posix_cpu_thread);
++ WARN_ON(ret);
++
+ return 0;
+}
+early_initcall(posix_cpu_thread_init);
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 3141f5b60b52..4eb27c466774 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -322,7 +322,7 @@ menu "Kernel options"
+@@ -333,7 +333,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index 09aba5682169..facf5b101973 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,14 +15,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -135,6 +135,7 @@ config PPC
- select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
- select GENERIC_STRNCPY_FROM_USER
- select GENERIC_STRNLEN_USER
+@@ -155,6 +155,7 @@ config PPC
+ select HAVE_PERF_EVENTS_NMI if PPC64
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_PREEMPT_LAZY
- select HAVE_MOD_ARCH_SPECIFIC
- select MODULES_USE_ELF_RELA
- select CLONE_BACKWARDS
+ select HAVE_RCU_TABLE_FREE if SMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -43,6 +43,8 @@ struct thread_info {
@@ -74,16 +74,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -156,6 +156,7 @@ int main(void)
- DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
- DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
- DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
-+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
- DEFINE(TI_TASK, offsetof(struct thread_info, task));
- DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_LOCAL_FLAGS, thread_info, local_flags);
+ OFFSET(TI_PREEMPT, thread_info, preempt_count);
++ OFFSET(TI_PREEMPT_LAZY, thread_info, preempt_lazy_count);
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_CPU, thread_info, cpu);
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -835,7 +835,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -845,7 +845,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -846,11 +853,11 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -856,11 +863,11 @@ user_exc_return: /* r10 contains MSR_KE
*/
bl trace_hardirqs_off
#endif
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1171,7 +1178,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1183,7 +1190,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1192,7 +1199,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1204,7 +1211,7 @@ do_resched: /* r10 contains MSR_KERNEL
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 131245a37735..fe5c3b0c1541 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -52,23 +52,23 @@ performance.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/preempt.h | 29 ++++++++++++++++-
- include/linux/sched.h | 37 ++++++++++++++++++++++
- include/linux/thread_info.h | 12 ++++++-
+ include/linux/preempt.h | 35 +++++++++++++++++-
+ include/linux/sched.h | 38 +++++++++++++++++++
+ include/linux/thread_info.h | 12 +++++-
include/linux/trace_events.h | 1
kernel/Kconfig.preempt | 6 +++
- kernel/sched/core.c | 72 +++++++++++++++++++++++++++++++++++++++++--
- kernel/sched/fair.c | 16 ++++-----
+ kernel/sched/core.c | 83 +++++++++++++++++++++++++++++++++++++++++--
+ kernel/sched/fair.c | 16 ++++----
kernel/sched/features.h | 3 +
- kernel/sched/sched.h | 9 +++++
- kernel/trace/trace.c | 37 +++++++++++++---------
+ kernel/sched/sched.h | 9 ++++
+ kernel/trace/trace.c | 37 +++++++++++--------
kernel/trace/trace.h | 2 +
- kernel/trace/trace_output.c | 14 +++++++-
- 12 files changed, 209 insertions(+), 29 deletions(-)
+ kernel/trace/trace_output.c | 14 ++++++-
+ 12 files changed, 227 insertions(+), 29 deletions(-)
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -153,6 +153,20 @@ extern void preempt_count_sub(int val);
+@@ -179,6 +179,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -161,6 +175,12 @@ do { \
+@@ -187,6 +201,12 @@ do { \
barrier(); \
} while (0)
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
-@@ -198,6 +218,13 @@ do { \
+@@ -240,6 +260,13 @@ do { \
__preempt_schedule(); \
} while (0)
@@ -116,7 +116,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
-@@ -264,7 +291,7 @@ do { \
+@@ -247,6 +274,12 @@ do { \
+ preempt_count_dec(); \
+ } while (0)
+
++#define preempt_lazy_enable() \
++do { \
++ dec_preempt_lazy_count(); \
++ barrier(); \
++} while (0)
++
+ #define preempt_enable_notrace() \
+ do { \
+ barrier(); \
+@@ -313,7 +346,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -127,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -3348,6 +3348,43 @@ static inline int test_tsk_need_resched(
+@@ -1513,6 +1513,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -168,12 +181,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+#endif
+
- static inline int restart_syscall(void)
++
+ static inline bool __task_is_stopped_or_traced(struct task_struct *task)
{
- set_tsk_thread_flag(current, TIF_SIGPENDING);
+ if (task->state & (__TASK_STOPPED | __TASK_TRACED))
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -107,7 +107,17 @@ static inline int test_ti_thread_flag(st
+@@ -74,7 +74,17 @@ static inline int test_ti_thread_flag(st
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -194,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int arch_within_stack_frames(const void * const stack,
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -58,6 +58,7 @@ struct trace_entry {
+@@ -63,6 +63,7 @@ struct trace_entry {
int pid;
unsigned short migrate_disable;
unsigned short padding;
@@ -219,11 +233,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default PREEMPT_NONE
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -510,6 +510,38 @@ void resched_curr(struct rq *rq)
+@@ -517,6 +517,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
+#ifdef CONFIG_PREEMPT_LAZY
++
++static int tsk_is_polling(struct task_struct *p)
++{
++#ifdef TIF_POLLING_NRFLAG
++ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
++#else
++ return 0;
++#endif
++}
++
+void resched_curr_lazy(struct rq *rq)
+{
+ struct task_struct *curr = rq->curr;
@@ -258,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2531,6 +2563,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2525,6 +2567,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -268,31 +292,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3362,6 +3397,7 @@ void migrate_disable(void)
- }
-
- preempt_disable();
-+ preempt_lazy_disable();
- pin_current_cpu();
- p->migrate_disable = 1;
- preempt_enable();
-@@ -3401,6 +3437,7 @@ void migrate_enable(void)
+@@ -3516,6 +3561,7 @@ static void __sched notrace __schedule(b
- unpin_current_cpu();
- preempt_enable();
-+ preempt_lazy_enable();
- }
- EXPORT_SYMBOL(migrate_enable);
- #endif
-@@ -3530,6 +3567,7 @@ static void __sched notrace __schedule(b
-
- next = pick_next_task(rq, prev, cookie);
+ next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
+ clear_tsk_need_resched_lazy(prev);
clear_preempt_need_resched();
- rq->clock_skip_update = 0;
-@@ -3675,6 +3713,30 @@ static void __sched notrace preempt_sche
+ if (likely(prev != next)) {
+@@ -3667,6 +3713,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -323,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3689,7 +3751,8 @@ asmlinkage __visible void __sched notrac
+@@ -3681,7 +3751,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -333,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3716,6 +3779,9 @@ asmlinkage __visible void __sched notrac
+@@ -3708,6 +3779,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -343,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5523,7 +5589,9 @@ void init_idle(struct task_struct *idle,
+@@ -5537,7 +5611,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -354,9 +362,33 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
+@@ -7512,6 +7588,7 @@ void migrate_disable(void)
+ /* get_online_cpus(); */
+
+ preempt_disable();
++ preempt_lazy_disable();
+ pin_current_cpu();
+ p->migrate_disable = 1;
+
+@@ -7581,6 +7658,7 @@ void migrate_enable(void)
+ arg.dest_cpu = dest_cpu;
+
+ unpin_current_cpu();
++ preempt_lazy_enable();
+ preempt_enable();
+ stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
+ tlb_migrate_finish(p->mm);
+@@ -7591,6 +7669,7 @@ void migrate_enable(void)
+ }
+ unpin_current_cpu();
+ /* put_online_cpus(); */
++ preempt_lazy_enable();
+ preempt_enable();
+ }
+ EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3518,7 +3518,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3742,7 +3742,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -365,7 +397,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3542,7 +3542,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3766,7 +3766,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -374,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3684,7 +3684,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -3908,7 +3908,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -383,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -3866,7 +3866,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4090,7 +4090,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -392,7 +424,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4494,7 +4494,7 @@ static void hrtick_start_fair(struct rq
+@@ -4718,7 +4718,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -401,7 +433,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -5905,7 +5905,7 @@ static void check_preempt_wakeup(struct
+@@ -6231,7 +6231,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -410,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8631,7 +8631,7 @@ static void task_fork_fair(struct task_s
+@@ -9006,7 +9006,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -419,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -8655,7 +8655,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9030,7 +9030,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -442,7 +474,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1347,6 +1347,15 @@ extern void init_sched_fair_class(void);
+@@ -1477,6 +1477,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -460,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -1897,6 +1897,7 @@ tracing_generic_entry_update(struct trac
+@@ -1934,6 +1934,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -468,17 +500,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -1907,7 +1908,8 @@ tracing_generic_entry_update(struct trac
+@@ -1944,7 +1945,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
- ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
+ ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) |
+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -2894,15 +2896,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3111,15 +3113,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -505,7 +537,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -2928,11 +2932,14 @@ static void print_func_help_header_irq(s
+@@ -3145,11 +3149,14 @@ static void print_func_help_header_irq(s
print_event_info(buf, m);
seq_puts(m, "# _-----=> irqs-off\n"
"# / _----=> need-resched\n"
@@ -527,7 +559,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -124,6 +124,7 @@ struct kretprobe_trace_entry_head {
+@@ -126,6 +126,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -535,7 +567,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -133,6 +134,7 @@ enum trace_flag_type {
+@@ -135,6 +136,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
@@ -545,7 +577,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_BUF_SIZE 1024
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
-@@ -386,6 +386,7 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -438,6 +438,7 @@ int trace_print_lat_fmt(struct trace_seq
{
char hardsoft_irq;
char need_resched;
@@ -553,7 +585,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
char irqs_off;
int hardirq;
int softirq;
-@@ -416,6 +417,9 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -468,6 +469,9 @@ int trace_print_lat_fmt(struct trace_seq
break;
}
@@ -563,7 +595,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
hardsoft_irq =
(nmi && hardirq) ? 'Z' :
nmi ? 'z' :
-@@ -424,14 +428,20 @@ int trace_print_lat_fmt(struct trace_seq
+@@ -476,14 +480,20 @@ int trace_print_lat_fmt(struct trace_seq
softirq ? 's' :
'.' ;
diff --git a/patches/preempt-nort-rt-variants.patch b/patches/preempt-nort-rt-variants.patch
index f34f4418a221..3eeaeb360737 100644
--- a/patches/preempt-nort-rt-variants.patch
+++ b/patches/preempt-nort-rt-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -154,7 +154,11 @@ do { \
+@@ -180,7 +180,11 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -248,6 +252,18 @@ do { \
+@@ -297,6 +301,18 @@ do { \
set_preempt_need_resched(); \
} while (0)
diff --git a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
index 7158abb2a6a2..9883c1c91caf 100644
--- a/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
+++ b/patches/printk-27force_early_printk-27-boot-param-to-help-with-debugging.patch
@@ -15,7 +15,7 @@ Link: http://lkml.kernel.org/n/tip-ykb97nsfmobq44xketrxs977@git.kernel.org
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -381,6 +381,13 @@ asmlinkage void early_printk(const char
+@@ -431,6 +431,13 @@ asmlinkage void early_printk(const char
*/
static bool __read_mostly printk_killswitch;
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index d5d24ac94162..3d82464f730d 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -9,12 +9,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/printk.h | 2 +
kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++++-------------
- kernel/watchdog.c | 10 ++++++
- 3 files changed, 71 insertions(+), 20 deletions(-)
+ kernel/watchdog_hld.c | 9 +++++
+ 3 files changed, 70 insertions(+), 20 deletions(-)
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -126,9 +126,11 @@ struct va_format {
+@@ -141,9 +141,11 @@ struct va_format {
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -28,9 +28,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK_NMI
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -351,6 +351,58 @@ struct printk_log {
- */
- DEFINE_RAW_SPINLOCK(logbuf_lock);
+@@ -401,6 +401,58 @@ DEFINE_RAW_SPINLOCK(logbuf_lock);
+ printk_safe_exit_irqrestore(flags); \
+ } while (0)
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
@@ -87,9 +87,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1781,6 +1833,13 @@ asmlinkage int vprintk_emit(int facility
- /* cpu currently holding logbuf_lock in this function */
- static unsigned int logbuf_cpu = UINT_MAX;
+@@ -1705,6 +1757,13 @@ asmlinkage int vprintk_emit(int facility
+ int printed_len = 0;
+ bool in_sched = false;
+ /*
+ * Fall back to early_printk if a debugging subsystem has
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -2014,26 +2073,6 @@ DEFINE_PER_CPU(printk_func_t, printk_fun
+@@ -1876,26 +1935,6 @@ static bool suppress_message_printing(in
#endif /* CONFIG_PRINTK */
@@ -128,18 +128,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int __add_preferred_console(char *name, int idx, char *options,
char *brl_options)
{
---- a/kernel/watchdog.c
-+++ b/kernel/watchdog.c
-@@ -315,6 +315,8 @@ static int is_softlockup(unsigned long t
-
- #ifdef CONFIG_HARDLOCKUP_DETECTOR
-
+--- a/kernel/watchdog_hld.c
++++ b/kernel/watchdog_hld.c
+@@ -21,6 +21,7 @@
+ static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
+ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
-+
- static struct perf_event_attr wd_hw_attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
-@@ -348,6 +350,13 @@ static void watchdog_overflow_callback(s
+
+ /* boot commands */
+ /*
+@@ -106,6 +107,13 @@ static void watchdog_overflow_callback(s
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -153,7 +152,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
-@@ -365,6 +374,7 @@ static void watchdog_overflow_callback(s
+@@ -123,6 +131,7 @@ static void watchdog_overflow_callback(s
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index 88f73f7298fb..882f07cd7c2f 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -7,12 +7,12 @@ interrupts while printing to a serial console.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/printk/printk.c | 25 +++++++++++++++++++++++--
- 1 file changed, 23 insertions(+), 2 deletions(-)
+ kernel/printk/printk.c | 19 ++++++++++++++++++-
+ 1 file changed, 18 insertions(+), 1 deletion(-)
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1628,6 +1628,7 @@ static void call_console_drivers(int lev
+@@ -1630,6 +1630,7 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -20,21 +20,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1643,6 +1644,7 @@ static void call_console_drivers(int lev
+@@ -1645,6 +1646,7 @@ static void call_console_drivers(const c
else
con->write(con, text, len);
}
+ migrate_enable();
}
- /*
-@@ -1951,13 +1953,23 @@ asmlinkage int vprintk_emit(int facility
+ int printk_delay_msec __read_mostly;
+@@ -1827,12 +1829,22 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
+ int may_trylock = 1;
+
- lockdep_off();
+#ifdef CONFIG_PREEMPT_RT_FULL
+ /*
+ * we can't take a sleeping lock with IRQs or preeption disabled
@@ -51,41 +50,21 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- if (console_trylock())
+ if (may_trylock && console_trylock())
console_unlock();
- lockdep_on();
}
-@@ -2349,11 +2361,16 @@ static void console_cont_flush(char *tex
- goto out;
- len = cont_print_text(text, size);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(cont.level, NULL, 0, text, len);
-+#else
- raw_spin_unlock(&logbuf_lock);
- stop_critical_timings();
- call_console_drivers(cont.level, NULL, 0, text, len);
- start_critical_timings();
- local_irq_restore(flags);
-+#endif
- return;
- out:
- raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-@@ -2477,13 +2494,17 @@ void console_unlock(void)
- console_idx = log_next(console_idx);
+@@ -2283,10 +2295,15 @@ void console_unlock(void)
console_seq++;
- console_prev = msg->flags;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ raw_spin_unlock_irqrestore(&logbuf_lock, flags);
-+ call_console_drivers(level, ext_text, ext_len, text, len);
-+#else
raw_spin_unlock(&logbuf_lock);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ printk_safe_exit_irqrestore(flags);
++ call_console_drivers(ext_text, ext_len, text, len);
++#else
stop_critical_timings(); /* don't trace print latency */
- call_console_drivers(level, ext_text, ext_len, text, len);
+ call_console_drivers(ext_text, ext_len, text, len);
start_critical_timings();
- local_irq_restore(flags);
--
+ printk_safe_exit_irqrestore(flags);
+#endif
+
if (do_cond_resched)
cond_resched();
- }
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index dd83349b49d1..c512ed2bfbe3 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -23,26 +23,28 @@ taken in case the caller is interrupted between looking into ->state and
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/sched.h | 48 +++++++++++++++++++++++++++++++++++++++++++++---
+ include/linux/sched.h | 49 +++++++++++++++++++++++++++++++++++++++++++++----
kernel/ptrace.c | 9 ++++++++-
kernel/sched/core.c | 17 +++++++++++++++--
- 3 files changed, 68 insertions(+), 6 deletions(-)
+ 3 files changed, 68 insertions(+), 7 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -242,10 +242,7 @@ extern char ___assert_task_state[1 - 2*!
- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+@@ -100,12 +100,8 @@ struct task_group;
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
--#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
- #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
--#define task_is_stopped_or_traced(task) \
-- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
- #define task_contributes_to_load(task) \
- ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
- (task->flags & PF_FROZEN) == 0 && \
-@@ -3365,6 +3362,51 @@ static inline int signal_pending_state(l
- return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
+-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+-
+ #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+
+-#define task_is_stopped_or_traced(task) ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+-
+ #define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
+ (task->flags & PF_FROZEN) == 0 && \
+ (task->state & TASK_NOLOAD) == 0)
+@@ -1500,6 +1496,51 @@ static inline int test_tsk_need_resched(
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
@@ -95,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* explicit rescheduling in places that are safe. The return
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
-@@ -166,7 +166,14 @@ static bool ptrace_freeze_traced(struct
+@@ -175,7 +175,14 @@ static bool ptrace_freeze_traced(struct
spin_lock_irq(&task->sighand->siglock);
if (task_is_traced(task) && !__fatal_signal_pending(task)) {
@@ -113,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1384,6 +1384,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1363,6 +1363,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -132,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1428,7 +1440,7 @@ unsigned long wait_task_inactive(struct
+@@ -1407,7 +1419,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -141,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1443,7 +1455,8 @@ unsigned long wait_task_inactive(struct
+@@ -1422,7 +1434,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/radix-tree-use-local-locks.patch b/patches/radix-tree-use-local-locks.patch
index 3a4f57ef3b81..9450534e4302 100644
--- a/patches/radix-tree-use-local-locks.patch
+++ b/patches/radix-tree-use-local-locks.patch
@@ -11,53 +11,68 @@ Cc: stable-rt@vger.kernel.org
Reported-and-debugged-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
+ include/linux/idr.h | 5 +----
include/linux/radix-tree.h | 7 ++-----
- lib/radix-tree.c | 22 +++++++++++++++-------
- 2 files changed, 17 insertions(+), 12 deletions(-)
+ lib/radix-tree.c | 30 ++++++++++++++++++++++--------
+ 3 files changed, 25 insertions(+), 17 deletions(-)
+--- a/include/linux/idr.h
++++ b/include/linux/idr.h
+@@ -111,10 +111,7 @@ static inline bool idr_is_empty(const st
+ * Each idr_preload() should be matched with an invocation of this
+ * function. See idr_preload() for details.
+ */
+-static inline void idr_preload_end(void)
+-{
+- preempt_enable();
+-}
++void idr_preload_end(void);
+
+ /**
+ * idr_find - return pointer for given id
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
-@@ -292,6 +292,8 @@ unsigned int radix_tree_gang_lookup_slot
+@@ -328,6 +328,8 @@ unsigned int radix_tree_gang_lookup_slot
int radix_tree_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload(gfp_t gfp_mask);
int radix_tree_maybe_preload_order(gfp_t gfp_mask, int order);
+void radix_tree_preload_end(void);
+
void radix_tree_init(void);
- void *radix_tree_tag_set(struct radix_tree_root *root,
+ void *radix_tree_tag_set(struct radix_tree_root *,
unsigned long index, unsigned int tag);
-@@ -314,11 +316,6 @@ unsigned long radix_tree_range_tag_if_ta
- int radix_tree_tagged(struct radix_tree_root *root, unsigned int tag);
- unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
+@@ -347,11 +349,6 @@ unsigned int radix_tree_gang_lookup_tag_
+ unsigned int max_items, unsigned int tag);
+ int radix_tree_tagged(const struct radix_tree_root *, unsigned int tag);
-static inline void radix_tree_preload_end(void)
-{
- preempt_enable();
-}
-
- /**
- * struct radix_tree_iter - radix tree iterator state
- *
+ int radix_tree_split_preload(unsigned old_order, unsigned new_order, gfp_t);
+ int radix_tree_split(struct radix_tree_root *, unsigned long index,
+ unsigned new_order);
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
-@@ -36,7 +36,7 @@
- #include <linux/bitops.h>
+@@ -37,7 +37,7 @@
#include <linux/rcupdate.h>
- #include <linux/preempt.h> /* in_interrupt() */
+ #include <linux/slab.h>
+ #include <linux/string.h>
-
+#include <linux/locallock.h>
/* Number of nodes in fully populated tree of given height */
static unsigned long height_to_maxnodes[RADIX_TREE_MAX_PATH + 1] __read_mostly;
-@@ -68,6 +68,7 @@ struct radix_tree_preload {
+@@ -86,6 +86,7 @@ struct radix_tree_preload {
struct radix_tree_node *nodes;
};
static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
+static DEFINE_LOCAL_IRQ_LOCK(radix_tree_preloads_lock);
- static inline void *node_to_entry(void *ptr)
+ static inline struct radix_tree_node *entry_to_node(void *ptr)
{
-@@ -290,13 +291,14 @@ radix_tree_node_alloc(struct radix_tree_
+@@ -404,12 +405,13 @@ radix_tree_node_alloc(gfp_t gfp_mask, st
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
*/
@@ -65,15 +80,14 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ rtp = &get_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
if (rtp->nr) {
ret = rtp->nodes;
- rtp->nodes = ret->private_data;
- ret->private_data = NULL;
+ rtp->nodes = ret->parent;
rtp->nr--;
}
+ put_locked_var(radix_tree_preloads_lock, radix_tree_preloads);
/*
* Update the allocation stack trace as this is more useful
* for debugging.
-@@ -357,14 +359,14 @@ static int __radix_tree_preload(gfp_t gf
+@@ -475,14 +477,14 @@ static int __radix_tree_preload(gfp_t gf
*/
gfp_mask &= ~__GFP_ACCOUNT;
@@ -90,8 +104,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ local_lock(radix_tree_preloads_lock);
rtp = this_cpu_ptr(&radix_tree_preloads);
if (rtp->nr < nr) {
- node->private_data = rtp->nodes;
-@@ -406,7 +408,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
+ node->parent = rtp->nodes;
+@@ -524,7 +526,7 @@ int radix_tree_maybe_preload(gfp_t gfp_m
if (gfpflags_allow_blocking(gfp_mask))
return __radix_tree_preload(gfp_mask, RADIX_TREE_PRELOAD_SIZE);
/* Preloading doesn't help anything with this gfp mask, skip it */
@@ -100,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
EXPORT_SYMBOL(radix_tree_maybe_preload);
-@@ -422,7 +424,7 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -562,7 +564,7 @@ int radix_tree_maybe_preload_order(gfp_t
/* Preloading doesn't help anything with this gfp mask, skip it */
if (!gfpflags_allow_blocking(gfp_mask)) {
@@ -109,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -456,6 +458,12 @@ int radix_tree_maybe_preload_order(gfp_t
+@@ -596,6 +598,12 @@ int radix_tree_maybe_preload_order(gfp_t
return __radix_tree_preload(gfp_mask, nr_nodes);
}
@@ -119,6 +133,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+}
+EXPORT_SYMBOL(radix_tree_preload_end);
+
- /*
- * The maximum index which can be stored in a radix tree
- */
+ static unsigned radix_tree_load_root(const struct radix_tree_root *root,
+ struct radix_tree_node **nodep, unsigned long *maxindex)
+ {
+@@ -2107,6 +2115,12 @@ void idr_preload(gfp_t gfp_mask)
+ }
+ EXPORT_SYMBOL(idr_preload);
+
++void idr_preload_end(void)
++{
++ local_unlock(radix_tree_preloads_lock);
++}
++EXPORT_SYMBOL(idr_preload_end);
++
+ /**
+ * ida_pre_get - reserve resources for ida allocation
+ * @ida: ida handle
+@@ -2123,7 +2137,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
+ * ida_get_new() can return -EAGAIN, prompting the caller
+ * to return to the ida_pre_get() step.
+ */
+- preempt_enable();
++ local_unlock(radix_tree_preloads_lock);
+
+ if (!this_cpu_read(ida_bitmap)) {
+ struct ida_bitmap *bitmap = kmalloc(sizeof(*bitmap), gfp);
diff --git a/patches/random-avoid-preempt_disable-ed-section.patch b/patches/random-avoid-preempt_disable-ed-section.patch
index 0e7343b1c7fc..ceb67726a893 100644
--- a/patches/random-avoid-preempt_disable-ed-section.patch
+++ b/patches/random-avoid-preempt_disable-ed-section.patch
@@ -10,8 +10,8 @@ Work around it with local_locks.
Cc: stable-rt@vger.kernel.org # where it applies to
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/char/random.c | 12 ++++++++----
- 1 file changed, 8 insertions(+), 4 deletions(-)
+ drivers/char/random.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -23,52 +23,49 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <crypto/chacha20.h>
#include <asm/processor.h>
-@@ -2052,6 +2053,7 @@ struct batched_entropy {
+@@ -2022,6 +2023,7 @@ struct batched_entropy {
* goal of being quite fast and not depleting entropy.
*/
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_long);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_long_lock);
- unsigned long get_random_long(void)
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u64_lock);
+ u64 get_random_u64(void)
{
- unsigned long ret;
-@@ -2060,13 +2062,13 @@ unsigned long get_random_long(void)
- if (arch_get_random_long(&ret))
- return ret;
+ u64 ret;
+@@ -2036,18 +2038,19 @@ u64 get_random_u64(void)
+ return ret;
+ #endif
-- batch = &get_cpu_var(batched_entropy_long);
-+ batch = &get_locked_var(batched_entropy_long_lock, batched_entropy_long);
- if (batch->position % ARRAY_SIZE(batch->entropy_long) == 0) {
- extract_crng((u8 *)batch->entropy_long);
+- batch = &get_cpu_var(batched_entropy_u64);
++ batch = &get_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
+ extract_crng((u8 *)batch->entropy_u64);
batch->position = 0;
}
- ret = batch->entropy_long[batch->position++];
-- put_cpu_var(batched_entropy_long);
-+ put_locked_var(batched_entropy_long_lock, batched_entropy_long);
+ ret = batch->entropy_u64[batch->position++];
+- put_cpu_var(batched_entropy_u64);
++ put_locked_var(batched_entropy_u64_lock, batched_entropy_u64);
return ret;
}
- EXPORT_SYMBOL(get_random_long);
-@@ -2078,6 +2080,8 @@ unsigned int get_random_int(void)
- }
- #else
- static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_int);
-+static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_int_lock);
-+
- unsigned int get_random_int(void)
+ EXPORT_SYMBOL(get_random_u64);
+
+ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
++static DEFINE_LOCAL_IRQ_LOCK(batched_entropy_u32_lock);
+ u32 get_random_u32(void)
{
- unsigned int ret;
-@@ -2086,13 +2090,13 @@ unsigned int get_random_int(void)
+ u32 ret;
+@@ -2056,13 +2059,13 @@ u32 get_random_u32(void)
if (arch_get_random_int(&ret))
return ret;
-- batch = &get_cpu_var(batched_entropy_int);
-+ batch = &get_locked_var(batched_entropy_int_lock, batched_entropy_int);
- if (batch->position % ARRAY_SIZE(batch->entropy_int) == 0) {
- extract_crng((u8 *)batch->entropy_int);
+- batch = &get_cpu_var(batched_entropy_u32);
++ batch = &get_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
+ if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
+ extract_crng((u8 *)batch->entropy_u32);
batch->position = 0;
}
- ret = batch->entropy_int[batch->position++];
-- put_cpu_var(batched_entropy_int);
-+ put_locked_var(batched_entropy_int_lock, batched_entropy_int);
+ ret = batch->entropy_u32[batch->position++];
+- put_cpu_var(batched_entropy_u32);
++ put_locked_var(batched_entropy_u32_lock, batched_entropy_u32);
return ret;
}
- #endif
+ EXPORT_SYMBOL(get_random_u32);
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index 8e66a85089c3..8082c554cdf2 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
-@@ -1120,28 +1120,27 @@ static __u32 get_reg(struct fast_pool *f
+@@ -1102,28 +1102,27 @@ static __u32 get_reg(struct fast_pool *f
return *(ptr + f->reg_idx++);
}
@@ -56,17 +56,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
add_interrupt_bench(cycles);
--- a/drivers/hv/vmbus_drv.c
+++ b/drivers/hv/vmbus_drv.c
-@@ -761,6 +761,8 @@ static void vmbus_isr(void)
- void *page_addr;
+@@ -970,6 +970,8 @@ static void vmbus_isr(void)
+ void *page_addr = hv_cpu->synic_event_page;
struct hv_message *msg;
union hv_synic_event_flags *event;
+ struct pt_regs *regs = get_irq_regs();
+ u64 ip = regs ? instruction_pointer(regs) : 0;
bool handled = false;
- page_addr = hv_context.synic_event_page[cpu];
-@@ -808,7 +810,7 @@ static void vmbus_isr(void)
- tasklet_schedule(hv_context.msg_dpc[cpu]);
+ if (unlikely(page_addr == NULL))
+@@ -1013,7 +1015,7 @@ static void vmbus_isr(void)
+ tasklet_schedule(&hv_cpu->msg_dpc);
}
- add_interrupt_randomness(HYPERVISOR_CALLBACK_VECTOR, 0);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1023,6 +1023,12 @@ static int irq_thread(void *data)
+@@ -1025,6 +1025,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rbtree-include-rcu.h-because-we-use-it.patch b/patches/rbtree-include-rcu.h-because-we-use-it.patch
index d1eca93c0437..93595e83829e 100644
--- a/patches/rbtree-include-rcu.h-because-we-use-it.patch
+++ b/patches/rbtree-include-rcu.h-because-we-use-it.patch
@@ -10,7 +10,8 @@ otherwise.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/rbtree_augmented.h | 1 +
- 1 file changed, 1 insertion(+)
+ include/linux/rbtree_latch.h | 1 +
+ 2 files changed, 2 insertions(+)
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -22,3 +23,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Please note - only struct rb_augment_callbacks and the prototypes for
+--- a/include/linux/rbtree_latch.h
++++ b/include/linux/rbtree_latch.h
+@@ -34,6 +34,7 @@
+
+ #include <linux/rbtree.h>
+ #include <linux/seqlock.h>
++#include <linux/rcupdate.h>
+
+ struct latch_tree_node {
+ struct rb_node node[2];
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index d215a243447f..5de6ff8522ad 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -18,12 +18,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/rcu/tree.c | 110 ++++++++++++++++++++++++++++++---
kernel/rcu/tree.h | 5 -
- kernel/rcu/tree_plugin.h | 153 ++++++-----------------------------------------
- 3 files changed, 122 insertions(+), 146 deletions(-)
+ kernel/rcu/tree_plugin.h | 155 ++++++-----------------------------------------
+ 3 files changed, 122 insertions(+), 148 deletions(-)
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -55,6 +55,11 @@
+@@ -57,6 +57,11 @@
#include <linux/random.h>
#include <linux/trace_events.h>
#include <linux/suspend.h>
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -3044,18 +3049,17 @@ static void
+@@ -3143,18 +3148,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -3067,18 +3071,105 @@ static void invoke_rcu_callbacks(struct
+@@ -3166,18 +3170,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4268,7 +4359,6 @@ void __init rcu_init(void)
+@@ -4357,7 +4448,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
@@ -178,9 +178,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -596,12 +596,10 @@ extern struct rcu_state rcu_bh_state;
- extern struct rcu_state rcu_preempt_state;
- #endif /* #ifdef CONFIG_PREEMPT_RCU */
+@@ -599,12 +599,10 @@ extern struct rcu_state rcu_preempt_stat
+
+ int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
-#ifdef CONFIG_RCU_BOOST
DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -621,10 +619,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -624,10 +622,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -205,15 +205,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* #ifdef CONFIG_RCU_BOOST */
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -24,26 +24,10 @@
+@@ -24,28 +24,10 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
+-#include <linux/sched/debug.h>
-#include <linux/smpboot.h>
-#include <linux/jiffies.h>
+-#include <uapi/linux/sched/types.h>
-#include "../time/tick-internal.h"
-
#ifdef CONFIG_RCU_BOOST
@@ -232,7 +234,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#else /* #ifdef CONFIG_RCU_BOOST */
/*
-@@ -56,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
+@@ -58,6 +40,14 @@ DEFINE_PER_CPU(char, rcu_cpu_has_work);
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -247,7 +249,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -633,15 +625,6 @@ static void rcu_preempt_check_callbacks(
+@@ -635,15 +625,6 @@ static void rcu_preempt_check_callbacks(
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -263,7 +265,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Queue a preemptible-RCU callback for invocation after a grace period.
*/
-@@ -830,6 +813,19 @@ void exit_rcu(void)
+@@ -832,6 +813,19 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -283,7 +285,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_BOOST
#include "../locking/rtmutex_common.h"
-@@ -861,16 +857,6 @@ static void rcu_initiate_boost_trace(str
+@@ -863,16 +857,6 @@ static void rcu_initiate_boost_trace(str
#endif /* #else #ifdef CONFIG_RCU_TRACE */
@@ -300,7 +302,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1014,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1016,23 +1000,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -324,7 +326,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1084,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1086,67 +1053,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
@@ -392,7 +394,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1175,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1177,26 +1083,12 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
@@ -419,7 +421,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1217,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1219,11 +1111,6 @@ static void rcu_initiate_boost(struct rc
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index b9f24c803d20..bb3cc11c220d 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -623,7 +623,7 @@ config RCU_FANOUT_LEAF
+@@ -622,7 +622,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 59a7dd8145fe..1f45e1c6094b 100644
--- a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -62,7 +62,7 @@
+@@ -64,7 +64,7 @@
#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0);
module_param(rcu_normal, int, 0);
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index 27053c1477b0..9c343b466d1e 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
help
This option needs to be enabled if you wish to make
expert-level adjustments to RCU configuration. By default,
-@@ -650,7 +650,7 @@ config TREE_RCU_TRACE
+@@ -649,7 +649,7 @@ config TREE_RCU_TRACE
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index ad447fd633cf..af6e8e692d42 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -179,6 +179,9 @@ void call_rcu(struct rcu_head *head,
+@@ -178,6 +178,9 @@ void call_rcu(struct rcu_head *head,
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -202,6 +205,7 @@ void call_rcu(struct rcu_head *head,
+@@ -201,6 +204,7 @@ void call_rcu(struct rcu_head *head,
*/
void call_rcu_bh(struct rcu_head *head,
rcu_callback_t func);
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
-@@ -339,7 +343,11 @@ static inline int rcu_preempt_depth(void
+@@ -299,7 +303,11 @@ static inline int rcu_preempt_depth(void
/* Internal to kernel */
void rcu_init(void);
void rcu_sched_qs(void);
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
-@@ -513,7 +521,14 @@ extern struct lockdep_map rcu_callback_m
+@@ -473,7 +481,14 @@ extern struct lockdep_map rcu_callback_m
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
-@@ -911,10 +926,14 @@ static inline void rcu_read_unlock(void)
+@@ -871,10 +886,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -924,10 +943,14 @@ static inline void rcu_read_lock_bh(void
+@@ -884,10 +903,14 @@ static inline void rcu_read_lock_bh(void
*/
static inline void rcu_read_unlock_bh(void)
{
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* RCUtree hotplug events */
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -404,6 +404,7 @@ static struct rcu_torture_ops rcu_ops =
+@@ -414,6 +414,7 @@ static struct rcu_torture_ops rcu_ops =
.name = "rcu"
};
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Definitions for rcu_bh torture testing.
*/
-@@ -443,6 +444,12 @@ static struct rcu_torture_ops rcu_bh_ops
+@@ -453,6 +454,12 @@ static struct rcu_torture_ops rcu_bh_ops
.name = "rcu_bh"
};
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
 * The names include "busted", and they really mean it!
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -260,6 +260,7 @@ void rcu_sched_qs(void)
+@@ -262,6 +262,7 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_bh_qs(void)
{
if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) {
-@@ -269,6 +270,7 @@ void rcu_bh_qs(void)
+@@ -271,6 +272,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
-@@ -449,11 +451,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+@@ -557,11 +559,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
/*
* Return the number of RCU BH batches started thus far for debug & stats.
*/
@@ -225,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU batches completed thus far for debug & stats.
-@@ -473,6 +477,7 @@ unsigned long rcu_batches_completed_sche
+@@ -581,6 +585,7 @@ unsigned long rcu_batches_completed_sche
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -481,6 +486,7 @@ unsigned long rcu_batches_completed_bh(v
+@@ -589,6 +594,7 @@ unsigned long rcu_batches_completed_bh(v
return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
@@ -241,7 +241,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU expedited batches completed thus far for
-@@ -504,6 +510,7 @@ unsigned long rcu_exp_batches_completed_
+@@ -612,6 +618,7 @@ unsigned long rcu_exp_batches_completed_
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state.
*/
-@@ -522,6 +529,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -630,6 +637,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -572,9 +586,11 @@ void rcutorture_get_gp_data(enum rcutort
+@@ -680,9 +694,11 @@ void rcutorture_get_gp_data(enum rcutort
case RCU_FLAVOR:
rsp = rcu_state_p;
break;
@@ -275,7 +275,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -3195,6 +3211,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3289,6 +3305,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -283,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for invocation after a quicker grace period.
*/
-@@ -3203,6 +3220,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3297,6 +3314,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3294,6 +3312,7 @@ void synchronize_sched(void)
+@@ -3388,6 +3406,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3320,6 +3339,7 @@ void synchronize_rcu_bh(void)
+@@ -3414,6 +3433,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -307,7 +307,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3698,6 +3718,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3790,6 +3810,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -315,7 +315,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3706,6 +3727,7 @@ void rcu_barrier_bh(void)
+@@ -3798,6 +3819,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -323,7 +323,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4227,7 +4249,9 @@ void __init rcu_init(void)
+@@ -4316,7 +4338,9 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
@@ -335,7 +335,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_dump_rcu_node_tree(&rcu_sched_state);
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -588,7 +588,9 @@ extern struct list_head rcu_struct_flavo
+@@ -589,7 +589,9 @@ extern struct list_head rcu_struct_flavo
*/
extern struct rcu_state rcu_sched_state;
@@ -347,7 +347,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern struct rcu_state rcu_preempt_state;
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -296,6 +296,7 @@ int rcu_read_lock_held(void)
+@@ -298,6 +298,7 @@ int rcu_read_lock_held(void)
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -355,7 +355,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
*
-@@ -322,6 +323,7 @@ int rcu_read_lock_bh_held(void)
+@@ -324,6 +325,7 @@ int rcu_read_lock_bh_held(void)
return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
diff --git a/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch b/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
deleted file mode 100644
index 9e08fc5a359a..000000000000
--- a/patches/rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
+++ /dev/null
@@ -1,60 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 2 Nov 2016 16:45:58 +0100
-Subject: [PATCH] rcu: update: make RCU_EXPEDITE_BOOT default
-
-RCU_EXPEDITE_BOOT should speed up the boot process by enforcing
-synchronize_rcu_expedited() instead of synchronize_rcu() during the boot
-process. There should be no reason why one does not want this and there
-is no need worry about real time latency at this point.
-Therefore make it default.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- init/Kconfig | 13 -------------
- kernel/rcu/update.c | 6 ++----
- 2 files changed, 2 insertions(+), 17 deletions(-)
-
---- a/init/Kconfig
-+++ b/init/Kconfig
-@@ -781,19 +781,6 @@ config RCU_NOCB_CPU_ALL
-
- endchoice
-
--config RCU_EXPEDITE_BOOT
-- bool
-- default n
-- help
-- This option enables expedited grace periods at boot time,
-- as if rcu_expedite_gp() had been invoked early in boot.
-- The corresponding rcu_unexpedite_gp() is invoked from
-- rcu_end_inkernel_boot(), which is intended to be invoked
-- at the end of the kernel-only boot sequence, just before
-- init is exec'ed.
--
-- Accept the default if unsure.
--
- endmenu # "RCU Subsystem"
-
- config BUILD_BIN2C
---- a/kernel/rcu/update.c
-+++ b/kernel/rcu/update.c
-@@ -132,8 +132,7 @@ bool rcu_gp_is_normal(void)
- }
- EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
-
--static atomic_t rcu_expedited_nesting =
-- ATOMIC_INIT(IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT) ? 1 : 0);
-+static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);
-
- /*
- * Should normal grace-period primitives be expedited? Intended for
-@@ -182,8 +181,7 @@ EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
- */
- void rcu_end_inkernel_boot(void)
- {
-- if (IS_ENABLED(CONFIG_RCU_EXPEDITE_BOOT))
-- rcu_unexpedite_gp();
-+ rcu_unexpedite_gp();
- if (rcu_normal_after_boot)
- WRITE_ONCE(rcu_normal, 1);
- }
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 053ff6eb7513..ae2c5b679a35 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -265,7 +265,12 @@ static void rcu_preempt_qs(void);
+@@ -267,7 +267,12 @@ static void rcu_preempt_qs(void);
void rcu_bh_qs(void)
{
diff --git a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
index 897b4087b687..e29478f68b9d 100644
--- a/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
+++ b/patches/re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -257,9 +257,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
+@@ -252,9 +252,11 @@ static DEFINE_PER_CPU(struct hotplug_pcp
*/
void pin_current_cpu(void)
{
diff --git a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
index 55290c0d4b46..6c857ac9a40e 100644
--- a/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
+++ b/patches/re-preempt_rt_full-arm-coredump-fails-for-cpu-3e-3d-4.patch
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
-@@ -322,6 +322,30 @@ unsigned long arch_randomize_brk(struct
+@@ -325,6 +325,30 @@ unsigned long arch_randomize_brk(struct
}
#ifdef CONFIG_MMU
diff --git a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
index e1f9487d39ff..325a7c0a2e6c 100644
--- a/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
+++ b/patches/rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patch
@@ -69,12 +69,12 @@ general.
This issue was first reported in:
http://www.spinics.net/lists/linux-rt-users/msg13752.html
- arch/arm/kernel/smp.c | 5 +++--
- 1 file changed, 3 insertions(+), 2 deletions(-)
+ arch/arm/kernel/smp.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -234,8 +234,6 @@ int __cpu_disable(void)
+@@ -236,8 +236,6 @@ int __cpu_disable(void)
flush_cache_louis();
local_flush_tlb_all();
@@ -83,13 +83,11 @@ This issue was first reported in:
return 0;
}
-@@ -251,6 +249,9 @@ void __cpu_die(unsigned int cpu)
- pr_err("CPU%u: cpu didn't die\n", cpu);
- return;
+@@ -255,6 +253,7 @@ void __cpu_die(unsigned int cpu)
}
-+
-+ clear_tasks_mm_cpumask(cpu);
-+
- pr_notice("CPU%u: shutdown\n", cpu);
+ pr_debug("CPU%u: shutdown\n", cpu);
++ clear_tasks_mm_cpumask(cpu);
/*
+ * platform_cpu_kill() is generally expected to do the powering off
+ * and/or cutting of clocks to the dying CPU. Optionally, this may
diff --git a/patches/rt-add-rt-locks.patch b/patches/rt-add-rt-locks.patch
index e53cd235d57b..aa886020d9ef 100644
--- a/patches/rt-add-rt-locks.patch
+++ b/patches/rt-add-rt-locks.patch
@@ -12,31 +12,32 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
include/linux/kernel.h | 4
include/linux/locallock.h | 6
include/linux/mutex.h | 20 -
- include/linux/mutex_rt.h | 84 ++++++
+ include/linux/mutex_rt.h | 130 +++++++++
include/linux/rtmutex.h | 29 +-
include/linux/rwlock_rt.h | 99 +++++++
include/linux/rwlock_types_rt.h | 33 ++
include/linux/rwsem.h | 6
include/linux/rwsem_rt.h | 167 ++++++++++++
- include/linux/sched.h | 19 +
+ include/linux/sched.h | 8
+ include/linux/sched/wake_q.h | 11
include/linux/spinlock.h | 12
include/linux/spinlock_api_smp.h | 4
- include/linux/spinlock_rt.h | 162 ++++++++++++
+ include/linux/spinlock_rt.h | 162 +++++++++++
include/linux/spinlock_types.h | 11
include/linux/spinlock_types_rt.h | 48 +++
kernel/futex.c | 11
kernel/locking/Makefile | 9
- kernel/locking/rt.c | 498 ++++++++++++++++++++++++++++++++++++++
- kernel/locking/rtmutex.c | 479 +++++++++++++++++++++++++++++++++---
- kernel/locking/rtmutex_common.h | 9
+ kernel/locking/rt.c | 521 ++++++++++++++++++++++++++++++++++++++
+ kernel/locking/rtmutex.c | 480 ++++++++++++++++++++++++++++++++---
+ kernel/locking/rtmutex_common.h | 10
kernel/locking/spinlock.c | 7
kernel/locking/spinlock_debug.c | 5
kernel/sched/core.c | 7
- 23 files changed, 1663 insertions(+), 66 deletions(-)
+ 24 files changed, 1734 insertions(+), 66 deletions(-)
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -194,6 +194,9 @@ extern int _cond_resched(void);
+@@ -201,6 +201,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -46,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
-@@ -201,6 +204,7 @@ extern int _cond_resched(void);
+@@ -208,6 +211,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
@@ -74,13 +75,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
-@@ -19,6 +19,17 @@
- #include <asm/processor.h>
- #include <linux/osq_lock.h>
+@@ -22,6 +22,17 @@
+
+ struct ww_acquire_ctx;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-+ , .dep_map = { .name = #lockname }
++ , .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
@@ -92,9 +93,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Simple, straightforward mutexes with strict semantics:
*
-@@ -99,13 +110,6 @@ do { \
- static inline void mutex_destroy(struct mutex *lock) {}
- #endif
+@@ -113,13 +124,6 @@ do { \
+ __mutex_init((mutex), #mutex, &__key); \
+ } while (0)
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
@@ -104,20 +105,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-#endif
-
#define __MUTEX_INITIALIZER(lockname) \
- { .count = ATOMIC_INIT(1) \
+ { .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -173,6 +177,8 @@ extern int __must_check mutex_lock_killa
- extern int mutex_trylock(struct mutex *lock);
- extern void mutex_unlock(struct mutex *lock);
+@@ -227,4 +231,6 @@ mutex_trylock_recursive(struct mutex *lo
+ return mutex_trylock(lock);
+ }
+#endif /* !PREEMPT_RT_FULL */
+
- extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
-
#endif /* __LINUX_MUTEX_H */
--- /dev/null
+++ b/include/linux/mutex_rt.h
-@@ -0,0 +1,84 @@
+@@ -0,0 +1,130 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
@@ -148,6 +147,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
++extern void __lockfunc _mutex_lock_io(struct mutex *lock);
++extern void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
@@ -163,7 +164,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#define mutex_lock_killable(l) _mutex_lock_killable(l)
+#define mutex_trylock(l) _mutex_trylock(l)
+#define mutex_unlock(l) _mutex_unlock(l)
++#define mutex_lock_io(l) _mutex_lock_io(l);
++
++#define __mutex_owner(l) ((l)->lock.owner)
++
++#ifdef CONFIG_DEBUG_MUTEXES
+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
++#else
++static inline void mutex_destroy(struct mutex *lock) {}
++#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
@@ -171,6 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ _mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable_nested(l, s)
++# define mutex_lock_io_nested(l, s) _mutex_lock_io_nested(l, s)
+
+# define mutex_lock_nest_lock(lock, nest_lock) \
+do { \
@@ -185,6 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+# define mutex_lock_killable_nested(l, s) \
+ _mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
++# define mutex_lock_io_nested(l, s) _mutex_lock_io(l)
+#endif
+
+# define mutex_init(mutex) \
@@ -201,6 +212,40 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ __mutex_do_init((mutex), name, key); \
+} while (0)
+
++/**
++ * These values are chosen such that FAIL and SUCCESS match the
++ * values of the regular mutex_trylock().
++ */
++enum mutex_trylock_recursive_enum {
++ MUTEX_TRYLOCK_FAILED = 0,
++ MUTEX_TRYLOCK_SUCCESS = 1,
++ MUTEX_TRYLOCK_RECURSIVE,
++};
++/**
++ * mutex_trylock_recursive - trylock variant that allows recursive locking
++ * @lock: mutex to be locked
++ *
++ * This function should not be used, _ever_. It is purely for hysterical GEM
++ * raisins, and once those are gone this will be removed.
++ *
++ * Returns:
++ * MUTEX_TRYLOCK_FAILED - trylock failed,
++ * MUTEX_TRYLOCK_SUCCESS - lock acquired,
++ * MUTEX_TRYLOCK_RECURSIVE - we already owned the lock.
++ */
++int __rt_mutex_owner_current(struct rt_mutex *lock);
++
++static inline /* __deprecated */ __must_check enum mutex_trylock_recursive_enum
++mutex_trylock_recursive(struct mutex *lock)
++{
++ if (unlikely(__rt_mutex_owner_current(&lock->lock)))
++ return MUTEX_TRYLOCK_RECURSIVE;
++
++ return mutex_trylock(lock);
++}
++
++extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
++
+#endif
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -600,27 +645,36 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -311,6 +311,11 @@ extern char ___assert_task_state[1 - 2*!
-
- #endif
+@@ -123,6 +123,11 @@ struct task_group;
+ smp_store_mb(current->state, (state_value)); \
+ } while (0)
-+#define __set_current_state_no_track(state_value) \
-+ do { current->state = (state_value); } while (0)
-+#define set_current_state_no_track(state_value) \
-+ set_mb(current->state, (state_value))
++#define __set_current_state_no_track(state_value) \
++ current->state = (state_value);
++#define set_current_state_no_track(state_value) \
++ smp_store_mb(current->state, (state_value));
++
+ #else
+ /*
+ * set_current_state() includes a barrier so that the write of current->state
+@@ -160,6 +165,9 @@ struct task_group;
+ */
+ #define __set_current_state(state_value) do { current->state = (state_value); } while (0)
+ #define set_current_state(state_value) smp_store_mb(current->state, (state_value))
+
- /* Task command name length */
- #define TASK_COMM_LEN 16
++#define __set_current_state_no_track(state_value) __set_current_state(state_value)
++#define set_current_state_no_track(state_value) set_current_state(state_value)
+ #endif
-@@ -1012,8 +1017,18 @@ struct wake_q_head {
- struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+ /* Task command name length: */
+--- a/include/linux/sched/wake_q.h
++++ b/include/linux/sched/wake_q.h
+@@ -48,6 +48,15 @@ static inline void wake_q_init(struct wa
extern void wake_q_add(struct wake_q_head *head,
-- struct task_struct *task);
+ struct task_struct *task);
-extern void wake_up_q(struct wake_q_head *head);
-+ struct task_struct *task);
+extern void __wake_up_q(struct wake_q_head *head, bool sleeper);
-+
+static inline void wake_up_q(struct wake_q_head *head)
+{
+ __wake_up_q(head, false);
@@ -631,11 +685,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ __wake_up_q(head, true);
+}
- /*
- * sched-domains (multiprocessor balancing) declarations:
+ #endif /* _LINUX_SCHED_WAKE_Q_H */
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
-@@ -271,7 +271,11 @@ static inline void do_raw_spin_unlock(ra
+@@ -268,7 +268,11 @@ static inline void do_raw_spin_unlock(ra
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
@@ -648,7 +701,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -282,6 +286,10 @@ static inline void do_raw_spin_unlock(ra
+@@ -279,6 +283,10 @@ static inline void do_raw_spin_unlock(ra
# include <linux/spinlock_api_up.h>
#endif
@@ -659,7 +712,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -416,4 +424,6 @@ extern int _atomic_dec_and_lock(atomic_t
+@@ -408,4 +416,6 @@ extern int _atomic_dec_and_lock(atomic_t
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
@@ -668,7 +721,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* __LINUX_SPINLOCK_H */
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
-@@ -189,6 +189,8 @@ static inline int __raw_spin_trylock_bh(
+@@ -187,6 +187,8 @@ static inline int __raw_spin_trylock_bh(
return 0;
}
@@ -691,7 +744,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/bug.h>
+
+extern void
-+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock) \
+do { \
@@ -915,15 +968,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1398,6 +1398,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1400,6 +1400,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
- WAKE_Q(wake_q);
-+ WAKE_Q(wake_sleeper_q);
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1459,13 +1460,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1461,13 +1462,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -940,7 +993,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -2666,7 +2667,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2668,7 +2669,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -949,7 +1002,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3033,7 +3034,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3035,7 +3036,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -981,7 +1034,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -24,7 +28,10 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -24,8 +28,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
@@ -992,9 +1045,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
--- /dev/null
+++ b/kernel/locking/rt.c
-@@ -0,0 +1,498 @@
+@@ -0,0 +1,521 @@
+/*
+ * kernel/rt.c
+ *
@@ -1090,6 +1144,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(_mutex_lock);
+
++void __lockfunc _mutex_lock_io(struct mutex *lock)
++{
++ int token;
++
++ token = io_schedule_prepare();
++ _mutex_lock(lock);
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL_GPL(_mutex_lock_io);
++
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+ int ret;
@@ -1122,6 +1186,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
++void __lockfunc _mutex_lock_io_nested(struct mutex *lock, int subclass)
++{
++ int token;
++
++ token = io_schedule_prepare();
++
++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
++ rt_mutex_lock(&lock->lock);
++
++ io_schedule_finish(token);
++}
++EXPORT_SYMBOL_GPL(_mutex_lock_io_nested);
++
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
@@ -1507,16 +1584,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* See Documentation/locking/rt-mutex-design.txt for details.
*/
-@@ -228,6 +233,8 @@ static inline bool unlock_rt_mutex_safe(
+@@ -230,6 +235,9 @@ static inline bool unlock_rt_mutex_safe(
}
#endif
+#define STEAL_NORMAL 0
+#define STEAL_LATERAL 1
++
/*
* Only use with rt_mutex_waiter_{less,equal}()
*/
-@@ -236,10 +243,15 @@ static inline bool unlock_rt_mutex_safe(
+@@ -238,11 +246,15 @@ static inline bool unlock_rt_mutex_safe(
static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
@@ -1525,6 +1603,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
- if (left->prio < right->prio)
- return 1;
+-
+ if (mode == STEAL_NORMAL) {
+ if (left->prio < right->prio)
+ return 1;
@@ -1532,10 +1611,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (left->prio <= right->prio)
+ return 1;
+ }
-
/*
* If both waiters have dl_prio(), we check the deadlines of the
-@@ -283,7 +295,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
+ * associated tasks.
+@@ -285,7 +297,7 @@ rt_mutex_enqueue(struct rt_mutex *lock,
while (*link) {
parent = *link;
entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
@@ -1544,7 +1623,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
link = &parent->rb_left;
} else {
link = &parent->rb_right;
-@@ -322,7 +334,7 @@ rt_mutex_enqueue_pi(struct task_struct *
+@@ -324,7 +336,7 @@ rt_mutex_enqueue_pi(struct task_struct *
while (*link) {
parent = *link;
entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
@@ -1553,7 +1632,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
link = &parent->rb_left;
} else {
link = &parent->rb_right;
-@@ -388,6 +400,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -390,6 +402,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -1568,7 +1647,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -713,13 +733,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -715,13 +735,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -1587,15 +1666,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -812,6 +835,7 @@ static int rt_mutex_adjust_prio_chain(st
- return ret;
- }
-
-+
- /*
- * Try to take an rt-mutex
- *
-@@ -822,8 +846,9 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -824,8 +847,9 @@ static int rt_mutex_adjust_prio_chain(st
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
*/
@@ -1607,7 +1678,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -862,8 +887,10 @@ static int try_to_take_rt_mutex(struct r
+@@ -864,8 +888,10 @@ static int try_to_take_rt_mutex(struct r
* If waiter is not the highest priority waiter of
* @lock, give up.
*/
@@ -1619,7 +1690,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
-@@ -881,15 +908,26 @@ static int try_to_take_rt_mutex(struct r
+@@ -883,15 +909,26 @@ static int try_to_take_rt_mutex(struct r
* not need to be dequeued.
*/
if (rt_mutex_has_waiters(lock)) {
@@ -1648,7 +1719,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -936,6 +974,339 @@ static int try_to_take_rt_mutex(struct r
+@@ -938,6 +975,339 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -1811,8 +1882,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+ unsigned long flags;
-+ WAKE_Q(wake_q);
-+ WAKE_Q(wake_sleeper_q);
++ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
+ bool postunlock;
+
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -1988,7 +2059,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1051,6 +1422,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1053,6 +1423,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -1996,7 +2067,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1090,7 +1462,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1092,7 +1463,10 @@ static void mark_wakeup_next_waiter(stru
* Pairs with preempt_enable() in rt_mutex_postunlock();
*/
preempt_disable();
@@ -2008,7 +2079,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&current->pi_lock);
}
-@@ -1174,21 +1549,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1176,21 +1550,22 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -2033,7 +2104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1268,7 +1644,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1270,7 +1645,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -2042,7 +2113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1363,7 +1739,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1365,7 +1740,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -2052,7 +2123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned long flags;
-@@ -1417,7 +1794,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1419,7 +1795,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -2061,20 +2132,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true; /* call rt_mutex_postunlock() */
-@@ -1469,9 +1846,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1471,9 +1847,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
/*
* Performs the wakeup of the the top-waiter and re-enables preemption.
*/
-void rt_mutex_postunlock(struct wake_q_head *wake_q)
+void rt_mutex_postunlock(struct wake_q_head *wake_q,
-+ struct wake_q_head *wq_sleeper)
++ struct wake_q_head *wake_sleeper_q)
{
wake_up_q(wake_q);
-+ wake_up_q_sleeper(wq_sleeper);
++ wake_up_q_sleeper(wake_sleeper_q);
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
preempt_enable();
-@@ -1480,15 +1859,17 @@ void rt_mutex_postunlock(struct wake_q_h
+@@ -1482,15 +1860,17 @@ void rt_mutex_postunlock(struct wake_q_h
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -2082,20 +2153,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ struct wake_q_head *wqh,
+ struct wake_q_head *wq_sleeper))
{
- WAKE_Q(wake_q);
-+ WAKE_Q(wake_sleeper_q);
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
return;
- if (slowfn(lock, &wake_q))
- rt_mutex_postunlock(&wake_q);
-+ if (slowfn(lock, &wake_q, &wake_sleeper_q))
++ if (slowfn(lock, &wake_q, &wake_sleeper_q))
+ rt_mutex_postunlock(&wake_q, &wake_sleeper_q);
}
/**
-@@ -1607,12 +1988,9 @@ void __sched rt_mutex_unlock(struct rt_m
+@@ -1609,12 +1989,9 @@ void __sched rt_mutex_unlock(struct rt_m
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -2111,7 +2182,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1629,22 +2007,34 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1631,22 +2008,34 @@ bool __sched __rt_mutex_futex_unlock(str
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
@@ -2134,8 +2205,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
- WAKE_Q(wake_q);
-+ WAKE_Q(wake_sleeper_q);
+ DEFINE_WAKE_Q(wake_q);
++ DEFINE_WAKE_Q(wake_sleeper_q);
bool postunlock;
raw_spin_lock_irq(&lock->wait_lock);
@@ -2149,7 +2220,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -1677,13 +2067,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
+@@ -1679,13 +2068,12 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
lock->owner = NULL;
@@ -2164,7 +2235,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1698,7 +2087,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
+@@ -1704,7 +2092,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner)
{
@@ -2173,7 +2244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
-@@ -1916,3 +2305,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -1926,3 +2314,25 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -2201,7 +2272,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -27,6 +27,7 @@ struct rt_mutex_waiter {
+@@ -14,6 +14,7 @@
+
+ #include <linux/rtmutex.h>
+ #include <linux/sched/wake_q.h>
++#include <linux/sched/debug.h>
+
+ /*
+ * This is the control structure for tasks blocked on a rt_mutex,
+@@ -28,6 +29,7 @@ struct rt_mutex_waiter {
struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
@@ -2209,7 +2288,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -107,7 +108,7 @@ extern void rt_mutex_init_proxy_locked(s
+@@ -107,7 +109,7 @@ extern void rt_mutex_init_proxy_locked(s
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -2218,7 +2297,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -124,9 +125,11 @@ extern int rt_mutex_futex_trylock(struct
+@@ -124,9 +126,11 @@ extern int rt_mutex_futex_trylock(struct
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -2228,7 +2307,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
+extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
-+ struct wake_q_head *wq_sleeper);
++ struct wake_q_head *wake_sleeper_q);
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
@@ -2282,7 +2361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void spin_dump(raw_spinlock_t *lock, const char *msg)
{
-@@ -159,6 +161,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
+@@ -135,6 +137,7 @@ void do_raw_spin_unlock(raw_spinlock_t *
arch_spin_unlock(&lock->raw_lock);
}
@@ -2290,7 +2369,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void rwlock_bug(rwlock_t *lock, const char *msg)
{
if (!debug_locks_off())
-@@ -300,3 +303,5 @@ void do_raw_write_unlock(rwlock_t *lock)
+@@ -224,3 +227,5 @@ void do_raw_write_unlock(rwlock_t *lock)
debug_write_unlock(lock);
arch_write_unlock(&lock->raw_lock);
}
@@ -2298,7 +2377,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -454,7 +454,7 @@ void wake_q_add(struct wake_q_head *head
+@@ -461,7 +461,7 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
@@ -2307,7 +2386,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct wake_q_node *node = head->first;
-@@ -471,7 +471,10 @@ void wake_up_q(struct wake_q_head *head)
+@@ -478,7 +478,10 @@ void wake_up_q(struct wake_q_head *head)
* wake_up_process() implies a wmb() to pair with the queueing
* in wake_q_add() so as not to miss wakeups.
*/
diff --git a/patches/rt-drop_mutex_disable_on_not_debug.patch b/patches/rt-drop_mutex_disable_on_not_debug.patch
deleted file mode 100644
index f99a1ad5f49e..000000000000
--- a/patches/rt-drop_mutex_disable_on_not_debug.patch
+++ /dev/null
@@ -1,32 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 10 Feb 2017 18:21:04 +0100
-Subject: rt: Drop mutex_disable() on !DEBUG configs and the GPL suffix from export symbol
-
-Alex Goins reported that mutex_destroy() on RT will force a GPL only symbol
-which won't link and therefore fail on a non-GPL kernel module.
-This does not happen on !RT and is a regression on RT which we would like to
-avoid.
-I try here the easy thing and to not use rt_mutex_destroy() if
-CONFIG_DEBUG_MUTEXES is not enabled.
-
-Reported-by: Alex Goins <agoins@nvidia.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/mutex_rt.h | 5 +++++
- 1 file changed, 5 insertions(+)
-
---- a/include/linux/mutex_rt.h
-+++ b/include/linux/mutex_rt.h
-@@ -43,7 +43,12 @@ extern void __lockfunc _mutex_unlock(str
- #define mutex_lock_killable(l) _mutex_lock_killable(l)
- #define mutex_trylock(l) _mutex_trylock(l)
- #define mutex_unlock(l) _mutex_unlock(l)
-+
-+#ifdef CONFIG_DEBUG_MUTEXES
- #define mutex_destroy(l) rt_mutex_destroy(&(l)->lock)
-+#else
-+static inline void mutex_destroy(struct mutex *lock) {}
-+#endif
-
- #ifdef CONFIG_DEBUG_LOCK_ALLOC
- # define mutex_lock_nested(l, s) _mutex_lock_nested(l, s)
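[Editorial note, not part of the commit: the dropped patch above only routes mutex_destroy() to the GPL-only rt_mutex_destroy() when mutex debugging is configured; a minimal sketch of that header pattern, assuming the RT-style struct mutex that wraps an rt_mutex as ->lock, is:

  #ifdef CONFIG_DEBUG_MUTEXES
  /* Debug builds keep the full (GPL-only) teardown check. */
  # define mutex_destroy(l)	rt_mutex_destroy(&(l)->lock)
  #else
  /* Non-debug builds: a no-op, so non-GPL modules still link on RT. */
  static inline void mutex_destroy(struct mutex *lock) { }
  #endif

This is an illustrative sketch of the technique, not the tree's actual include/linux/mutex_rt.h.]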
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 917f783b1318..d484d224ba72 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
-@@ -52,4 +52,10 @@ static inline void ssleep(unsigned int s
+@@ -63,4 +63,10 @@ static inline void ssleep(unsigned int s
msleep(seconds * 1000);
}
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1768,6 +1768,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
+@@ -1741,6 +1741,25 @@ SYSCALL_DEFINE2(nanosleep, struct timesp
return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}
diff --git a/patches/rt-locking-Reenable-migration-accross-schedule.patch b/patches/rt-locking-Reenable-migration-accross-schedule.patch
index 7a5f17a30c8b..3fb05c261c1c 100644
--- a/patches/rt-locking-Reenable-migration-accross-schedule.patch
+++ b/patches/rt-locking-Reenable-migration-accross-schedule.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -980,14 +980,19 @@ static int __try_to_take_rt_mutex(struct
+@@ -981,14 +981,19 @@ static int __try_to_take_rt_mutex(struct
* preemptible spin_lock functions:
*/
static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
-@@ -1045,7 +1050,8 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1046,7 +1051,8 @@ static int task_blocks_on_rt_mutex(struc
* We store the current state under p->pi_lock in p->saved_state and
* the try_to_wake_up() code handles this accordingly.
*/
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct task_struct *lock_owner, *self = current;
struct rt_mutex_waiter waiter, *top_waiter;
-@@ -1089,8 +1095,13 @@ static void noinline __sched rt_spin_lo
+@@ -1090,8 +1096,13 @@ static void noinline __sched rt_spin_lo
debug_rt_mutex_print_deadlock(&waiter);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_lock_irqsave(&lock->wait_lock, flags);
-@@ -1148,38 +1159,35 @@ static void noinline __sched rt_spin_lo
+@@ -1149,38 +1160,35 @@ static void noinline __sched rt_spin_lo
void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock)
{
diff --git a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 56cebf3789d8..8b8dee043188 100644
--- a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -21,7 +21,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1697,7 +1697,7 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1707,7 +1707,7 @@ int __rt_mutex_start_proxy_lock(struct r
ret = 0;
}
diff --git a/patches/rtmutex-Make-lock_killable-work.patch b/patches/rtmutex-Make-lock_killable-work.patch
index 7fb5801e083e..38bde9b0c31a 100644
--- a/patches/rtmutex-Make-lock_killable-work.patch
+++ b/patches/rtmutex-Make-lock_killable-work.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1213,18 +1213,13 @@ static int __sched
+@@ -1215,18 +1215,13 @@ static int __sched
if (try_to_take_rt_mutex(lock, current, waiter))
break;
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- * TASK_INTERRUPTIBLE checks for signals and
- * timeout. Ignored otherwise.
- */
-- if (unlikely(state == TASK_INTERRUPTIBLE)) {
-- if (unlikely(state == TASK_INTERRUPTIBLE)) {
- /* Signal pending? */
- if (signal_pending(current))
- ret = -EINTR;
diff --git a/patches/rtmutex-Provide-locked-slowpath.patch b/patches/rtmutex-Provide-locked-slowpath.patch
index 085aba5f544b..c86e9e1b0a7e 100644
--- a/patches/rtmutex-Provide-locked-slowpath.patch
+++ b/patches/rtmutex-Provide-locked-slowpath.patch
@@ -13,12 +13,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
kernel/locking/rtmutex.c | 72 +++++++++++++++++++++++-----------------
- kernel/locking/rtmutex_common.h | 9 +++++
- 2 files changed, 51 insertions(+), 30 deletions(-)
+ kernel/locking/rtmutex_common.h | 8 ++++
+ 2 files changed, 50 insertions(+), 30 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1750,36 +1750,18 @@ static void ww_mutex_account_lock(struct
+@@ -1752,30 +1752,13 @@ static void ww_mutex_account_lock(struct
}
#endif
@@ -53,7 +53,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- raw_spin_lock_irqsave(&lock->wait_lock, flags);
+ int ret;
- /* Try to acquire the lock again: */
+ #ifdef CONFIG_PREEMPT_RT_FULL
+ if (ww_ctx) {
+@@ -1791,7 +1774,6 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (try_to_take_rt_mutex(lock, current, NULL)) {
if (ww_ctx)
ww_mutex_account_lock(lock, ww_ctx);
@@ -61,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -1789,13 +1771,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1801,13 +1783,13 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
@@ -79,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* ww_mutex received EDEADLK, let it become EALREADY */
ret = __mutex_lock_check_stamp(lock, ww_ctx);
BUG_ON(!ret);
-@@ -1804,10 +1786,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1816,10 +1798,10 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(ret)) {
__set_current_state(TASK_RUNNING);
if (rt_mutex_has_waiters(lock))
@@ -92,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} else if (ww_ctx) {
ww_mutex_account_lock(lock, ww_ctx);
}
-@@ -1817,6 +1799,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1829,6 +1811,36 @@ rt_mutex_slowlock(struct rt_mutex *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -131,10 +133,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -131,6 +131,15 @@ extern bool __rt_mutex_futex_unlock(stru
- extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
- struct wake_q_head *wq_sleeper);
+@@ -131,6 +131,14 @@ extern bool __rt_mutex_futex_unlock(stru
+ extern void rt_mutex_postunlock(struct wake_q_head *wake_q,
+ struct wake_q_head *wake_sleeper_q);
+/* RW semaphore special interface */
+struct ww_acquire_ctx;
+
@@ -143,7 +145,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ enum rtmutex_chainwalk chwalk,
+ struct ww_acquire_ctx *ww_ctx,
+ struct rt_mutex_waiter *waiter);
-+
+
#ifdef CONFIG_DEBUG_RT_MUTEXES
# include "rtmutex-debug.h"
- #else
diff --git a/patches/rtmutex-Provide-rt_mutex_lock_state.patch b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
index c698207ff70b..931c591523aa 100644
--- a/patches/rtmutex-Provide-rt_mutex_lock_state.patch
+++ b/patches/rtmutex-Provide-rt_mutex_lock_state.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2008,21 +2008,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -2020,21 +2020,32 @@ rt_mutex_fastunlock(struct rt_mutex *loc
}
/**
@@ -61,7 +61,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @lock: the rt_mutex to be locked
*
* Returns:
-@@ -2031,20 +2042,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -2043,20 +2054,10 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_lock_killable - lock a rt_mutex killable
*
-@@ -2054,16 +2055,21 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -2066,16 +2067,21 @@ int __sched rt_mutex_futex_trylock(struc
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
diff --git a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
index 899d9e7b7b52..4a16d5e24ba4 100644
--- a/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
+++ b/patches/rtmutex-add-a-first-shot-of-ww_mutex.patch
@@ -22,20 +22,31 @@ lockdep says:
Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
---
- kernel/locking/rtmutex.c | 248 ++++++++++++++++++++++++++++++++++++++++++-----
- 1 file changed, 224 insertions(+), 24 deletions(-)
+ kernel/locking/rtmutex.c | 273 ++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 247 insertions(+), 26 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -21,6 +21,7 @@
- #include <linux/sched/rt.h>
- #include <linux/sched/deadline.h>
+@@ -23,6 +23,7 @@
+ #include <linux/sched/wake_q.h>
+ #include <linux/sched/debug.h>
#include <linux/timer.h>
+#include <linux/ww_mutex.h>
#include "rtmutex_common.h"
-@@ -1300,6 +1301,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1286,8 +1287,8 @@ int atomic_dec_and_spin_lock(atomic_t *a
+ }
+ EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+
+- void
+-__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
++void
++__rt_spin_lock_init(spinlock_t *lock, const char *name, struct lock_class_key *key)
+ {
+ #ifdef CONFIG_DEBUG_LOCK_ALLOC
+ /*
+@@ -1301,6 +1302,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -76,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1580,7 +1615,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1581,7 +1616,8 @@ void rt_mutex_init_waiter(struct rt_mute
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -86,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
int ret = 0;
-@@ -1598,6 +1634,12 @@ static int __sched
+@@ -1599,6 +1635,12 @@ static int __sched
break;
}
@@ -99,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1632,13 +1674,90 @@ static void rt_mutex_handle_deadlock(int
+@@ -1633,13 +1675,91 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -168,6 +179,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+ BUG_ON(waiter->lock != lock);
+ rt_mutex_wake_waiter(waiter);
+ }
++
+}
+
+#else
@@ -191,8 +203,20 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1658,6 +1777,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1657,8 +1777,20 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+ */
+ raw_spin_lock_irqsave(&lock->wait_lock, flags);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ if (ww_ctx) {
++ struct ww_mutex *ww;
++
++ ww = container_of(lock, struct ww_mutex, base.lock);
++ if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
++ return -EALREADY;
++ }
++#endif
++
/* Try to acquire the lock again: */
if (try_to_take_rt_mutex(lock, current, NULL)) {
+ if (ww_ctx)
@@ -200,7 +224,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return 0;
}
-@@ -1672,13 +1793,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1673,13 +1805,23 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (likely(!ret))
/* sleep on the mutex */
@@ -226,7 +250,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
/*
-@@ -1808,29 +1939,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1809,29 +1951,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -264,7 +288,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
static inline int
-@@ -1881,7 +2016,7 @@ void __sched rt_mutex_lock(struct rt_mut
+@@ -1882,7 +2028,7 @@ void __sched rt_mutex_lock(struct rt_mut
{
might_sleep();
@@ -273,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1898,7 +2033,7 @@ int __sched rt_mutex_lock_interruptible(
+@@ -1899,7 +2045,7 @@ int __sched rt_mutex_lock_interruptible(
{
might_sleep();
@@ -282,7 +306,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1925,7 +2060,7 @@ int __sched rt_mutex_lock_killable(struc
+@@ -1926,7 +2072,7 @@ int __sched rt_mutex_lock_killable(struc
{
might_sleep();
@@ -291,7 +315,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1949,6 +2084,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1950,6 +2096,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -299,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
-@@ -2239,7 +2375,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2248,7 +2395,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -308,20 +332,13 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2306,24 +2442,88 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2315,24 +2462,98 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
--#ifdef CONFIG_PREEMPT_RT_FULL
--struct ww_mutex {
--};
--struct ww_acquire_ctx {
--};
--int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
- {
-- BUG();
++{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+ unsigned tmp;
+
@@ -343,47 +360,58 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+#endif
+
+ return 0;
- }
--EXPORT_SYMBOL_GPL(__ww_mutex_lock);
--int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++}
+
-+#ifdef CONFIG_PREEMPT_RT_FULL
+ #ifdef CONFIG_PREEMPT_RT_FULL
+-struct ww_mutex {
+-};
+-struct ww_acquire_ctx {
+-};
+-int __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+int __sched
-+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
- BUG();
+ int ret;
+
+ might_sleep();
+
-+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++ ctx ? &ctx->dep_map : NULL, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0,
++ ctx);
+ if (ret)
+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+ else if (!ret && ww_ctx->acquired > 1)
-+ return ww_mutex_deadlock_injection(lock, ww_ctx);
++ else if (!ret && ctx && ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ctx);
+
+ return ret;
}
- EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+-EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+-int __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
++EXPORT_SYMBOL_GPL(ww_mutex_lock_interruptible);
+
+int __sched
-+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
-+{
++ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+ {
+- BUG();
+ int ret;
+
+ might_sleep();
+
-+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
-+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
++ mutex_acquire_nest(&lock->base.dep_map, 0, 0,
++ ctx ? &ctx->dep_map : NULL, _RET_IP_);
++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0,
++ ctx);
+ if (ret)
+ mutex_release(&lock->base.dep_map, 1, _RET_IP_);
-+ else if (!ret && ww_ctx->acquired > 1)
-+ return ww_mutex_deadlock_injection(lock, ww_ctx);
++ else if (!ret && ctx && ctx->acquired > 1)
++ return ww_mutex_deadlock_injection(lock, ctx);
+
+ return ret;
-+}
-+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+ }
+-EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
++EXPORT_SYMBOL_GPL(ww_mutex_lock);
+
void __sched ww_mutex_unlock(struct ww_mutex *lock)
{
@@ -405,7 +433,13 @@ Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+
+ mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+ rt_mutex_unlock(&lock->base.lock);
++}
++EXPORT_SYMBOL(ww_mutex_unlock);
++
++int __rt_mutex_owner_current(struct rt_mutex *lock)
++{
++ return rt_mutex_owner(lock) == current;
}
-EXPORT_SYMBOL_GPL(ww_mutex_unlock);
-+EXPORT_SYMBOL(ww_mutex_unlock);
++EXPORT_SYMBOL(__rt_mutex_owner_current);
#endif
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 0ff9c5834ad9..125274ddd9ee 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2011,6 +2011,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2013,6 +2013,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -2996,7 +3006,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -2998,7 +3008,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3052,20 +3062,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3054,20 +3064,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3074,7 +3119,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3076,7 +3121,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3085,7 +3131,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3087,7 +3133,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3099,7 +3145,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3101,7 +3147,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -133,6 +133,11 @@ static void fixup_rt_mutex_waiters(struc
+@@ -135,6 +135,11 @@ static void fixup_rt_mutex_waiters(struc
WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -389,7 +394,8 @@ int max_lock_depth = 1024;
+@@ -391,7 +396,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -525,7 +531,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -527,7 +533,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -961,6 +967,23 @@ static int task_blocks_on_rt_mutex(struc
+@@ -963,6 +969,23 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rt_mutex_adjust_prio(task);
waiter->task = task;
waiter->lock = lock;
-@@ -985,7 +1008,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -987,7 +1010,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1081,7 +1104,7 @@ static void remove_waiter(struct rt_mute
+@@ -1083,7 +1106,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_assert_held(&lock->wait_lock);
-@@ -1107,7 +1130,8 @@ static void remove_waiter(struct rt_mute
+@@ -1109,7 +1132,8 @@ static void remove_waiter(struct rt_mute
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -222,7 +222,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&owner->pi_lock);
-@@ -1143,7 +1167,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1145,7 +1169,8 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
diff --git a/patches/rtmutex-lock-killable.patch b/patches/rtmutex-lock-killable.patch
index ab9bee7a7bc6..cbda846a4f32 100644
--- a/patches/rtmutex-lock-killable.patch
+++ b/patches/rtmutex-lock-killable.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1535,6 +1535,25 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -1537,6 +1537,25 @@ int __sched rt_mutex_futex_trylock(struc
}
/**
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/rtmutex-trylock-is-okay-on-RT.patch
index eddbba860be3..cfa95fd2df38 100644
--- a/patches/rtmutex-trylock-is-okay-on-RT.patch
+++ b/patches/rtmutex-trylock-is-okay-on-RT.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1545,7 +1545,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
+@@ -1547,7 +1547,11 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
*/
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch
index b89ce4e3b285..2cbfd2fd83cb 100644
--- a/patches/rtmutex_dont_include_rcu.patch
+++ b/patches/rtmutex_dont_include_rcu.patch
@@ -93,7 +93,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -46,6 +46,7 @@
+@@ -45,6 +45,7 @@
#include <linux/compiler.h>
#include <linux/ktime.h>
#include <linux/irqflags.h>
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/barrier.h>
-@@ -633,54 +634,6 @@ static inline void rcu_preempt_sleep_che
+@@ -593,54 +594,6 @@ static inline void rcu_preempt_sleep_che
})
/**
diff --git a/patches/rwsem-rt-Lift-single-reader-restriction.patch b/patches/rwsem-rt-Lift-single-reader-restriction.patch
index 3f02568c3b26..df0e1452a372 100644
--- a/patches/rwsem-rt-Lift-single-reader-restriction.patch
+++ b/patches/rwsem-rt-Lift-single-reader-restriction.patch
@@ -45,8 +45,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
include/linux/rwsem_rt.h | 166 +++++-----------------------
kernel/locking/Makefile | 4
kernel/locking/rt.c | 167 ----------------------------
- kernel/locking/rwsem-rt.c | 268 ++++++++++++++++++++++++++++++++++++++++++++++
- 5 files changed, 310 insertions(+), 304 deletions(-)
+ kernel/locking/rwsem-rt.c | 269 ++++++++++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 311 insertions(+), 304 deletions(-)
create mode 100644 kernel/locking/rwsem-rt.c
--- a/include/linux/rwsem.h
@@ -283,7 +283,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -32,6 +32,6 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+@@ -32,7 +32,7 @@ ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
endif
@@ -291,9 +291,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o rwsem-rt.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
+ obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
--- a/kernel/locking/rt.c
+++ b/kernel/locking/rt.c
-@@ -306,173 +306,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
+@@ -329,173 +329,6 @@ void __rt_rwlock_init(rwlock_t *rwlock,
}
EXPORT_SYMBOL(__rt_rwlock_init);
@@ -469,11 +470,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @cnt: the atomic which we are to dec
--- /dev/null
+++ b/kernel/locking/rwsem-rt.c
-@@ -0,0 +1,268 @@
+@@ -0,0 +1,269 @@
+/*
+ */
+#include <linux/rwsem.h>
-+#include <linux/sched.h>
++#include <linux/sched/debug.h>
++#include <linux/sched/signal.h>
+#include <linux/export.h>
+
+#include "rtmutex_common.h"
diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
index 93f554941463..f505d221ebde 100644
--- a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2207,7 +2207,7 @@ EXPORT_SYMBOL(wake_up_process);
+@@ -2206,7 +2206,7 @@ EXPORT_SYMBOL(wake_up_process);
*/
int wake_up_lock_sleeper(struct task_struct *p)
{
diff --git a/patches/sched-Remove-TASK_ALL.patch b/patches/sched-Remove-TASK_ALL.patch
index 6b25930e3ded..f1b09654e434 100644
--- a/patches/sched-Remove-TASK_ALL.patch
+++ b/patches/sched-Remove-TASK_ALL.patch
@@ -19,11 +19,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -236,7 +236,6 @@ extern char ___assert_task_state[1 - 2*!
+@@ -94,7 +94,6 @@ struct task_group;
- /* Convenience macros for the sake of wake_up */
- #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
--#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+ /* Convenience macros for the sake of wake_up(): */
+ #define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+-#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
- /* get_task_state() */
- #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+ /* get_task_state(): */
+ #define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
diff --git a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
index 03a11a1982af..af69550adc14 100644
--- a/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
+++ b/patches/sched-deadline-dl_task_timer-has-to-be-irqsafe.patch
@@ -12,11 +12,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -687,6 +687,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -693,6 +693,7 @@ void init_dl_task_timer(struct sched_dl_
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
timer->function = dl_task_timer;
+ timer->irqsafe = 1;
}
- static
+ /*
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 95841d76303e..c249eded9804 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -7,24 +7,27 @@ burden random tasks with that.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/sched.h | 13 +++++++++++++
- kernel/fork.c | 15 ++++++++++++++-
- 2 files changed, 27 insertions(+), 1 deletion(-)
+ include/linux/sched.h | 3 +++
+ include/linux/sched/task.h | 10 ++++++++++
+ kernel/fork.c | 15 ++++++++++++++-
+ 3 files changed, 27 insertions(+), 1 deletion(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1968,6 +1968,9 @@ struct task_struct {
- unsigned int sequential_io;
- unsigned int sequential_io_avg;
+@@ -1052,6 +1052,9 @@ struct task_struct {
+ unsigned int sequential_io;
+ unsigned int sequential_io_avg;
#endif
+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct rcu_head put_rcu;
++ struct rcu_head put_rcu;
+#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
+ unsigned long task_state_change;
#endif
-@@ -2225,6 +2228,15 @@ extern struct pid *cad_pid;
- extern void free_task(struct task_struct *tsk);
+--- a/include/linux/sched/task.h
++++ b/include/linux/sched/task.h
+@@ -86,6 +86,15 @@ extern void sched_exec(void);
+
#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+#ifdef CONFIG_PREEMPT_RT_BASE
@@ -39,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void __put_task_struct(struct task_struct *t);
static inline void put_task_struct(struct task_struct *t)
-@@ -2232,6 +2244,7 @@ static inline void put_task_struct(struc
+@@ -93,6 +102,7 @@ static inline void put_task_struct(struc
if (atomic_dec_and_test(&t->usage))
__put_task_struct(t);
}
@@ -49,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct task_struct *try_get_task_struct(struct task_struct **ptask);
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -376,7 +376,9 @@ static inline void put_signal_struct(str
+@@ -389,7 +389,9 @@ static inline void put_signal_struct(str
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -60,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -393,7 +395,18 @@ void __put_task_struct(struct task_struc
+@@ -406,7 +408,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/patches/sched-disable-rt-group-sched-on-rt.patch b/patches/sched-disable-rt-group-sched-on-rt.patch
index 4ee8a1911417..ba61099d6e13 100644
--- a/patches/sched-disable-rt-group-sched-on-rt.patch
+++ b/patches/sched-disable-rt-group-sched-on-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1051,6 +1051,7 @@ config CFS_BANDWIDTH
+@@ -1052,6 +1052,7 @@ config CFS_BANDWIDTH
config RT_GROUP_SCHED
bool "Group scheduling for SCHED_RR/FIFO"
depends on CGROUP_SCHED
diff --git a/patches/sched-disable-ttwu-queue.patch b/patches/sched-disable-ttwu-queue.patch
index b89c7cd4c753..4f4bec45cc26 100644
--- a/patches/sched-disable-ttwu-queue.patch
+++ b/patches/sched-disable-ttwu-queue.patch
@@ -27,5 +27,5 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
SCHED_FEAT(TTWU_QUEUE, true)
+#endif
- #ifdef HAVE_RT_PUSH_IPI
/*
+ * When doing wakeups, attempt to limit superfluous scans of the LLC domain.
diff --git a/patches/sched-limit-nr-migrate.patch b/patches/sched-limit-nr-migrate.patch
index 81270b7802fb..dc639213105c 100644
--- a/patches/sched-limit-nr-migrate.patch
+++ b/patches/sched-limit-nr-migrate.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -129,7 +129,11 @@ const_debug unsigned int sysctl_sched_fe
+@@ -58,7 +58,11 @@ const_debug unsigned int sysctl_sched_fe
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
*/
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index 6db6951f9982..8827d153e2a1 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -301,6 +301,11 @@ void synchronize_rcu(void);
+@@ -261,6 +261,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -326,6 +331,8 @@ static inline int rcu_preempt_depth(void
+@@ -286,6 +291,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7862,7 +7862,7 @@ void __init sched_init(void)
+@@ -6271,7 +6271,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index 010cecfb48cb..dd1c6e3a325a 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -8,7 +8,7 @@ we want to do in task switch and oder atomic contexts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/mm_types.h | 4 ++++
- include/linux/sched.h | 11 +++++++++++
+ include/linux/sched/mm.h | 11 +++++++++++
kernel/fork.c | 13 +++++++++++++
kernel/sched/core.c | 19 +++++++++++++++++--
4 files changed, 45 insertions(+), 2 deletions(-)
@@ -22,20 +22,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#include <linux/rcupdate.h>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
- #include <asm/page.h>
-@@ -509,6 +510,9 @@ struct mm_struct {
+
+@@ -491,6 +492,9 @@ struct mm_struct {
bool tlb_flush_pending;
#endif
struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ struct rcu_head delayed_drop;
+#endif
- #ifdef CONFIG_X86_INTEL_MPX
- /* address of the bounds directory */
- void __user *bd_addr;
---- a/include/linux/sched.h
-+++ b/include/linux/sched.h
-@@ -2912,6 +2912,17 @@ static inline void mmdrop(struct mm_stru
+ #ifdef CONFIG_HUGETLB_PAGE
+ atomic_long_t hugetlb_usage;
+ #endif
+--- a/include/linux/sched/mm.h
++++ b/include/linux/sched/mm.h
+@@ -42,6 +42,17 @@ static inline void mmdrop(struct mm_stru
__mmdrop(mm);
}
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -865,6 +865,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -885,6 +885,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -91,16 +91,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5587,6 +5591,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5612,6 +5616,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm);
+
/*
- * Ensures that the idle task is using init_mm right before its cpu goes
+ * Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5601,7 +5607,12 @@ void idle_task_exit(void)
+@@ -5626,7 +5632,12 @@ void idle_task_exit(void)
switch_mm_irqs_off(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -7547,6 +7558,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5953,6 +5964,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 5654a311bf86..edd5db3ab804 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -10,32 +10,33 @@ sleep is done, the saved state is restored.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/sched.h | 2 ++
+ include/linux/sched.h | 3 +++
kernel/sched/core.c | 31 ++++++++++++++++++++++++++++++-
kernel/sched/sched.h | 1 +
- 3 files changed, 33 insertions(+), 1 deletion(-)
+ 3 files changed, 34 insertions(+), 1 deletion(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1481,6 +1481,7 @@ struct task_struct {
- struct thread_info thread_info;
+@@ -490,6 +490,8 @@ struct task_struct {
#endif
- volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
-+ volatile long saved_state; /* saved state for "spinlock sleepers" */
- void *stack;
- atomic_t usage;
- unsigned int flags; /* per process flags, defined below */
-@@ -2704,6 +2705,7 @@ extern void xtime_update(unsigned long t
+ /* -1 unrunnable, 0 runnable, >0 stopped: */
+ volatile long state;
++ /* saved state for "spinlock sleepers" */
++ volatile long saved_state;
+ void *stack;
+ atomic_t usage;
+ /* Per task flags (PF_*), defined further below: */
+@@ -1415,6 +1417,7 @@ extern struct task_struct *find_task_by_
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
-+extern int wake_up_lock_sleeper(struct task_struct * tsk);
++extern int wake_up_lock_sleeper(struct task_struct *tsk);
extern void wake_up_new_task(struct task_struct *tsk);
+
#ifdef CONFIG_SMP
- extern void kick_process(struct task_struct *tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2033,8 +2033,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2013,8 +2013,25 @@ try_to_wake_up(struct task_struct *p, un
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -62,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2181,6 +2198,18 @@ int wake_up_process(struct task_struct *
+@@ -2180,6 +2197,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -83,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1163,6 +1163,7 @@ static inline void finish_lock_switch(st
+@@ -1293,6 +1293,7 @@ static inline void finish_lock_switch(st
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index 62eea3f91c04..c413f66f597f 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2040,8 +2040,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2020,8 +2020,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index d781b0f7e6f2..2c6ec642e0af 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3477,8 +3477,10 @@ static void __sched notrace __schedule(b
+@@ -3474,8 +3474,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/scsi-fcoe-rt-aware.patch b/patches/scsi-fcoe-rt-aware.patch
index a39b3f4b1093..8742dafd9443 100644
--- a/patches/scsi-fcoe-rt-aware.patch
+++ b/patches/scsi-fcoe-rt-aware.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
-@@ -1455,11 +1455,11 @@ static int fcoe_rcv(struct sk_buff *skb,
+@@ -1464,11 +1464,11 @@ static int fcoe_rcv(struct sk_buff *skb,
static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
{
struct fcoe_percpu_s *fps;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return rc;
}
-@@ -1646,11 +1646,11 @@ static inline int fcoe_filter_frames(str
+@@ -1655,11 +1655,11 @@ static inline int fcoe_filter_frames(str
return 0;
}
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return -EINVAL;
}
-@@ -1693,7 +1693,7 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1702,7 +1702,7 @@ static void fcoe_recv_frame(struct sk_bu
*/
hp = (struct fcoe_hdr *) skb_network_header(skb);
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
if (stats->ErrorFrames < 5)
printk(KERN_WARNING "fcoe: FCoE version "
-@@ -1725,13 +1725,13 @@ static void fcoe_recv_frame(struct sk_bu
+@@ -1734,13 +1734,13 @@ static void fcoe_recv_frame(struct sk_bu
goto drop;
if (!fcoe_filter_frames(lport, fp)) {
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -834,7 +834,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -836,7 +836,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
INIT_LIST_HEAD(&del_list);
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -870,7 +870,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -872,7 +872,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
sel_time = fcf->time;
}
}
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Removes fcf from current list */
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
-@@ -814,10 +814,10 @@ static struct fc_exch *fc_exch_em_alloc(
+@@ -833,10 +833,10 @@ static struct fc_exch *fc_exch_em_alloc(
}
memset(ep, 0, sizeof(*ep));
diff --git a/patches/seqlock-prevent-rt-starvation.patch b/patches/seqlock-prevent-rt-starvation.patch
index 9b97b8277e66..82b410ab807c 100644
--- a/patches/seqlock-prevent-rt-starvation.patch
+++ b/patches/seqlock-prevent-rt-starvation.patch
@@ -22,9 +22,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/seqlock.h | 56 +++++++++++++++++++++++++++++++++++++-----------
- include/net/dst.h | 2 -
- include/net/neighbour.h | 4 +--
- 3 files changed, 47 insertions(+), 15 deletions(-)
+ include/net/neighbour.h | 6 ++---
+ 2 files changed, 47 insertions(+), 15 deletions(-)
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -157,17 +156,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irqrestore(&sl->lock, flags);
}
---- a/include/net/dst.h
-+++ b/include/net/dst.h
-@@ -446,7 +446,7 @@ static inline void dst_confirm(struct ds
- static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
- struct sk_buff *skb)
- {
-- const struct hh_cache *hh;
-+ struct hh_cache *hh;
-
- if (dst->pending_confirm) {
- unsigned long now = jiffies;
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -446,7 +446,7 @@ static inline int neigh_hh_bridge(struct
@@ -179,7 +167,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned int seq;
int hh_len;
-@@ -501,7 +501,7 @@ struct neighbour_cb {
+@@ -470,7 +470,7 @@ static inline int neigh_hh_output(const
+
+ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
+ {
+- const struct hh_cache *hh = &n->hh;
++ struct hh_cache *hh = &n->hh;
+
+ if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
+ return neigh_hh_output(hh, skb);
+@@ -511,7 +511,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
diff --git a/patches/series b/patches/series
index 601935aca5cc..790548344128 100644
--- a/patches/series
+++ b/patches/series
@@ -46,6 +46,77 @@ lockdep-Fix-per-cpu-static-objects.patch
0004-MAINTAINERS-Add-FUTEX-SUBSYSTEM.patch
futex-rt_mutex-Fix-rt_mutex_cleanup_proxy_lock.patch
+###
+# get_online_cpus() rework.
+# cpus_allowed queue from sched/core
+0001-ia64-topology-Remove-cpus_allowed-manipulation.patch
+0002-workqueue-Provide-work_on_cpu_safe.patch
+0003-ia64-salinfo-Replace-racy-task-affinity-logic.patch
+0004-ia64-sn-hwperf-Replace-racy-task-affinity-logic.patch
+0005-powerpc-smp-Replace-open-coded-task-affinity-logic.patch
+0006-sparc-sysfs-Replace-racy-task-affinity-logic.patch
+0007-ACPI-processor-Fix-error-handling-in-__acpi_processo.patch
+0008-ACPI-processor-Replace-racy-task-affinity-logic.patch
+0009-cpufreq-ia64-Replace-racy-task-affinity-logic.patch
+0010-cpufreq-sh-Replace-racy-task-affinity-logic.patch
+0011-cpufreq-sparc-us3-Replace-racy-task-affinity-logic.patch
+0012-cpufreq-sparc-us2e-Replace-racy-task-affinity-logic.patch
+0013-crypto-N2-Replace-racy-task-affinity-logic.patch
+
+# a few patches from tip's sched/core
+0001-sched-clock-Fix-early-boot-preempt-assumption-in-__s.patch
+0001-init-Pin-init-task-to-the-boot-CPU-initially.patch
+0002-arm-Adjust-system_state-check.patch
+0003-arm64-Adjust-system_state-check.patch
+0004-x86-smp-Adjust-system_state-check.patch
+0005-metag-Adjust-system_state-check.patch
+0006-powerpc-Adjust-system_state-check.patch
+0007-ACPI-Adjust-system_state-check.patch
+0008-mm-Adjust-system_state-check.patch
+0009-cpufreq-pasemi-Adjust-system_state-check.patch
+0010-iommu-vt-d-Adjust-system_state-checks.patch
+0012-async-Adjust-system_state-checks.patch
+0013-extable-Adjust-system_state-checks.patch
+0014-printk-Adjust-system_state-checks.patch
+0015-mm-vmscan-Adjust-system_state-checks.patch
+0016-init-Introduce-SYSTEM_SCHEDULING-state.patch
+0017-sched-core-Enable-might_sleep-and-smp_processor_id-c.patch
+
+# recursive get_online_cpus() invocations from smp/hotplug
+#0001-cpu-hotplug-Provide-cpus_read-write_-un-lock.patch
+#0002-cpu-hotplug-Provide-lockdep_assert_cpus_held.patch
+#0003-cpu-hotplug-Provide-cpuhp_setup-remove_state-_nocall.patch
+#0004-cpu-hotplug-Add-__cpuhp_state_add_instance_cpuslocke.patch
+#0005-stop_machine-Provide-stop_machine_cpuslocked.patch
+#0006-padata-Make-padata_alloc-static.patch
+#0007-padata-Avoid-nested-calls-to-cpus_read_lock-in-pcryp.patch
+#0008-x86-mtrr-Remove-get_online_cpus-from-mtrr_save_state.patch
+#0009-cpufreq-Use-cpuhp_setup_state_nocalls_cpuslocked.patch
+#0010-KVM-PPC-Book3S-HV-Use-cpuhp_setup_state_nocalls_cpus.patch
+#0011-hwtracing-coresight-etm3x-Use-cpuhp_setup_state_noca.patch
+#0012-hwtracing-coresight-etm4x-Use-cpuhp_setup_state_noca.patch
+#0013-perf-x86-intel-cqm-Use-cpuhp_setup_state_cpuslocked.patch
+#0014-ARM-hw_breakpoint-Use-cpuhp_setup_state_cpuslocked.patch
+#0015-s390-kernel-Use-stop_machine_cpuslocked.patch
+#0016-powerpc-powernv-Use-stop_machine_cpuslocked.patch
+#0017-cpu-hotplug-Use-stop_machine_cpuslocked-in-takedown_.patch
+#0018-x86-perf-Drop-EXPORT-of-perf_check_microcode.patch
+#0019-perf-x86-intel-Drop-get_online_cpus-in-intel_snb_che.patch
+#0020-PCI-Use-cpu_hotplug_disable-instead-of-get_online_cp.patch
+#0021-PCI-Replace-the-racy-recursion-prevention.patch
+#0022-ACPI-processor-Use-cpu_hotplug_disable-instead-of-ge.patch
+#0023-perf-tracing-cpuhotplug-Fix-locking-order.patch
+#0024-jump_label-Reorder-hotplug-lock-and-jump_label_lock.patch
+#0025-kprobes-Cure-hotplug-lock-ordering-issues.patch
+#0026-arm64-Prevent-cpu-hotplug-rwsem-recursion.patch
+#0027-arm-Prevent-hotplug-rwsem-recursion.patch
+#0028-s390-Prevent-hotplug-rwsem-recursion.patch
+#0029-cpu-hotplug-Convert-hotplug-locking-to-percpu-rwsem.patch
+#0030-sched-Provide-is_percpu_thread-helper.patch
+#0031-acpi-processor-Prevent-cpu-hotplug-deadlock.patch
+#0032-cpuhotplug-Link-lock-stacks-for-hotplug-callbacks.patch
+###
+
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -59,17 +130,11 @@ rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patc
# Stuff broken upstream, need to be sent
############################################################
rtmutex--Handle-non-enqueued-waiters-gracefully.patch
-fs-dcache-include-wait.h.patch
rbtree-include-rcu.h-because-we-use-it.patch
fs-dcache-init-in_lookup_hashtable.patch
iommu-iova-don-t-disable-preempt-around-this_cpu_ptr.patch
iommu-vt-d-don-t-disable-preemption-while-accessing-.patch
-x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
rxrpc-remove-unused-static-variables.patch
-rcu-update-make-RCU_EXPEDITE_BOOT-default.patch
-locking-percpu-rwsem-use-swait-for-the-wating-writer.patch
-pinctrl-qcom-Use-raw-spinlock-variants.patch
-x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
# Wants a different fix for upstream
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -111,6 +176,9 @@ kernel-SRCU-provide-a-static-initializer.patch
############################################################
# Stuff which should go upstream ASAP
############################################################
+CPUFREQ-Loongson2-drop-set_cpus_allowed_ptr.patch
+kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+add_migrate_disable.patch
# SCHED BLOCK/WQ
block-shorten-interrupt-disabled-regions.patch
@@ -157,6 +225,8 @@ suspend-prevernt-might-sleep-splats.patch
# NETWORKING
net-prevent-abba-deadlock.patch
net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+net-core-remove-explicit-do_softirq-from-busy_poll_s.patch
+net_disable_NET_RX_BUSY_POLL.patch
# X86
x86-io-apic-migra-no-unmask.patch
@@ -165,12 +235,8 @@ x86-io-apic-migra-no-unmask.patch
# LOCKING INIT FIXES
-# PCI
-pci-access-use-__wake_up_all_locked.patch
-
# WORKQUEUE
-
#####################################################
# Stuff which should go mainline, but wants some care
#####################################################
@@ -210,7 +276,7 @@ local-irq-rt-depending-variants.patch
preempt-nort-rt-variants.patch
# local locks & migrate disable
-introduce_migrate_disable_cpu_light.patch
+#introduce_migrate_disable_cpu_light.patch
futex-workaround-migrate_disable-enable-in-different.patch
rt-local-irq-lock.patch
locallock-add-local_lock_on.patch
@@ -246,6 +312,9 @@ genirq-force-threading.patch
# DRIVERS NET
drivers-net-vortex-fix-locking-issues.patch
+# ACCT
+delayacct-use-raw_spinlocks.patch
+
# MM PAGE_ALLOC
mm-page_alloc-rt-friendly-per-cpu-pages.patch
mm-page_alloc-reduce-lock-sections-further.patch
@@ -320,6 +389,7 @@ stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
stop-machine-raw-lock.patch
# MIGRATE DISABLE AND PER CPU
+# XXX redo
hotplug-light-get-online-cpus.patch
hotplug-sync_unplug-no-27-5cn-27-in-task-name.patch
re-migrate_disable-race-with-cpu-hotplug-3f.patch
@@ -338,16 +408,12 @@ softirq-preempt-fix-3-re.patch
softirq-disable-softirq-stacks-for-rt.patch
softirq-split-locks.patch
kernel-softirq-unlock-with-irqs-on.patch
-kernel-migrate_disable-do-fastpath-in-atomic-irqs-of.patch
irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
softirq-split-timer-softirqs-out-of-ksoftirqd.patch
softirq-wake-the-timer-softirq-if-needed.patch
-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
-Revert-timers-Don-t-wake-ktimersoftd-on-every-tick.patch
rtmutex-trylock-is-okay-on-RT.patch
# compile fix due to rtmutex locks
-gpu_don_t_check_for_the_lock_owner.patch
fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
# FUTEX/RTMUTEX
@@ -365,7 +431,6 @@ spinlock-types-separate-raw.patch
rtmutex-avoid-include-hell.patch
rtmutex_dont_include_rcu.patch
rt-add-rt-locks.patch
-rt-drop_mutex_disable_on_not_debug.patch
rtmutex-add-a-first-shot-of-ww_mutex.patch
rtmutex-Provide-rt_mutex_lock_state.patch
rtmutex-Provide-locked-slowpath.patch
@@ -438,8 +503,7 @@ workqueue-use-locallock.patch
work-queue-work-around-irqsafe-timer-optimization.patch
workqueue-distangle-from-rq-lock.patch
-# IDR
-idr-use-local-lock-for-protection.patch
+# IDA
percpu_ida-use-locklocks.patch
# DEBUGOBJECTS
@@ -508,9 +572,6 @@ x86-highmem-add-a-already-used-pte-check.patch
arm-highmem-flush-tlb-on-unmap.patch
arm-enable-highmem-for-rt.patch
-# IPC
-ipc-sem-rework-semaphore-wakeups.patch
-
# SYSRQ
# KVM require constant freq TSC (smp function call -> cpufreq)
@@ -535,10 +596,13 @@ acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
cpumask-disable-offstack-on-rt.patch
# RANDOM
+Revert-random-invalidate-batched-entropy-after-crng-.patch
random-make-it-work-on-rt.patch
random-avoid-preempt_disable-ed-section.patch
+char-random-don-t-print-that-the-init-is-done.patch
# HOTPLUG
+# XXX
cpu-rt-make-hotplug-lock-a-sleeping-spinlock-on-rt.patch
cpu-rt-rework-cpu-down.patch
cpu-hotplug-Document-why-PREEMPT_RT-uses-a-spinlock.patch
@@ -546,7 +610,7 @@ kernel-cpu-fix-cpu-down-problem-if-kthread-s-cpu-is-.patch
kernel-hotplug-restore-original-cpu-mask-oncpu-down.patch
cpu_down_move_migrate_enable_back.patch
hotplug-Use-set_cpus_allowed_ptr-in-sync_unplug_thre.patch
-
+#
rt-locking-Reenable-migration-accross-schedule.patch
# SCSCI QLA2xxx
@@ -599,6 +663,7 @@ drm-i915-drop-trace_i915_gem_ring_dispatch-onrt.patch
i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+drm-i915-init-spinlock-properly-on-RT.patch
# CGROUPS
cgroups-use-simple-wait-in-css_release.patch
diff --git a/patches/signal-fix-up-rcu-wreckage.patch b/patches/signal-fix-up-rcu-wreckage.patch
index 71a57f38292a..7968da84a556 100644
--- a/patches/signal-fix-up-rcu-wreckage.patch
+++ b/patches/signal-fix-up-rcu-wreckage.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1276,12 +1276,12 @@ struct sighand_struct *__lock_task_sigha
+@@ -1287,12 +1287,12 @@ struct sighand_struct *__lock_task_sigha
* Disable interrupts early to avoid deadlocks.
* See rcu_read_unlock() comment header for details.
*/
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
}
/*
-@@ -1302,7 +1302,7 @@ struct sighand_struct *__lock_task_sigha
+@@ -1313,7 +1313,7 @@ struct sighand_struct *__lock_task_sigha
}
spin_unlock(&sighand->siglock);
rcu_read_unlock();
diff --git a/patches/signal-revert-ptrace-preempt-magic.patch b/patches/signal-revert-ptrace-preempt-magic.patch
index 7152ebca6ea5..0857b62353b1 100644
--- a/patches/signal-revert-ptrace-preempt-magic.patch
+++ b/patches/signal-revert-ptrace-preempt-magic.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -1846,15 +1846,7 @@ static void ptrace_stop(int exit_code, i
+@@ -1857,15 +1857,7 @@ static void ptrace_stop(int exit_code, i
if (gstop_done && ptrace_reparented(current))
do_notify_parent_cldstop(current, false, why);
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index b66f40b5ba31..6aa199801fd3 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -8,26 +8,27 @@ task struct.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/sched.h | 1
+ include/linux/sched.h | 2 +
include/linux/signal.h | 1
kernel/exit.c | 2 -
kernel/fork.c | 1
kernel/signal.c | 69 ++++++++++++++++++++++++++++++++++++++++++++++---
- 5 files changed, 69 insertions(+), 5 deletions(-)
+ 5 files changed, 70 insertions(+), 5 deletions(-)
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1689,6 +1689,7 @@ struct task_struct {
- /* signal handlers */
- struct signal_struct *signal;
- struct sighand_struct *sighand;
-+ struct sigqueue *sigqueue_cache;
-
- sigset_t blocked, real_blocked;
- sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+@@ -753,6 +753,8 @@ struct task_struct {
+ /* Signal handlers: */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
++ struct sigqueue *sigqueue_cache;
++
+ sigset_t blocked;
+ sigset_t real_blocked;
+ /* Restored if set_restore_sigmask() was used: */
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
-@@ -233,6 +233,7 @@ static inline void init_sigpending(struc
+@@ -231,6 +231,7 @@ static inline void init_sigpending(struc
}
extern void flush_sigqueue(struct sigpending *queue);
@@ -37,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int valid_signal(unsigned long sig)
--- a/kernel/exit.c
+++ b/kernel/exit.c
-@@ -143,7 +143,7 @@ static void __exit_signal(struct task_st
+@@ -159,7 +159,7 @@ static void __exit_signal(struct task_st
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
*/
@@ -48,25 +49,25 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1553,6 +1553,7 @@ static __latent_entropy struct task_stru
+@@ -1607,6 +1607,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
+ p->sigqueue_cache = NULL;
p->utime = p->stime = p->gtime = 0;
- p->utimescaled = p->stimescaled = 0;
+ #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
--- a/kernel/signal.c
+++ b/kernel/signal.c
-@@ -14,6 +14,7 @@
- #include <linux/export.h>
- #include <linux/init.h>
- #include <linux/sched.h>
+@@ -19,6 +19,7 @@
+ #include <linux/sched/task.h>
+ #include <linux/sched/task_stack.h>
+ #include <linux/sched/cputime.h>
+#include <linux/sched/rt.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
-@@ -352,13 +353,30 @@ static bool task_participate_group_stop(
+@@ -357,13 +358,30 @@ static bool task_participate_group_stop(
return false;
}
@@ -98,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
struct sigqueue *q = NULL;
struct user_struct *user;
-@@ -375,7 +393,10 @@ static struct sigqueue *
+@@ -380,7 +398,10 @@ static struct sigqueue *
if (override_rlimit ||
atomic_read(&user->sigpending) <=
task_rlimit(t, RLIMIT_SIGPENDING)) {
@@ -110,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
print_dropped_signal(sig);
}
-@@ -392,6 +413,13 @@ static struct sigqueue *
+@@ -397,6 +418,13 @@ static struct sigqueue *
return q;
}
@@ -124,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
-@@ -401,6 +429,21 @@ static void __sigqueue_free(struct sigqu
+@@ -406,6 +434,21 @@ static void __sigqueue_free(struct sigqu
kmem_cache_free(sigqueue_cachep, q);
}
@@ -146,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void flush_sigqueue(struct sigpending *queue)
{
struct sigqueue *q;
-@@ -414,6 +457,21 @@ void flush_sigqueue(struct sigpending *q
+@@ -419,6 +462,21 @@ void flush_sigqueue(struct sigpending *q
}
/*
@@ -168,7 +169,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Flush all pending signals for this kthread.
*/
void flush_signals(struct task_struct *t)
-@@ -525,7 +583,7 @@ static void collect_signal(int sig, stru
+@@ -532,7 +590,7 @@ static void collect_signal(int sig, stru
still_pending:
list_del_init(&first->list);
copy_siginfo(info, &first->info);
@@ -177,7 +178,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* Ok, it wasn't in the queue. This must be
-@@ -560,6 +618,8 @@ int dequeue_signal(struct task_struct *t
+@@ -567,6 +625,8 @@ int dequeue_signal(struct task_struct *t
{
int signr;
@@ -186,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We only dequeue private signals from ourselves, we don't let
* signalfd steal them
*/
-@@ -1485,7 +1545,8 @@ EXPORT_SYMBOL(kill_pid);
+@@ -1496,7 +1556,8 @@ EXPORT_SYMBOL(kill_pid);
*/
struct sigqueue *sigqueue_alloc(void)
{
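
The patch above gives every task a single cached sigqueue entry so the signal fast path can usually skip the slab allocator and, on RT, avoid allocating in contexts where that hurts. Below is a minimal standalone model of that one-slot caching idea; the names (struct sigq, sigq_alloc, sigq_free) are hypothetical and the PREALLOC and atomic-context checks of the real code are omitted.

/*
 * Standalone sketch of a one-slot per-task cache (hypothetical names,
 * not the kernel's structures).
 */
#include <stdlib.h>

struct sigq { int sig; };

struct task {
	struct sigq *sigqueue_cache;	/* one cached entry, or NULL */
};

static struct sigq *sigq_alloc(struct task *t)
{
	struct sigq *q = t->sigqueue_cache;

	if (q)				/* fast path: reuse the cached entry */
		t->sigqueue_cache = NULL;
	else				/* slow path: fall back to the allocator */
		q = malloc(sizeof(*q));
	return q;
}

static void sigq_free(struct task *t, struct sigq *q)
{
	if (!t->sigqueue_cache)		/* keep at most one entry cached */
		t->sigqueue_cache = q;
	else
		free(q);
}

int main(void)
{
	struct task t = { .sigqueue_cache = NULL };
	struct sigq *q = sigq_alloc(&t);	/* comes from malloc() */

	sigq_free(&t, q);			/* cached for the next request */
	q = sigq_alloc(&t);			/* reused without hitting malloc() */
	free(q);
	return 0;
}
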
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index b5ad49cc34c8..824c55abac37 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2855,6 +2855,7 @@ struct softnet_data {
+@@ -2767,6 +2767,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -284,6 +284,7 @@ struct sk_buff_head {
+@@ -285,6 +285,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(s
+@@ -1587,6 +1588,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4337,7 +4337,7 @@ static void flush_backlog(struct work_st
+@@ -4318,7 +4318,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4347,11 +4347,14 @@ static void flush_backlog(struct work_st
+@@ -4328,11 +4328,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -4876,7 +4879,9 @@ static int process_backlog(struct napi_s
+@@ -4866,7 +4869,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -4884,9 +4889,9 @@ static int process_backlog(struct napi_s
+@@ -4874,9 +4879,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5228,13 +5233,21 @@ static __latent_entropy void net_rx_acti
+@@ -5317,13 +5322,21 @@ static __latent_entropy void net_rx_acti
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
unsigned long time_limit = jiffies + 2;
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8037,6 +8050,9 @@ static int dev_cpu_callback(struct notif
+@@ -8084,6 +8097,9 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -141,9 +141,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ kfree_skb(skb);
+ }
- return NOTIFY_OK;
+ return 0;
}
-@@ -8341,8 +8357,9 @@ static int __init net_dev_init(void)
+@@ -8387,8 +8403,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index 4cdb5a65c4b1..b785ad0c55bc 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1799,7 +1799,7 @@ config SLAB_FREELIST_RANDOM
+@@ -1865,7 +1865,7 @@ config SLAB_FREELIST_RANDOM
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index b4af19555631..e648f7fbf2d8 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1533,14 +1533,17 @@ static struct page *allocate_slab(struct
+@@ -1538,14 +1538,17 @@ static struct page *allocate_slab(struct
void *start, *p;
int idx, order;
bool shuffle;
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ if (gfpflags_allow_blocking(flags))
+ enableirqs = true;
#ifdef CONFIG_PREEMPT_RT_FULL
- if (system_state == SYSTEM_RUNNING)
+ if (system_state > SYSTEM_BOOTING)
-#else
- if (gfpflags_allow_blocking(flags))
+ enableirqs = true;
@@ -32,12 +32,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1615,11 +1618,7 @@ static struct page *allocate_slab(struct
+@@ -1620,11 +1623,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
-#ifdef CONFIG_PREEMPT_RT_FULL
-- if (system_state == SYSTEM_RUNNING)
+- if (system_state > SYSTEM_BOOTING)
-#else
- if (gfpflags_allow_blocking(flags))
-#endif
diff --git a/patches/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch b/patches/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
index bd96fad33758..92557121d3cd 100644
--- a/patches/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
+++ b/patches/snd-pcm-fix-snd_pcm_stream_lock-irqs_disabled-splats.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
-@@ -135,7 +135,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock)
+@@ -136,7 +136,7 @@ EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock)
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
if (!substream->pcm->nonatomic)
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
snd_pcm_stream_lock(substream);
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
-@@ -150,7 +150,7 @@ void snd_pcm_stream_unlock_irq(struct sn
+@@ -151,7 +151,7 @@ void snd_pcm_stream_unlock_irq(struct sn
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq);
-@@ -158,7 +158,7 @@ unsigned long _snd_pcm_stream_lock_irqsa
+@@ -159,7 +159,7 @@ unsigned long _snd_pcm_stream_lock_irqsa
{
unsigned long flags = 0;
if (!substream->pcm->nonatomic)
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
snd_pcm_stream_lock(substream);
return flags;
}
-@@ -176,7 +176,7 @@ void snd_pcm_stream_unlock_irqrestore(st
+@@ -177,7 +177,7 @@ void snd_pcm_stream_unlock_irqrestore(st
{
snd_pcm_stream_unlock(substream);
if (!substream->pcm->nonatomic)
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index 2ba3305f0625..c79b47661028 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -894,6 +894,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -892,6 +892,7 @@ EXPORT_SYMBOL(native_load_gs_index)
jmp 2b
.previous
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -906,6 +907,7 @@ ENTRY(do_softirq_own_stack)
+@@ -904,6 +905,7 @@ ENTRY(do_softirq_own_stack)
decl PER_CPU_VAR(irq_count)
ret
END(do_softirq_own_stack)
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -472,7 +472,7 @@ struct softirq_action
+@@ -484,7 +484,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 0af390d4852c..15a4ff830ee1 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
-@@ -51,6 +51,7 @@ static void trigger_softirq(void *data)
+@@ -52,6 +52,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -89,6 +90,7 @@ static int blk_softirq_cpu_dead(unsigned
+@@ -90,6 +91,7 @@ static int blk_softirq_cpu_dead(unsigned
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -141,6 +143,7 @@ void __blk_complete_request(struct reque
+@@ -142,6 +144,7 @@ void __blk_complete_request(struct reque
goto do_local;
local_irq_restore(flags);
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -160,8 +160,10 @@ do { \
+@@ -186,8 +186,10 @@ do { \
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -59,14 +59,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -232,6 +234,7 @@ do { \
+@@ -274,6 +276,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
+#define preempt_check_resched_rt() barrier()
#define preemptible() 0
- #endif /* CONFIG_PREEMPT_COUNT */
+ #define migrate_disable() barrier()
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2285,6 +2285,7 @@ static void __netif_reschedule(struct Qd
+@@ -2403,6 +2403,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2366,6 +2367,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2465,6 +2466,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3785,6 +3787,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3772,6 +3774,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -4831,6 +4834,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4821,6 +4824,7 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Send pending IPI's to kick RPS processing on remote cpus. */
while (remsd) {
-@@ -4844,6 +4848,7 @@ static void net_rps_action_and_irq_enabl
+@@ -4834,6 +4838,7 @@ static void net_rps_action_and_irq_enabl
} else
#endif
local_irq_enable();
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -4921,6 +4926,7 @@ void __napi_schedule(struct napi_struct
+@@ -4911,6 +4916,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8022,6 +8028,7 @@ static int dev_cpu_callback(struct notif
+@@ -8069,6 +8075,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
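
The hunks above keep adding preempt_check_resched_rt() right after interrupts are re-enabled in paths that raised a softirq with interrupts off: on RT the raise may have woken the softirq thread, and that wakeup cannot take effect until an explicit check runs once interrupts are back on. The following is a minimal pthread model of that raise-then-kick ordering; all names are illustrative and none of this is the kernel API.

/*
 * Model: work is marked pending while "interrupts" are off (a held lock),
 * and the kick that lets the worker run is issued only after the lock is
 * dropped, mirroring preempt_check_resched_rt() after local_irq_restore().
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;	/* "IRQs off" */
static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;
static bool work_pending;

static void *softirq_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&work_lock);
	while (!work_pending)
		pthread_cond_wait(&work_cv, &work_lock);
	work_pending = false;
	pthread_mutex_unlock(&work_lock);
	puts("softirq work ran");
	return NULL;
}

static void raise_work_irqoff(void)
{
	/* Called with irq_lock held: only mark the work pending. */
	pthread_mutex_lock(&work_lock);
	work_pending = true;
	pthread_mutex_unlock(&work_lock);
}

static void check_resched_after_irqs_on(void)
{
	/* The deferred kick, issued once "interrupts" are enabled again. */
	pthread_mutex_lock(&work_lock);
	if (work_pending)
		pthread_cond_signal(&work_cv);
	pthread_mutex_unlock(&work_lock);
}

int main(void)
{
	pthread_t thr;

	pthread_create(&thr, NULL, softirq_thread, NULL);
	pthread_mutex_lock(&irq_lock);		/* local_irq_save()        */
	raise_work_irqoff();			/* raise_softirq_irqoff()  */
	pthread_mutex_unlock(&irq_lock);	/* local_irq_restore()     */
	check_resched_after_irqs_on();		/* preempt_check_resched_rt() */
	pthread_join(thr, NULL);
	return 0;
}
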
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 9c2c45c2d969..c5ef4368c659 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -469,10 +469,11 @@ struct softirq_action
+@@ -481,10 +481,11 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
-@@ -480,6 +481,9 @@ static inline void do_softirq_own_stack(
+@@ -492,6 +493,9 @@ static inline void do_softirq_own_stack(
__do_softirq();
}
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -487,6 +491,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -499,6 +503,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -644,6 +649,12 @@ void tasklet_hrtimer_cancel(struct taskl
+@@ -656,6 +661,12 @@ void tasklet_hrtimer_cancel(struct taskl
tasklet_kill(&ttimer->tasklet);
}
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
-@@ -59,9 +63,15 @@
+@@ -80,9 +84,15 @@
#include <asm/preempt.h>
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
@@ -162,36 +162,36 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Are we doing bottom half or hardware interrupt processing?
-@@ -72,7 +82,6 @@
+@@ -100,7 +110,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-
- /*
- * Are we in NMI context?
+ #define in_nmi() (preempt_count() & NMI_MASK)
+ #define in_task() (!(preempt_count() & \
+ (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1970,6 +1970,8 @@ struct task_struct {
+@@ -1055,6 +1055,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
- struct rcu_head put_rcu;
-+ int softirq_nestcnt;
-+ unsigned int softirqs_raised;
+ struct rcu_head put_rcu;
++ int softirq_nestcnt;
++ unsigned int softirqs_raised;
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
- unsigned long task_state_change;
-@@ -2286,6 +2288,7 @@ extern void thread_group_cputime_adjuste
+ unsigned long task_state_change;
+@@ -1227,6 +1229,7 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
-+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
- #define PF_EXITING 0x00000004 /* getting shut down */
- #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
- #define PF_VCPU 0x00000010 /* I'm a virtual CPU */
++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */
+ #define PF_IDLE 0x00000002 /* I am an IDLE thread */
+ #define PF_EXITING 0x00000004 /* Getting shut down */
+ #define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
--- a/init/main.c
+++ b/init/main.c
-@@ -507,6 +507,7 @@ asmlinkage __visible void __init start_k
+@@ -537,6 +537,7 @@ asmlinkage __visible void __init start_k
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -798,7 +798,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.thread_comm = "ksoftirqd/%u",
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -882,14 +882,7 @@ static bool can_stop_idle_tick(int cpu,
+@@ -881,14 +881,7 @@ static bool can_stop_idle_tick(int cpu,
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -816,7 +816,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3856,11 +3856,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -3844,11 +3844,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/softirq-wake-the-timer-softirq-if-needed.patch b/patches/softirq-wake-the-timer-softirq-if-needed.patch
index 12e9ba287cd4..0fefa35d95a5 100644
--- a/patches/softirq-wake-the-timer-softirq-if-needed.patch
+++ b/patches/softirq-wake-the-timer-softirq-if-needed.patch
@@ -24,12 +24,20 @@ Cc: stable-rt@vger.kernel.org
Signed-off-by: Mike Galbraith <efault@gmx.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/softirq.c | 10 ++++------
- 1 file changed, 4 insertions(+), 6 deletions(-)
+ kernel/softirq.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
-@@ -206,6 +206,7 @@ static void handle_softirq(unsigned int
+@@ -28,6 +28,7 @@
+ #include <linux/tick.h>
+ #include <linux/locallock.h>
+ #include <linux/irq.h>
++#include <linux/sched/types.h>
+
+ #define CREATE_TRACE_POINTS
+ #include <trace/events/irq.h>
+@@ -206,6 +207,7 @@ static void handle_softirq(unsigned int
}
}
@@ -37,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
* right now. Let ksoftirqd handle this at its own rate, to get fairness.
-@@ -217,7 +218,6 @@ static bool ksoftirqd_running(void)
+@@ -217,7 +219,6 @@ static bool ksoftirqd_running(void)
return tsk && (tsk->state == TASK_RUNNING);
}
@@ -45,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int ksoftirqd_softirq_pending(void)
{
return local_softirq_pending();
-@@ -773,13 +773,10 @@ void irq_enter(void)
+@@ -773,13 +774,10 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
@@ -61,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!force_irqthreads) {
#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
/*
-@@ -800,6 +797,7 @@ static inline void invoke_softirq(void)
+@@ -800,6 +798,7 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
}
#else /* PREEMPT_RT_FULL */
diff --git a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 519070ebf36b..7b7c1ca29164 100644
--- a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
-@@ -194,12 +194,10 @@ config NR_CPUS
+@@ -199,12 +199,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index 452a52a84303..069930c49634 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -488,6 +488,7 @@ extern enum system_states {
+@@ -499,6 +499,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TAINT_PROPRIETARY_MODULE 0
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
-@@ -286,6 +286,8 @@ static int create_image(int platform_mod
+@@ -287,6 +287,8 @@ static int create_image(int platform_mod
local_irq_disable();
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+
error = syscore_suspend();
if (error) {
- printk(KERN_ERR "PM: Some system devices failed to power down, "
+ pr_err("Some system devices failed to power down, aborting hibernation\n");
@@ -317,6 +319,7 @@ static int create_image(int platform_mod
syscore_resume();
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -446,6 +449,7 @@ static int resume_target_kernel(bool pla
+@@ -445,6 +448,7 @@ static int resume_target_kernel(bool pla
goto Enable_cpus;
local_irq_disable();
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (error)
-@@ -479,6 +483,7 @@ static int resume_target_kernel(bool pla
+@@ -478,6 +482,7 @@ static int resume_target_kernel(bool pla
syscore_resume();
Enable_irqs:
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
Enable_cpus:
-@@ -564,6 +569,7 @@ int hibernation_platform_enter(void)
+@@ -563,6 +568,7 @@ int hibernation_platform_enter(void)
goto Enable_cpus;
local_irq_disable();
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
syscore_suspend();
if (pm_wakeup_pending()) {
error = -EAGAIN;
-@@ -576,6 +582,7 @@ int hibernation_platform_enter(void)
+@@ -575,6 +581,7 @@ int hibernation_platform_enter(void)
Power_up:
syscore_resume();
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Enable_cpus:
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -369,6 +369,8 @@ static int suspend_enter(suspend_state_t
+@@ -384,6 +384,8 @@ static int suspend_enter(suspend_state_t
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -385,6 +387,8 @@ static int suspend_enter(suspend_state_t
+@@ -400,6 +402,8 @@ static int suspend_enter(suspend_state_t
syscore_resume();
}
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index 808d6cd4947e..6bc42d7508fc 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -508,8 +508,9 @@ static inline struct task_struct *this_c
+@@ -520,8 +520,9 @@ static inline struct task_struct *this_c
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -534,27 +535,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -546,27 +547,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -603,12 +613,7 @@ static inline void tasklet_disable(struc
+@@ -615,12 +625,7 @@ static inline void tasklet_disable(struc
smp_mb();
}
diff --git a/patches/thermal-Defer-thermal-wakups-to-threads.patch b/patches/thermal-Defer-thermal-wakups-to-threads.patch
index 8cc51be0c906..72faa3db2d20 100644
--- a/patches/thermal-Defer-thermal-wakups-to-threads.patch
+++ b/patches/thermal-Defer-thermal-wakups-to-threads.patch
@@ -23,8 +23,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
[bigeasy: reoder init/denit position. TODO: flush swork on exit]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/thermal/x86_pkg_temp_thermal.c | 50 +++++++++++++++++++++++++++++++--
- 1 file changed, 47 insertions(+), 3 deletions(-)
+ drivers/thermal/x86_pkg_temp_thermal.c | 52 +++++++++++++++++++++++++++++++--
+ 1 file changed, 49 insertions(+), 3 deletions(-)
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -36,34 +36,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/cpu_device_id.h>
#include <asm/mce.h>
-@@ -353,7 +354,7 @@ static void pkg_temp_thermal_threshold_w
- }
+@@ -329,7 +330,7 @@ static void pkg_thermal_schedule_work(in
+ schedule_delayed_work_on(cpu, work, ms);
}
--static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+static void platform_thermal_notify_work(struct swork_event *event)
+-static int pkg_thermal_notify(u64 msr_val)
++static void pkg_thermal_notify_work(struct swork_event *event)
{
- unsigned long flags;
int cpu = smp_processor_id();
-@@ -370,7 +371,7 @@ static int pkg_temp_thermal_platform_the
- pkg_work_scheduled[phy_id]) {
- disable_pkg_thres_interrupt();
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-- return -EINVAL;
-+ return;
+ struct pkg_device *pkgdev;
+@@ -348,9 +349,47 @@ static int pkg_thermal_notify(u64 msr_va
}
- pkg_work_scheduled[phy_id] = 1;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -379,9 +380,48 @@ static int pkg_temp_thermal_platform_the
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
+
+ spin_unlock_irqrestore(&pkg_temp_lock, flags);
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct swork_event notify_work;
+
-+static int thermal_notify_work_init(void)
++static int pkg_thermal_notify_work_init(void)
+{
+ int err;
+
@@ -71,16 +62,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (err)
+ return err;
+
-+ INIT_SWORK(&notify_work, platform_thermal_notify_work);
++ INIT_SWORK(&notify_work, pkg_thermal_notify_work);
return 0;
}
-+static void thermal_notify_work_cleanup(void)
++static void pkg_thermal_notify_work_cleanup(void)
+{
+ swork_put();
+}
+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static int pkg_thermal_notify(u64 msr_val)
+{
+ swork_queue(&notify_work);
+ return 0;
@@ -88,45 +79,51 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+#else /* !CONFIG_PREEMPT_RT_FULL */
+
-+static int thermal_notify_work_init(void) { return 0; }
++static int pkg_thermal_notify_work_init(void) { return 0; }
+
-+static void thermal_notify_work_cleanup(void) { }
++static void pkg_thermal_notify_work_cleanup(void) { }
+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
++static int pkg_thermal_notify(u64 msr_val)
+{
-+ platform_thermal_notify_work(NULL);
-+
++ pkg_thermal_notify_work(NULL);
+ return 0;
+}
+#endif /* CONFIG_PREEMPT_RT_FULL */
+
- static int find_siblings_cpu(int cpu)
+ static int pkg_temp_thermal_device_add(unsigned int cpu)
{
- int i;
-@@ -585,6 +625,9 @@ static int __init pkg_temp_thermal_init(
+ int pkgid = topology_logical_package_id(cpu);
+@@ -515,10 +554,15 @@ static int __init pkg_temp_thermal_init(
if (!x86_match_cpu(pkg_temp_thermal_ids))
return -ENODEV;
-+ if (!thermal_notify_work_init())
++ if (!pkg_thermal_notify_work_init())
+ return -ENODEV;
+
- spin_lock_init(&pkg_work_lock);
- platform_thermal_package_notify =
- pkg_temp_thermal_platform_thermal_notify;
-@@ -609,7 +652,7 @@ static int __init pkg_temp_thermal_init(
- kfree(pkg_work_scheduled);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
--
-+ thermal_notify_work_cleanup();
- return -ENODEV;
+ max_packages = topology_max_packages();
+ packages = kzalloc(max_packages * sizeof(struct pkg_device *), GFP_KERNEL);
+- if (!packages)
+- return -ENOMEM;
++ if (!packages) {
++ ret = -ENOMEM;
++ goto err;
++ }
+
+ ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "thermal/x86_pkg:online",
+ pkg_thermal_cpu_online, pkg_thermal_cpu_offline);
+@@ -536,6 +580,7 @@ static int __init pkg_temp_thermal_init(
+ return 0;
+
+ err:
++ pkg_thermal_notify_work_cleanup();
+ kfree(packages);
+ return ret;
+ }
+@@ -549,6 +594,7 @@ static void __exit pkg_temp_thermal_exit
+ cpuhp_remove_state(pkg_thermal_hp_state);
+ debugfs_remove_recursive(debugfs);
+ kfree(packages);
++ pkg_thermal_notify_work_cleanup();
}
+ module_exit(pkg_temp_thermal_exit)
-@@ -634,6 +677,7 @@ static void __exit pkg_temp_thermal_exit
- mutex_unlock(&phy_dev_list_mutex);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
-+ thermal_notify_work_cleanup();
- for_each_online_cpu(i)
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, i));
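
The rewritten patch above keeps the package-thermal MSR notification cheap in hard interrupt context on RT: pkg_thermal_notify() only queues deferred work (swork) and a helper thread does the threshold handling, while the non-RT build keeps calling the handler directly. A rough sketch of that compile-time split follows, using made-up names and a plain function pointer in place of the swork machinery.

/*
 * Sketch only: MODEL_PREEMPT_RT stands in for CONFIG_PREEMPT_RT_FULL and
 * swork_queue_model() for the real swork API.
 */
#include <stdio.h>

static void thermal_notify_work(void)
{
	puts("check package thresholds and schedule per-CPU work");
}

#ifdef MODEL_PREEMPT_RT
static void (*queued_work)(void);

static void swork_queue_model(void (*fn)(void))
{
	queued_work = fn;	/* a dedicated helper thread would run this later */
	puts("thermal work deferred to the helper thread");
}

static int thermal_notify(unsigned long long msr_val)
{
	(void)msr_val;
	swork_queue_model(thermal_notify_work);	/* cheap path for hard-irq context */
	return 0;
}
#else
static int thermal_notify(unsigned long long msr_val)
{
	(void)msr_val;
	thermal_notify_work();			/* non-RT: run the handler in place */
	return 0;
}
#endif

int main(void)
{
	return thermal_notify(0);
}
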
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index 7c1f4f405ad6..651d5c104036 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
-@@ -62,7 +62,8 @@ static void tick_do_update_jiffies64(kti
+@@ -66,7 +66,8 @@ static void tick_do_update_jiffies64(kti
return;
/* Reevaluate with jiffies_lock held */
@@ -81,8 +81,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ write_seqcount_begin(&jiffies_seq);
delta = ktime_sub(now, last_jiffies_update);
- if (delta.tv64 >= tick_period.tv64) {
-@@ -85,10 +86,12 @@ static void tick_do_update_jiffies64(kti
+ if (delta >= tick_period) {
+@@ -89,10 +90,12 @@ static void tick_do_update_jiffies64(kti
/* Keep the tick_next_period variable up to date */
tick_next_period = ktime_add(last_jiffies_update, tick_period);
} else {
@@ -97,7 +97,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
update_wall_time();
}
-@@ -99,12 +102,14 @@ static ktime_t tick_init_jiffy_update(vo
+@@ -103,12 +106,14 @@ static ktime_t tick_init_jiffy_update(vo
{
ktime_t period;
@@ -105,7 +105,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ raw_spin_lock(&jiffies_lock);
+ write_seqcount_begin(&jiffies_seq);
/* Did we start the jiffies update yet ? */
- if (last_jiffies_update.tv64 == 0)
+ if (last_jiffies_update == 0)
last_jiffies_update = tick_next_period;
period = last_jiffies_update;
- write_sequnlock(&jiffies_lock);
@@ -114,13 +114,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return period;
}
-@@ -673,10 +678,10 @@ static ktime_t tick_nohz_stop_sched_tick
+@@ -672,10 +677,10 @@ static ktime_t tick_nohz_stop_sched_tick
/* Read jiffies and the time when jiffies were updated last */
do {
- seq = read_seqbegin(&jiffies_lock);
+ seq = read_seqcount_begin(&jiffies_seq);
- basemono = last_jiffies_update.tv64;
+ basemono = last_jiffies_update;
basejiff = jiffies;
- } while (read_seqretry(&jiffies_lock, seq));
+ } while (read_seqcount_retry(&jiffies_seq, seq));
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (rcu_needs_cpu(basemono, &next_rcu) ||
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
-@@ -2328,8 +2328,10 @@ EXPORT_SYMBOL(hardpps);
+@@ -2302,8 +2302,10 @@ EXPORT_SYMBOL(hardpps);
*/
void xtime_update(unsigned long ticks)
{
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
-@@ -19,7 +19,8 @@ extern void timekeeping_resume(void);
+@@ -17,7 +17,8 @@ extern void timekeeping_resume(void);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
diff --git a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index ff005701de30..720089a460de 100644
--- a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1641,13 +1641,13 @@ void update_process_times(int user_tick)
+@@ -1601,13 +1601,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
@@ -70,6 +70,6 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
irq_work_tick();
#endif
- scheduler_tick();
- run_posix_cpu_timers(p);
+ if (IS_ENABLED(CONFIG_POSIX_TIMERS))
+ run_posix_cpu_timers(p);
}
-
diff --git a/patches/timer-hrtimer-check-properly-for-a-running-timer.patch b/patches/timer-hrtimer-check-properly-for-a-running-timer.patch
index ecd1f1ef833a..b94a0aa120d8 100644
--- a/patches/timer-hrtimer-check-properly-for-a-running-timer.patch
+++ b/patches/timer-hrtimer-check-properly-for-a-running-timer.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -455,7 +455,13 @@ static inline int hrtimer_is_queued(stru
+@@ -444,7 +444,13 @@ static inline int hrtimer_is_queued(stru
*/
static inline int hrtimer_callback_running(const struct hrtimer *timer)
{
diff --git a/patches/timer-make-the-base-lock-raw.patch b/patches/timer-make-the-base-lock-raw.patch
index e1b4d57816dc..f3f6f8800276 100644
--- a/patches/timer-make-the-base-lock-raw.patch
+++ b/patches/timer-make-the-base-lock-raw.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -193,7 +193,7 @@ EXPORT_SYMBOL(jiffies_64);
+@@ -195,7 +195,7 @@ EXPORT_SYMBOL(jiffies_64);
#endif
struct timer_base {
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct timer_list *running_timer;
unsigned long clk;
unsigned long next_expiry;
-@@ -948,10 +948,10 @@ static struct timer_base *lock_timer_bas
+@@ -913,10 +913,10 @@ static struct timer_base *lock_timer_bas
if (!(tf & TIMER_MIGRATING)) {
base = get_timer_base(tf);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
cpu_relax();
}
-@@ -1023,9 +1023,9 @@ static inline int
+@@ -986,9 +986,9 @@ static inline int
/* See the comment in lock_timer_base() */
timer->flags |= TIMER_MIGRATING;
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WRITE_ONCE(timer->flags,
(timer->flags & ~TIMER_BASEMASK) | base->cpu);
}
-@@ -1050,7 +1050,7 @@ static inline int
+@@ -1013,7 +1013,7 @@ static inline int
}
out_unlock:
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1144,16 +1144,16 @@ void add_timer_on(struct timer_list *tim
+@@ -1106,16 +1106,16 @@ void add_timer_on(struct timer_list *tim
if (base != new_base) {
timer->flags |= TIMER_MIGRATING;
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(add_timer_on);
-@@ -1180,7 +1180,7 @@ int del_timer(struct timer_list *timer)
+@@ -1141,7 +1141,7 @@ int del_timer(struct timer_list *timer)
if (timer_pending(timer)) {
base = lock_timer_base(timer, &flags);
ret = detach_if_pending(timer, base, true);
@@ -85,16 +85,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
return ret;
-@@ -1208,7 +1208,7 @@ int try_to_del_timer_sync(struct timer_l
- timer_stats_timer_clear_start_info(timer);
+@@ -1168,7 +1168,7 @@ int try_to_del_timer_sync(struct timer_l
+ if (base->running_timer != timer)
ret = detach_if_pending(timer, base, true);
- }
+
- spin_unlock_irqrestore(&base->lock, flags);
+ raw_spin_unlock_irqrestore(&base->lock, flags);
return ret;
}
-@@ -1340,13 +1340,13 @@ static void expire_timers(struct timer_b
+@@ -1299,13 +1299,13 @@ static void expire_timers(struct timer_b
data = timer->data;
if (timer->flags & TIMER_IRQSAFE) {
@@ -112,7 +112,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
}
-@@ -1515,7 +1515,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1474,7 +1474,7 @@ u64 get_next_timer_interrupt(unsigned lo
if (cpu_is_offline(smp_processor_id()))
return expires;
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
nextevt = __next_timer_interrupt(base);
is_max_delta = (nextevt == base->clk + NEXT_TIMER_MAX_DELTA);
base->next_expiry = nextevt;
-@@ -1543,7 +1543,7 @@ u64 get_next_timer_interrupt(unsigned lo
+@@ -1502,7 +1502,7 @@ u64 get_next_timer_interrupt(unsigned lo
if ((expires - basem) > TICK_NSEC)
base->is_idle = true;
}
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return cmp_next_hrtimer_event(basem, expires);
}
-@@ -1630,7 +1630,7 @@ static inline void __run_timers(struct t
+@@ -1590,7 +1590,7 @@ static inline void __run_timers(struct t
if (!time_after_eq(jiffies, base->clk))
return;
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while (time_after_eq(jiffies, base->clk)) {
-@@ -1641,7 +1641,7 @@ static inline void __run_timers(struct t
+@@ -1601,7 +1601,7 @@ static inline void __run_timers(struct t
expire_timers(base, heads + levels);
}
base->running_timer = NULL;
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1836,16 +1836,16 @@ int timers_dead_cpu(unsigned int cpu)
+@@ -1786,16 +1786,16 @@ int timers_dead_cpu(unsigned int cpu)
* The caller is globally serialized and nobody else
* takes two locks at once, deadlock is not possible.
*/
@@ -169,7 +169,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
put_cpu_ptr(&timer_bases);
}
return 0;
-@@ -1861,7 +1861,7 @@ static void __init init_timer_cpu(int cp
+@@ -1811,7 +1811,7 @@ static void __init init_timer_cpu(int cp
for (i = 0; i < NR_BASES; i++) {
base = per_cpu_ptr(&timer_bases[i], cpu);
base->cpu = cpu;
diff --git a/patches/timers-Don-t-wake-ktimersoftd-on-every-tick.patch b/patches/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
deleted file mode 100644
index 10d6478aa7dc..000000000000
--- a/patches/timers-Don-t-wake-ktimersoftd-on-every-tick.patch
+++ /dev/null
@@ -1,228 +0,0 @@
-From: Haris Okanovic <haris.okanovic@ni.com>
-Date: Fri, 3 Feb 2017 17:26:44 +0100
-Subject: [PATCH] timers: Don't wake ktimersoftd on every tick
-
-We recently upgraded from 4.1 to 4.6 and noticed a minor latency
-regression caused by an additional thread wakeup (ktimersoftd) in
-interrupt context on every tick. The wakeups are from
-run_local_timers() raising TIMER_SOFTIRQ. Both TIMER and SCHED softirq
-coalesced into one ksoftirqd wakeup prior to Sebastian's change to split
-timers into their own thread.
-
-There's already logic in run_local_timers() to avoid some unnecessary
-wakeups of ksoftirqd, but it doesn't seems to catch them all. In
-particular, I've seen many unnecessary wakeups when jiffies increments
-prior to run_local_timers().
-
-Change the way timers are collected per Julia and Thomas'
-recommendation: Expired timers are now collected in interrupt context
-and fired in ktimersoftd to avoid double-walk of `pending_map`.
-
-Collect expired timers in interrupt context to avoid overhead of waking
-ktimersoftd on every tick. ktimersoftd now wakes only when one or more
-timers are ready, which yields a minor reduction in small latency spikes.
-
-This is implemented by storing lists of expired timers in timer_base,
-updated on each tick. Any addition to the lists wakes ktimersoftd
-(softirq) to process those timers.
-
-Signed-off-by: Haris Okanovic <haris.okanovic@ni.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/timer.c | 96 ++++++++++++++++++++++++++++++++++++----------------
- 1 file changed, 67 insertions(+), 29 deletions(-)
-
---- a/kernel/time/timer.c
-+++ b/kernel/time/timer.c
-@@ -206,6 +206,8 @@ struct timer_base {
- bool is_idle;
- DECLARE_BITMAP(pending_map, WHEEL_SIZE);
- struct hlist_head vectors[WHEEL_SIZE];
-+ struct hlist_head expired_lists[LVL_DEPTH];
-+ int expired_count;
- } ____cacheline_aligned;
-
- static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
-@@ -1353,7 +1355,8 @@ static void call_timer_fn(struct timer_l
- }
- }
-
--static void expire_timers(struct timer_base *base, struct hlist_head *head)
-+static inline void __expire_timers(struct timer_base *base,
-+ struct hlist_head *head)
- {
- while (!hlist_empty(head)) {
- struct timer_list *timer;
-@@ -1384,21 +1387,38 @@ static void expire_timers(struct timer_b
- }
- }
-
--static int __collect_expired_timers(struct timer_base *base,
-- struct hlist_head *heads)
-+static void expire_timers(struct timer_base *base)
-+{
-+ struct hlist_head *head;
-+
-+ while (base->expired_count--) {
-+ head = base->expired_lists + base->expired_count;
-+ __expire_timers(base, head);
-+ }
-+ base->expired_count = 0;
-+}
-+
-+static void __collect_expired_timers(struct timer_base *base)
- {
- unsigned long clk = base->clk;
- struct hlist_head *vec;
-- int i, levels = 0;
-+ int i;
- unsigned int idx;
-
-+ /*
-+ * expire_timers() must be called at least once before we can
-+ * collect more timers
-+ */
-+ if (WARN_ON(base->expired_count))
-+ return;
-+
- for (i = 0; i < LVL_DEPTH; i++) {
- idx = (clk & LVL_MASK) + i * LVL_SIZE;
-
- if (__test_and_clear_bit(idx, base->pending_map)) {
- vec = base->vectors + idx;
-- hlist_move_list(vec, heads++);
-- levels++;
-+ hlist_move_list(vec,
-+ &base->expired_lists[base->expired_count++]);
- }
- /* Is it time to look at the next level? */
- if (clk & LVL_CLK_MASK)
-@@ -1406,7 +1426,6 @@ static int __collect_expired_timers(stru
- /* Shift clock for the next level granularity */
- clk >>= LVL_CLK_SHIFT;
- }
-- return levels;
- }
-
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -1599,8 +1618,7 @@ void timer_clear_idle(void)
- base->is_idle = false;
- }
-
--static int collect_expired_timers(struct timer_base *base,
-- struct hlist_head *heads)
-+static void collect_expired_timers(struct timer_base *base)
- {
- /*
- * NOHZ optimization. After a long idle sleep we need to forward the
-@@ -1617,20 +1635,49 @@ static int collect_expired_timers(struct
- if (time_after(next, jiffies)) {
- /* The call site will increment clock! */
- base->clk = jiffies - 1;
-- return 0;
-+ return;
- }
- base->clk = next;
- }
-- return __collect_expired_timers(base, heads);
-+ __collect_expired_timers(base);
- }
- #else
--static inline int collect_expired_timers(struct timer_base *base,
-- struct hlist_head *heads)
-+static inline void collect_expired_timers(struct timer_base *base)
- {
-- return __collect_expired_timers(base, heads);
-+ __collect_expired_timers(base);
- }
- #endif
-
-+static int find_expired_timers(struct timer_base *base)
-+{
-+ const unsigned long int end_clk = jiffies;
-+
-+ while (!base->expired_count && time_after_eq(end_clk, base->clk)) {
-+ collect_expired_timers(base);
-+ base->clk++;
-+ }
-+
-+ return base->expired_count;
-+}
-+
-+/* Called from CPU tick routine to quickly collect expired timers */
-+static int tick_find_expired(struct timer_base *base)
-+{
-+ int count;
-+
-+ raw_spin_lock(&base->lock);
-+
-+ if (unlikely(time_after(jiffies, base->clk + HZ))) {
-+ /* defer to ktimersoftd; don't spend too long in irq context */
-+ count = -1;
-+ } else
-+ count = find_expired_timers(base);
-+
-+ raw_spin_unlock(&base->lock);
-+
-+ return count;
-+}
-+
- /*
- * Called from the timer interrupt handler to charge one tick to the current
- * process. user_tick is 1 if the tick is user time, 0 for system.
-@@ -1657,22 +1704,11 @@ void update_process_times(int user_tick)
- */
- static inline void __run_timers(struct timer_base *base)
- {
-- struct hlist_head heads[LVL_DEPTH];
-- int levels;
--
-- if (!time_after_eq(jiffies, base->clk))
-- return;
--
- raw_spin_lock_irq(&base->lock);
-
-- while (time_after_eq(jiffies, base->clk)) {
--
-- levels = collect_expired_timers(base, heads);
-- base->clk++;
-+ while (find_expired_timers(base))
-+ expire_timers(base);
-
-- while (levels--)
-- expire_timers(base, heads + levels);
-- }
- raw_spin_unlock_irq(&base->lock);
- wakeup_timer_waiters(base);
- }
-@@ -1698,12 +1734,12 @@ void run_local_timers(void)
-
- hrtimer_run_queues();
- /* Raise the softirq only if required. */
-- if (time_before(jiffies, base->clk)) {
-+ if (time_before(jiffies, base->clk) || !tick_find_expired(base)) {
- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
- return;
- /* CPU is awake, so check the deferrable base. */
- base++;
-- if (time_before(jiffies, base->clk))
-+ if (time_before(jiffies, base->clk) || !tick_find_expired(base))
- return;
- }
- raise_softirq(TIMER_SOFTIRQ);
-@@ -1873,6 +1909,7 @@ int timers_dead_cpu(unsigned int cpu)
- raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
-
- BUG_ON(old_base->running_timer);
-+ BUG_ON(old_base->expired_count);
-
- for (i = 0; i < WHEEL_SIZE; i++)
- migrate_timer_list(new_base, old_base->vectors + i);
-@@ -1899,6 +1936,7 @@ static void __init init_timer_cpu(int cp
- #ifdef CONFIG_PREEMPT_RT_FULL
- init_swait_queue_head(&base->wait_for_running_timer);
- #endif
-+ base->expired_count = 0;
- }
- }
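
The timer hunks above (shown here as they are dropped or refreshed in this rebase) split the expiry path in two: a collect phase that moves due timers onto per-base expired lists while holding the base lock, and an expire phase that runs the callbacks from that private batch, with tick_find_expired() giving up (-1) when the base clock has fallen far behind. A minimal userspace sketch of that collect-then-expire split; everything in it (fake_timer, fake_base, find_expired, expire) is made up for illustration and is not the kernel code:

/* Illustrative userspace model of the collect/expire split; not kernel code. */
#include <stdio.h>

#define MAX_BATCH 8

struct fake_timer {
	unsigned long expires;
	void (*fn)(unsigned long);
	unsigned long data;
};

struct fake_base {
	unsigned long clk;                      /* last serviced tick */
	struct fake_timer *pending[MAX_BATCH];
	int npending;
	struct fake_timer *expired[MAX_BATCH];  /* stands in for expired_lists */
	int expired_count;
};

/* Collect phase: move due timers aside while "holding the base lock". */
static int find_expired(struct fake_base *base, unsigned long now)
{
	while (!base->expired_count && base->clk <= now) {
		for (int i = 0; i < base->npending; i++) {
			struct fake_timer *t = base->pending[i];

			if (t && t->expires == base->clk) {
				base->expired[base->expired_count++] = t;
				base->pending[i] = NULL;
			}
		}
		base->clk++;
	}
	return base->expired_count;
}

/* Expire phase: run callbacks from the private batch, newest slot first. */
static void expire(struct fake_base *base)
{
	while (base->expired_count--)
		base->expired[base->expired_count]->fn(
			base->expired[base->expired_count]->data);
	base->expired_count = 0;
}

static void say(unsigned long d) { printf("timer %lu fired\n", d); }

int main(void)
{
	struct fake_timer a = { 2, say, 1 }, b = { 3, say, 2 };
	struct fake_base base = { .clk = 0, .pending = { &a, &b }, .npending = 2 };

	while (find_expired(&base, 5))   /* mirrors: while (find_expired_timers(base)) */
		expire(&base);
	return 0;
}
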
-
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 7d76c6af478f..31d72aebf301 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -12,12 +12,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/timer.h | 2 +-
kernel/sched/core.c | 9 +++++++--
- kernel/time/timer.c | 44 ++++++++++++++++++++++++++++++++++++++++----
- 3 files changed, 48 insertions(+), 7 deletions(-)
+ kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 49 insertions(+), 7 deletions(-)
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
-@@ -241,7 +241,7 @@ extern void add_timer(struct timer_list
+@@ -198,7 +198,7 @@ extern void add_timer(struct timer_list
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -525,11 +525,14 @@ void resched_cpu(int cpu)
+@@ -532,11 +532,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -548,6 +551,8 @@ int get_nohz_timer_target(void)
+@@ -555,6 +558,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu();
unlock:
rcu_read_unlock();
@@ -53,10 +53,18 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ preempt_enable_rt();
return cpu;
}
- /*
+
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -195,6 +195,9 @@ EXPORT_SYMBOL(jiffies_64);
+@@ -44,6 +44,7 @@
+ #include <linux/sched/debug.h>
+ #include <linux/slab.h>
+ #include <linux/compat.h>
++#include <linux/swait.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/unistd.h>
+@@ -197,6 +198,9 @@ EXPORT_SYMBOL(jiffies_64);
struct timer_base {
raw_spinlock_t lock;
struct timer_list *running_timer;
@@ -66,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
-@@ -1157,6 +1160,33 @@ void add_timer_on(struct timer_list *tim
+@@ -1119,6 +1123,33 @@ void add_timer_on(struct timer_list *tim
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -100,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer - deactive a timer.
* @timer: the timer to be deactivated
-@@ -1214,7 +1244,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1174,7 +1205,7 @@ int try_to_del_timer_sync(struct timer_l
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -109,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1274,7 +1304,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1234,7 +1265,7 @@ int del_timer_sync(struct timer_list *ti
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -118,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1339,13 +1369,16 @@ static void expire_timers(struct timer_b
+@@ -1298,13 +1329,16 @@ static void expire_timers(struct timer_b
fn = timer->function;
data = timer->data;
@@ -136,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock_irq(&base->lock);
}
}
-@@ -1640,8 +1673,8 @@ static inline void __run_timers(struct t
+@@ -1600,8 +1634,8 @@ static inline void __run_timers(struct t
while (levels--)
expire_timers(base, heads + levels);
}
@@ -146,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1863,6 +1896,9 @@ static void __init init_timer_cpu(int cp
+@@ -1813,6 +1847,9 @@ static void __init init_timer_cpu(int cp
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
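
The refreshed hunks above keep the RT behaviour where del_timer_sync() sleeps on a per-base swait queue until a running callback finishes instead of spinning with cpu_relax(). A rough userspace analogy of that wait/wakeup handshake; the mutex, condition variable and flag names below are stand-ins chosen for illustration, not the kernel API:

/* Userspace analogy of "wait for the running timer callback"; not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t base_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wait_for_running = PTHREAD_COND_INITIALIZER;
static int running_timer;               /* 1 while the "callback" executes */

static void *timer_thread(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&base_lock);
	running_timer = 1;
	pthread_mutex_unlock(&base_lock);

	usleep(100 * 1000);                  /* the callback runs, preemptibly */

	pthread_mutex_lock(&base_lock);
	running_timer = 0;
	pthread_cond_broadcast(&wait_for_running);  /* wakeup_timer_waiters() */
	pthread_mutex_unlock(&base_lock);
	return NULL;
}

/* del_timer_sync()-style wait: sleep instead of busy-waiting. */
static void wait_for_running_timer(void)
{
	pthread_mutex_lock(&base_lock);
	while (running_timer)
		pthread_cond_wait(&wait_for_running, &base_lock);
	pthread_mutex_unlock(&base_lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, timer_thread, NULL);
	usleep(10 * 1000);
	wait_for_running_timer();
	printf("callback finished, safe to tear the timer down\n");
	pthread_join(t, NULL);
	return 0;
}
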
diff --git a/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch b/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
index 917920b68942..25f84301be6f 100644
--- a/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
+++ b/patches/trace_Use_rcuidle_version_for_preemptoff_hist_trace_point.patch
@@ -54,7 +54,7 @@ in 4.4-rt. It looks such fix is still needed.
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
-@@ -425,13 +425,13 @@ void start_critical_timings(void)
+@@ -437,13 +437,13 @@ void start_critical_timings(void)
{
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
@@ -70,7 +70,7 @@ in 4.4-rt. It looks such fix is still needed.
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
-@@ -441,7 +441,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
+@@ -453,7 +453,7 @@ EXPORT_SYMBOL_GPL(stop_critical_timings)
#ifdef CONFIG_PROVE_LOCKING
void time_hardirqs_on(unsigned long a0, unsigned long a1)
{
@@ -79,7 +79,7 @@ in 4.4-rt. It looks such fix is still needed.
if (!preempt_trace() && irq_trace())
stop_critical_timing(a0, a1);
}
-@@ -450,7 +450,7 @@ void time_hardirqs_off(unsigned long a0,
+@@ -462,7 +462,7 @@ void time_hardirqs_off(unsigned long a0,
{
if (!preempt_trace() && irq_trace())
start_critical_timing(a0, a1);
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index 9cc659993bb1..ed02d08e40bf 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3583,7 +3583,16 @@ asmlinkage __visible void __sched notrac
+@@ -3654,7 +3654,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 977fcb34a722..609e361caa6c 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3144,10 +3144,8 @@ void serial8250_console_write(struct uar
+@@ -3179,10 +3179,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index c17d9780043f..36dced060c05 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3811,7 +3811,7 @@ static int netif_rx_internal(struct sk_b
+@@ -3798,7 +3798,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3821,13 +3821,13 @@ static int netif_rx_internal(struct sk_b
+@@ -3808,14 +3808,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
@@ -56,6 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
{
unsigned int qtail;
+
- ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
- put_cpu();
+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail);
diff --git a/patches/user-use-local-irq-nort.patch b/patches/user-use-local-irq-nort.patch
index 3150b0cce30b..70bc0c5836bb 100644
--- a/patches/user-use-local-irq-nort.patch
+++ b/patches/user-use-local-irq-nort.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/user.c
+++ b/kernel/user.c
-@@ -161,11 +161,11 @@ void free_uid(struct user_struct *up)
+@@ -162,11 +162,11 @@ void free_uid(struct user_struct *up)
if (!up)
return;
diff --git a/patches/wait.h-include-atomic.h.patch b/patches/wait.h-include-atomic.h.patch
index 9274a288c623..147eb733c945 100644
--- a/patches/wait.h-include-atomic.h.patch
+++ b/patches/wait.h-include-atomic.h.patch
@@ -22,8 +22,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
-@@ -8,6 +8,7 @@
- #include <linux/spinlock.h>
+@@ -9,6 +9,7 @@
+
#include <asm/current.h>
#include <uapi/linux/wait.h>
+#include <linux/atomic.h>
diff --git a/patches/work-queue-work-around-irqsafe-timer-optimization.patch b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
index 3d91be226c5f..ab1a5a18b7b1 100644
--- a/patches/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "workqueue_internal.h"
-@@ -1279,7 +1280,7 @@ static int try_to_grab_pending(struct wo
+@@ -1281,7 +1282,7 @@ static int try_to_grab_pending(struct wo
local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
diff --git a/patches/work-simple-Simple-work-queue-implemenation.patch b/patches/work-simple-Simple-work-queue-implemenation.patch
index f502c2ec0020..3ca550e0228e 100644
--- a/patches/work-simple-Simple-work-queue-implemenation.patch
+++ b/patches/work-simple-Simple-work-queue-implemenation.patch
@@ -50,8 +50,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
-obj-y += wait.o swait.o completion.o idle.o
+obj-y += wait.o swait.o swork.o completion.o idle.o
- obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
- obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
+ obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o
+ obj-$(CONFIG_SCHED_AUTOGROUP) += autogroup.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
--- /dev/null
+++ b/kernel/sched/swork.c
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index ad9e7ee2ba5e..ec7be2fdb22b 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -24,25 +24,25 @@ Link: http://lkml.kernel.org/r/20110622174919.135236139@linutronix.de
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/sched/core.c | 81 ++++++++------------------------------------
- kernel/workqueue.c | 52 ++++++++++++----------------
+ kernel/sched/core.c | 86 +++++++-------------------------------------
+ kernel/workqueue.c | 52 +++++++++++---------------
kernel/workqueue_internal.h | 5 +-
- 3 files changed, 41 insertions(+), 97 deletions(-)
+ 3 files changed, 41 insertions(+), 102 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1711,10 +1711,6 @@ static inline void ttwu_activate(struct
+@@ -1690,10 +1690,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
-
-- /* if a worker is waking up, notify workqueue */
+- /* If a worker is waking up, notify the workqueue: */
- if (p->flags & PF_WQ_WORKER)
- wq_worker_waking_up(p, cpu_of(rq));
}
/*
-@@ -2152,53 +2148,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2146,58 +2142,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * ensure that this_rq() is locked, @p is bound to this_rq() and not
- * the current task.
- */
--static void try_to_wake_up_local(struct task_struct *p, struct pin_cookie cookie)
+-static void try_to_wake_up_local(struct task_struct *p, struct rq_flags *rf)
-{
- struct rq *rq = task_rq(p);
-
@@ -71,11 +71,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * disabled avoiding further scheduler activity on it and we've
- * not yet picked a replacement task.
- */
-- lockdep_unpin_lock(&rq->lock, cookie);
+- rq_unpin_lock(rq, rf);
- raw_spin_unlock(&rq->lock);
- raw_spin_lock(&p->pi_lock);
- raw_spin_lock(&rq->lock);
-- lockdep_repin_lock(&rq->lock, cookie);
+- rq_repin_lock(rq, rf);
- }
-
- if (!(p->state & TASK_NORMAL))
@@ -83,10 +83,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
- trace_sched_waking(p);
-
-- if (!task_on_rq_queued(p))
+- if (!task_on_rq_queued(p)) {
+- if (p->in_iowait) {
+- delayacct_blkio_end();
+- atomic_dec(&rq->nr_iowait);
+- }
- ttwu_activate(rq, p, ENQUEUE_WAKEUP);
+- }
-
-- ttwu_do_wakeup(rq, p, 0, cookie);
+- ttwu_do_wakeup(rq, p, 0, rf);
- ttwu_stat(p, smp_processor_id(), 0);
-out:
- raw_spin_unlock(&p->pi_lock);
@@ -96,10 +101,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3494,21 +3443,6 @@ static void __sched notrace __schedule(b
- } else {
- deactivate_task(rq, prev, DEQUEUE_SLEEP);
- prev->on_rq = 0;
+@@ -3485,21 +3429,6 @@ static void __sched notrace __schedule(b
+ atomic_inc(&rq->nr_iowait);
+ delayacct_blkio_start();
+ }
-
- /*
- * If a worker went to sleep, notify and ask workqueue
@@ -113,12 +118,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-
- to_wakeup = wq_worker_sleeping(prev);
- if (to_wakeup)
-- try_to_wake_up_local(to_wakeup, cookie);
+- try_to_wake_up_local(to_wakeup, &rf);
- }
}
switch_count = &prev->nvcsw;
}
-@@ -3567,6 +3501,14 @@ static inline void sched_submit_work(str
+@@ -3564,6 +3493,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -133,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3575,6 +3517,12 @@ static inline void sched_submit_work(str
+@@ -3572,6 +3509,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -146,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3585,6 +3533,7 @@ asmlinkage __visible void __sched schedu
+@@ -3582,6 +3525,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -156,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -841,43 +841,32 @@ static void wake_up_worker(struct worker
+@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker
}
/**
@@ -211,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -886,13 +875,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -231,7 +236,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The counterpart of the following dec_and_test, implied mb,
-@@ -906,9 +897,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index a8e412ee6b2d..7b7d866735e3 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -79,7 +79,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
* A: pool->attach_mutex protected.
*
* PL: wq_pool_mutex protected.
-@@ -428,6 +433,31 @@ static void workqueue_sysfs_unregister(s
+@@ -430,6 +435,31 @@ static void workqueue_sysfs_unregister(s
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -111,7 +111,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -834,10 +864,16 @@ static struct worker *first_idle_worker(
+@@ -836,10 +866,16 @@ static struct worker *first_idle_worker(
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -129,7 +129,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -866,7 +902,7 @@ void wq_worker_running(struct task_struc
+@@ -868,7 +904,7 @@ void wq_worker_running(struct task_struc
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -138,7 +138,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
struct worker_pool *pool;
/*
-@@ -883,26 +919,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -885,26 +921,18 @@ void wq_worker_sleeping(struct task_stru
return;
worker->sleeping = 1;
@@ -168,7 +168,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -1631,7 +1659,9 @@ static void worker_enter_idle(struct wor
+@@ -1635,7 +1663,9 @@ static void worker_enter_idle(struct wor
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -178,7 +178,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1664,7 +1694,9 @@ static void worker_leave_idle(struct wor
+@@ -1668,7 +1698,9 @@ static void worker_leave_idle(struct wor
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -188,7 +188,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
static struct worker *alloc_worker(int node)
-@@ -1830,7 +1862,9 @@ static void destroy_worker(struct worker
+@@ -1834,7 +1866,9 @@ static void destroy_worker(struct worker
pool->nr_workers--;
pool->nr_idle--;
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index ffc72b923722..8c798dc53dee 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -7,8 +7,8 @@ semantic of irq-off in regard to the pool->lock and remain preemptible.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- kernel/workqueue.c | 33 +++++++++++++++++++--------------
- 1 file changed, 19 insertions(+), 14 deletions(-)
+ kernel/workqueue.c | 36 ++++++++++++++++++++++--------------
+ 1 file changed, 22 insertions(+), 14 deletions(-)
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "workqueue_internal.h"
-@@ -348,6 +349,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
+@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -1101,9 +1104,11 @@ static void put_pwq_unlocked(struct pool
+@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1207,7 +1212,7 @@ static int try_to_grab_pending(struct wo
+@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct wo
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1271,7 +1276,7 @@ static int try_to_grab_pending(struct wo
+@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct wo
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1376,7 +1381,7 @@ static void __queue_work(int cpu, struct
+@@ -1378,7 +1383,7 @@ static void __queue_work(int cpu, struct
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_work_activate(work);
-@@ -1482,14 +1487,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1484,14 +1489,14 @@ bool queue_work_on(int cpu, struct workq
bool ret = false;
unsigned long flags;
@@ -87,7 +87,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1556,14 +1561,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1500,8 +1505,11 @@ void delayed_work_timer_fn(unsigned long
+ {
+ struct delayed_work *dwork = (struct delayed_work *)__data;
+
++ /* XXX */
++ /* local_lock(pendingb_lock); */
+ /* should have been called from irqsafe timer with irq already off */
+ __queue_work(dwork->cpu, dwork->wq, &dwork->work);
++ /* local_unlock(pendingb_lock); */
+ }
+ EXPORT_SYMBOL(delayed_work_timer_fn);
+
+@@ -1557,14 +1565,14 @@ bool queue_delayed_work_on(int cpu, stru
unsigned long flags;
/* read the comment in __queue_work() */
@@ -104,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1598,7 +1603,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1599,7 +1607,7 @@ bool mod_delayed_work_on(int cpu, struct
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -113,16 +125,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2916,7 +2921,7 @@ static bool __cancel_work_timer(struct w
+@@ -2923,7 +2931,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
- local_irq_restore(flags);
+ local_unlock_irqrestore(pendingb_lock, flags);
- flush_work(work);
- clear_work_data(work);
-@@ -2971,10 +2976,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+ /*
+ * This allows canceling during early boot. We know that @work
+@@ -2984,10 +2992,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -135,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2992,7 +2997,7 @@ static bool __cancel_work(struct work_st
+@@ -3005,7 +3013,7 @@ static bool __cancel_work(struct work_st
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
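
The locallock conversion above swaps local_irq_save()/restore() around the PENDING-bit handling for local_lock_irqsave(pendingb_lock, ...), so on RT the section is serialized by a per-CPU lock and stays preemptible. A stripped-down userspace model of that substitution; the pthread mutex merely stands in for the local lock and the flag for the PENDING bit:

/* Userspace model: replace "disable interrupts" with a lock so the
 * critical section stays preemptible; a mutex stands in for the local lock. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pendingb_lock = PTHREAD_MUTEX_INITIALIZER;
static int work_pending;                 /* stands in for WORK_STRUCT_PENDING */
static int queue_len;

/* queue_work_on()-style fast path: claim PENDING and enqueue atomically
 * with respect to other submitters on this "CPU". */
static int queue_work(void)
{
	int queued = 0;

	pthread_mutex_lock(&pendingb_lock);   /* was: local_irq_save(flags) */
	if (!work_pending) {
		work_pending = 1;
		queue_len++;
		queued = 1;
	}
	pthread_mutex_unlock(&pendingb_lock); /* was: local_irq_restore(flags) */
	return queued;
}

int main(void)
{
	printf("first queue:  %d\n", queue_work());   /* 1: newly queued */
	printf("second queue: %d\n", queue_work());   /* 0: already pending */
	return 0;
}
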
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index 5bd35b655823..0e4370c589b2 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -355,20 +355,20 @@ static void workqueue_sysfs_unregister(s
+@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(s
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -380,7 +380,7 @@ static void workqueue_sysfs_unregister(s
+@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(s
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -412,7 +412,7 @@ static void workqueue_sysfs_unregister(s
+@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(s
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -548,7 +548,7 @@ static int worker_pool_assign_id(struct
+@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct
* @wq: the target workqueue
* @node: the node ID
*
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -692,8 +692,8 @@ static struct pool_workqueue *get_work_p
+@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_p
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1098,7 +1098,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool
{
if (pwq) {
/*
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1226,6 +1226,7 @@ static int try_to_grab_pending(struct wo
+@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct wo
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1264,10 +1265,12 @@ static int try_to_grab_pending(struct wo
+@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct wo
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1381,6 +1384,7 @@ static void __queue_work(int cpu, struct
+@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -153,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1437,10 +1441,8 @@ static void __queue_work(int cpu, struct
+@@ -1439,10 +1443,8 @@ static void __queue_work(int cpu, struct
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1458,7 +1460,9 @@ static void __queue_work(int cpu, struct
+@@ -1460,7 +1462,9 @@ static void __queue_work(int cpu, struct
insert_work(pwq, work, worklist, work_flags);
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -2785,14 +2789,14 @@ static bool start_flush_work(struct work
+@@ -2789,14 +2793,14 @@ static bool start_flush_work(struct work
might_sleep();
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2821,10 +2825,11 @@ static bool start_flush_work(struct work
+@@ -2825,10 +2829,11 @@ static bool start_flush_work(struct work
else
lock_map_acquire_read(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3245,7 +3250,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3258,7 +3263,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3299,8 +3304,8 @@ static void put_unbound_pool(struct work
+@@ -3312,8 +3317,8 @@ static void put_unbound_pool(struct work
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3407,14 +3412,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3420,14 +3425,14 @@ static void pwq_unbound_release_workfn(s
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -244,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -4064,7 +4069,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4081,7 +4086,7 @@ void destroy_workqueue(struct workqueue_
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4157,7 +4162,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4174,7 +4179,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4168,7 +4174,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4185,7 +4191,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4194,15 +4201,15 @@ unsigned int work_busy(struct work_struc
+@@ -4211,15 +4218,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4391,7 +4398,7 @@ void show_workqueue_state(void)
+@@ -4408,7 +4415,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4444,7 +4451,7 @@ void show_workqueue_state(void)
+@@ -4461,7 +4468,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4782,16 +4789,16 @@ bool freeze_workqueues_busy(void)
+@@ -4822,16 +4829,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -4981,7 +4988,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5021,7 +5028,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -4989,7 +4997,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5029,7 +5037,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-UV-raw_spinlock-conversion.patch b/patches/x86-UV-raw_spinlock-conversion.patch
index 60e206e9c1e4..fe5fe849833f 100644
--- a/patches/x86-UV-raw_spinlock-conversion.patch
+++ b/patches/x86-UV-raw_spinlock-conversion.patch
@@ -10,8 +10,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
arch/x86/include/asm/uv/uv_bau.h | 14 +++++++-------
arch/x86/platform/uv/tlb_uv.c | 26 +++++++++++++-------------
- arch/x86/platform/uv/uv_time.c | 21 +++++++++++++--------
- 3 files changed, 33 insertions(+), 28 deletions(-)
+ arch/x86/platform/uv/uv_time.c | 20 ++++++++++++--------
+ 3 files changed, 32 insertions(+), 28 deletions(-)
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
-@@ -748,9 +748,9 @@ static void destination_plugged(struct b
+@@ -747,9 +747,9 @@ static void destination_plugged(struct b
quiesce_local_uvhub(hmaster);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
end_uvhub_quiesce(hmaster);
-@@ -770,9 +770,9 @@ static void destination_timeout(struct b
+@@ -769,9 +769,9 @@ static void destination_timeout(struct b
quiesce_local_uvhub(hmaster);
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
end_uvhub_quiesce(hmaster);
-@@ -793,7 +793,7 @@ static void disable_for_period(struct ba
+@@ -792,7 +792,7 @@ static void disable_for_period(struct ba
cycles_t tm1;
hmaster = bcp->uvhub_master;
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!bcp->baudisabled) {
stat->s_bau_disabled++;
tm1 = get_cycles();
-@@ -806,7 +806,7 @@ static void disable_for_period(struct ba
+@@ -805,7 +805,7 @@ static void disable_for_period(struct ba
}
}
}
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void count_max_concurr(int stat, struct bau_control *bcp,
-@@ -869,7 +869,7 @@ static void record_send_stats(cycles_t t
+@@ -868,7 +868,7 @@ static void record_send_stats(cycles_t t
*/
static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
{
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_t *v;
v = &hmaster->active_descriptor_count;
-@@ -1002,7 +1002,7 @@ static int check_enable(struct bau_contr
+@@ -1001,7 +1001,7 @@ static int check_enable(struct bau_contr
struct bau_control *hmaster;
hmaster = bcp->uvhub_master;
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
stat->s_bau_reenabled++;
for_each_present_cpu(tcpu) {
-@@ -1014,10 +1014,10 @@ static int check_enable(struct bau_contr
+@@ -1013,10 +1013,10 @@ static int check_enable(struct bau_contr
tbcp->period_giveups = 0;
}
}
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -1;
}
-@@ -1940,9 +1940,9 @@ static void __init init_per_cpu_tunables
+@@ -1938,9 +1938,9 @@ static void __init init_per_cpu_tunables
bcp->cong_reps = congested_reps;
bcp->disabled_period = sec_2_cycles(disabled_period);
bcp->giveup_limit = giveup_limit;
@@ -198,11 +198,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rc;
}
-@@ -299,13 +299,18 @@ static int uv_rtc_unset_timer(int cpu, i
- static cycle_t uv_read_rtc(struct clocksource *cs)
+@@ -299,13 +299,17 @@ static int uv_rtc_unset_timer(int cpu, i
+ static u64 uv_read_rtc(struct clocksource *cs)
{
unsigned long offset;
-+ cycle_t cycles;
++ u64 cycles;
+ preempt_disable();
if (uv_get_min_hub_revision_id() == 1)
@@ -210,10 +210,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
else
offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
-- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
-+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+- return (u64)uv_read_local_mmr(UVH_RTC | offset);
++ cycles = (u64)uv_read_local_mmr(UVH_RTC | offset);
+ preempt_enable();
-+
+ return cycles;
}
diff --git a/patches/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch b/patches/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
deleted file mode 100644
index b86f9c939a67..000000000000
--- a/patches/x86-apic-get-rid-of-warning-acpi_ioapic_lock-defined.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Fri, 21 Oct 2016 10:29:11 +0200
-Subject: [PATCH] x86/apic: get rid of "warning: 'acpi_ioapic_lock' defined but
- not used"
-
-kbuild test robot reported this against the -RT tree:
-
-| In file included from include/linux/mutex.h:30:0,
-| from include/linux/notifier.h:13,
-| from include/linux/memory_hotplug.h:6,
-| from include/linux/mmzone.h:777,
-| from include/linux/gfp.h:5,
-| from include/linux/slab.h:14,
-| from include/linux/resource_ext.h:19,
-| from include/linux/acpi.h:26,
-| from arch/x86/kernel/acpi/boot.c:27:
-|>> arch/x86/kernel/acpi/boot.c:90:21: warning: 'acpi_ioapic_lock' defined but not used [-Wunused-variable]
-| static DEFINE_MUTEX(acpi_ioapic_lock);
-| ^
-| include/linux/mutex_rt.h:27:15: note: in definition of macro 'DEFINE_MUTEX'
-| struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
- ^~~~~~~~~
-which is also true (as in non-used) for !RT but the compiler does not
-emit a warning.
-
-Reported-by: kbuild test robot <fengguang.wu@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/acpi/boot.c | 2 ++
- 1 file changed, 2 insertions(+)
-
---- a/arch/x86/kernel/acpi/boot.c
-+++ b/arch/x86/kernel/acpi/boot.c
-@@ -87,7 +87,9 @@ static u64 acpi_lapic_addr __initdata =
- * ->ioapic_mutex
- * ->ioapic_lock
- */
-+#ifdef CONFIG_X86_IO_APIC
- static DEFINE_MUTEX(acpi_ioapic_lock);
-+#endif
-
- /* --------------------------------------------------------------------------
- Boot-time Configuration
diff --git a/patches/x86-crypto-reduce-preempt-disabled-regions.patch b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
index edbaaa56adea..3bade762e020 100644
--- a/patches/x86-crypto-reduce-preempt-disabled-regions.patch
+++ b/patches/x86-crypto-reduce-preempt-disabled-regions.patch
@@ -13,32 +13,31 @@ Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- arch/x86/crypto/aesni-intel_glue.c | 24 +++++++++++++-----------
- 1 file changed, 13 insertions(+), 11 deletions(-)
+ arch/x86/crypto/aesni-intel_glue.c | 22 ++++++++++++----------
+ 1 file changed, 12 insertions(+), 10 deletions(-)
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
-@@ -372,14 +372,14 @@ static int ecb_encrypt(struct blkcipher_
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -374,14 +374,14 @@ static int ecb_encrypt(struct skcipher_r
+
+ err = skcipher_walk_virt(&walk, req, true);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
+ kernel_fpu_begin();
aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-- nbytes & AES_BLOCK_MASK);
-+ nbytes & AES_BLOCK_MASK);
+ nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
-@@ -396,14 +396,14 @@ static int ecb_decrypt(struct blkcipher_
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -396,14 +396,14 @@ static int ecb_decrypt(struct skcipher_r
+
+ err = skcipher_walk_virt(&walk, req, true);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -47,15 +46,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nbytes & AES_BLOCK_MASK);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
-@@ -420,14 +420,14 @@ static int cbc_encrypt(struct blkcipher_
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -418,14 +418,14 @@ static int cbc_encrypt(struct skcipher_r
+
+ err = skcipher_walk_virt(&walk, req, true);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -64,15 +63,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
-@@ -444,14 +444,14 @@ static int cbc_decrypt(struct blkcipher_
- err = blkcipher_walk_virt(desc, &walk);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -440,14 +440,14 @@ static int cbc_decrypt(struct skcipher_r
+
+ err = skcipher_walk_virt(&walk, req, true);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes)) {
@@ -81,15 +80,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
- kernel_fpu_end();
return err;
}
-@@ -503,18 +503,20 @@ static int ctr_crypt(struct blkcipher_de
- err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
+@@ -497,18 +497,20 @@ static int ctr_crypt(struct skcipher_req
+
+ err = skcipher_walk_virt(&walk, req, true);
- kernel_fpu_begin();
while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
@@ -98,13 +97,13 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
nbytes & AES_BLOCK_MASK, walk.iv);
+ kernel_fpu_end();
nbytes &= AES_BLOCK_SIZE - 1;
- err = blkcipher_walk_done(desc, &walk, nbytes);
+ err = skcipher_walk_done(&walk, nbytes);
}
if (walk.nbytes) {
+ kernel_fpu_begin();
ctr_crypt_final(ctx, &walk);
+ kernel_fpu_end();
- err = blkcipher_walk_done(desc, &walk, 0);
+ err = skcipher_walk_done(&walk, 0);
}
- kernel_fpu_end();
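
The aesni hunks above move kernel_fpu_begin()/kernel_fpu_end() from around the whole walk loop to around each block, so preemption is re-enabled between blocks. The shape of that change as a standalone sketch; enter_atomic()/leave_atomic() and the xor "cipher" are made-up stand-ins, not the aesni driver:

/* Sketch of shrinking a non-preemptible region to per-block scope. */
#include <stdio.h>

static int atomic_depth;

static void enter_atomic(void) { atomic_depth++; }  /* kernel_fpu_begin() stand-in */
static void leave_atomic(void) { atomic_depth--; }  /* kernel_fpu_end() stand-in */

static void process_block(const unsigned char *src, unsigned char *dst, int n)
{
	for (int i = 0; i < n; i++)
		dst[i] = src[i] ^ 0x5a;          /* placeholder "cipher" step */
}

static void process_buffer(const unsigned char *src, unsigned char *dst,
			   int len, int blk)
{
	for (int off = 0; off < len; off += blk) {
		int n = (len - off < blk) ? len - off : blk;

		enter_atomic();                  /* per block, not around the loop */
		process_block(src + off, dst + off, n);
		leave_atomic();                  /* a waiting RT task can run here */
	}
}

int main(void)
{
	unsigned char in[64] = "latency matters", out[64];

	process_buffer(in, out, (int)sizeof(in), 16);
	printf("atomic_depth at end: %d\n", atomic_depth);   /* 0 */
	return 0;
}
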
diff --git a/patches/x86-io-apic-migra-no-unmask.patch b/patches/x86-io-apic-migra-no-unmask.patch
index c5605fd0fda6..2d8afd851adf 100644
--- a/patches/x86-io-apic-migra-no-unmask.patch
+++ b/patches/x86-io-apic-migra-no-unmask.patch
@@ -15,7 +15,7 @@ xXx
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1712,7 +1712,8 @@ static bool io_apic_level_ack_pending(st
+@@ -1711,7 +1711,8 @@ static bool io_apic_level_ack_pending(st
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
/* If we are moving the irq we need to mask it */
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 1c55c606fcaa..255b5e1181bd 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -5958,6 +5958,13 @@ int kvm_arch_init(void *opaque)
+@@ -6105,6 +6105,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-timer-hrtimer.patch b/patches/x86-mce-timer-hrtimer.patch
index 962744968ee7..6002f4f1dd06 100644
--- a/patches/x86-mce-timer-hrtimer.patch
+++ b/patches/x86-mce-timer-hrtimer.patch
@@ -21,8 +21,8 @@ fold in:
|[bigeasy: use ULL instead of u64 cast]
|Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/kernel/cpu/mcheck/mce.c | 52 +++++++++++++++------------------------
- 1 file changed, 20 insertions(+), 32 deletions(-)
+ arch/x86/kernel/cpu/mcheck/mce.c | 54 ++++++++++++++++++---------------------
+ 1 file changed, 26 insertions(+), 28 deletions(-)
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -33,8 +33,8 @@ fold in:
+#include <linux/jiffies.h>
#include <linux/jump_label.h>
- #include <asm/processor.h>
-@@ -1307,7 +1308,7 @@ void mce_log_therm_throt_event(__u64 sta
+ #include <asm/intel-family.h>
+@@ -1315,7 +1316,7 @@ int memory_failure(unsigned long pfn, in
static unsigned long check_interval = INITIAL_CHECK_INTERVAL;
static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
@@ -43,35 +43,30 @@ fold in:
static unsigned long mce_adjust_timer_default(unsigned long interval)
{
-@@ -1316,32 +1317,18 @@ static unsigned long mce_adjust_timer_de
+@@ -1324,27 +1325,19 @@ static unsigned long mce_adjust_timer_de
static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default;
--static void __restart_timer(struct timer_list *t, unsigned long interval)
-+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval)
+-static void __start_timer(struct timer_list *t, unsigned long interval)
++static void __start_timer(struct hrtimer *t, unsigned long iv)
{
- unsigned long when = jiffies + interval;
- unsigned long flags;
-
- local_irq_save(flags);
-
-- if (timer_pending(t)) {
-- if (time_before(when, t->expires))
-- mod_timer(t, when);
-- } else {
-- t->expires = round_jiffies(when);
-- add_timer_on(t, smp_processor_id());
-- }
--
+- if (!timer_pending(t) || time_before(when, t->expires))
+- mod_timer(t, round_jiffies(when));
++ if (!iv)
++ return;
+
- local_irq_restore(flags);
-+ if (!interval)
-+ return HRTIMER_NORESTART;
-+ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval)));
-+ return HRTIMER_RESTART;
++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
++ 0, HRTIMER_MODE_REL_PINNED);
}
-static void mce_timer_fn(unsigned long data)
-+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
{
- struct timer_list *t = this_cpu_ptr(&mce_timer);
- int cpu = smp_processor_id();
@@ -82,16 +77,20 @@ fold in:
iv = __this_cpu_read(mce_next_interval);
if (mce_available(this_cpu_ptr(&cpu_info))) {
-@@ -1364,7 +1351,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1367,7 +1360,11 @@ static void mce_timer_fn(unsigned long d
done:
__this_cpu_write(mce_next_interval, iv);
-- __restart_timer(t, iv);
-+ return __restart_timer(timer, iv);
+- __start_timer(t, iv);
++ if (!iv)
++ return HRTIMER_NORESTART;
++
++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(iv)));
++ return HRTIMER_RESTART;
}
/*
-@@ -1372,7 +1359,7 @@ static void mce_timer_fn(unsigned long d
+@@ -1375,7 +1372,7 @@ static void mce_timer_fn(unsigned long d
*/
void mce_timer_kick(unsigned long interval)
{
@@ -99,8 +98,8 @@ fold in:
+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
unsigned long iv = __this_cpu_read(mce_next_interval);
- __restart_timer(t, interval);
-@@ -1387,7 +1374,7 @@ static void mce_timer_delete_all(void)
+ __start_timer(t, interval);
+@@ -1390,7 +1387,7 @@ static void mce_timer_delete_all(void)
int cpu;
for_each_online_cpu(cpu)
@@ -109,71 +108,60 @@ fold in:
}
static void mce_do_trigger(struct work_struct *work)
-@@ -1722,7 +1709,7 @@ static void __mcheck_cpu_clear_vendor(st
+@@ -1725,7 +1722,7 @@ static void __mcheck_cpu_clear_vendor(st
}
}
--static void mce_start_timer(unsigned int cpu, struct timer_list *t)
-+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
+-static void mce_start_timer(struct timer_list *t)
++static void mce_start_timer(struct hrtimer *t)
{
unsigned long iv = check_interval * HZ;
-@@ -1731,16 +1718,17 @@ static void mce_start_timer(unsigned int
+@@ -1738,18 +1735,19 @@ static void mce_start_timer(struct timer
- per_cpu(mce_next_interval, cpu) = iv;
+ static void __mcheck_cpu_setup_timer(void)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- unsigned int cpu = smp_processor_id();
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
-- t->expires = round_jiffies(jiffies + iv);
-- add_timer_on(t, cpu);
-+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
-+ 0, HRTIMER_MODE_REL_PINNED);
+- setup_pinned_timer(t, mce_timer_fn, cpu);
++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ t->function = mce_timer_fn;
}
static void __mcheck_cpu_init_timer(void)
{
- struct timer_list *t = this_cpu_ptr(&mce_timer);
+- unsigned int cpu = smp_processor_id();
+ struct hrtimer *t = this_cpu_ptr(&mce_timer);
- unsigned int cpu = smp_processor_id();
-
-- setup_pinned_timer(t, mce_timer_fn, cpu);
++
+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ t->function = mce_timer_fn;
- mce_start_timer(cpu, t);
+
+- setup_pinned_timer(t, mce_timer_fn, cpu);
+ mce_start_timer(t);
}
-@@ -2465,6 +2453,8 @@ static void mce_disable_cpu(void *h)
- if (!mce_available(raw_cpu_ptr(&cpu_info)))
- return;
+@@ -2509,7 +2507,7 @@ static int mce_cpu_dead(unsigned int cpu
-+ hrtimer_cancel(this_cpu_ptr(&mce_timer));
-+
- if (!(action & CPU_TASKS_FROZEN))
- cmci_clear();
+ static int mce_cpu_online(unsigned int cpu)
+ {
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ int ret;
-@@ -2487,6 +2477,7 @@ static void mce_reenable_cpu(void *h)
- if (b->init)
- wrmsrl(msr_ops.ctl(i), b->ctl);
- }
-+ __mcheck_cpu_init_timer();
- }
+ mce_device_create(cpu);
+@@ -2526,10 +2524,10 @@ static int mce_cpu_online(unsigned int c
- /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-@@ -2494,7 +2485,6 @@ static int
- mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
+ static int mce_cpu_pre_down(unsigned int cpu)
{
- unsigned int cpu = (unsigned long)hcpu;
-- struct timer_list *t = &per_cpu(mce_timer, cpu);
-
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_ONLINE:
-@@ -2514,11 +2504,9 @@ mce_cpu_callback(struct notifier_block *
- break;
- case CPU_DOWN_PREPARE:
- smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
-- del_timer_sync(t);
- break;
- case CPU_DOWN_FAILED:
- smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
-- mce_start_timer(cpu, t);
- break;
- }
+- struct timer_list *t = this_cpu_ptr(&mce_timer);
++ struct hrtimer *t = this_cpu_ptr(&mce_timer);
+ mce_disable_cpu();
+- del_timer_sync(t);
++ hrtimer_cancel(t);
+ mce_threshold_remove_device(cpu);
+ mce_device_remove(cpu);
+ return 0;
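
The refreshed mce hunks above turn the per-CPU polling timer into an hrtimer whose handler re-arms itself with hrtimer_forward_now() and returns HRTIMER_RESTART. As a loose userspace analogy of a self-re-arming monotonic poll timer, a timerfd-based sketch; the 100 ms interval is an arbitrary assumption and this is not the MCE code:

/* Userspace analogy of a self-re-arming monotonic poll timer; not MCE code. */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>
#include <sys/timerfd.h>

int main(void)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
		.it_interval = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 },
	};
	int fd = timerfd_create(CLOCK_MONOTONIC, 0);

	if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0) {
		perror("timerfd");
		return 1;
	}

	for (int i = 0; i < 3; i++) {
		uint64_t expirations;

		/* Blocks until the next period; the interval re-arms the timer,
		 * much like returning HRTIMER_RESTART after hrtimer_forward_now(). */
		if (read(fd, &expirations, sizeof(expirations)) == sizeof(expirations))
			printf("poll tick %d (%llu expirations)\n", i,
			       (unsigned long long)expirations);
	}
	close(fd);
	return 0;
}
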
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 6a9563ae89f2..f73ec82d4507 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -67,8 +67,8 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
+#include <linux/swork.h>
#include <linux/jump_label.h>
- #include <asm/processor.h>
-@@ -1384,6 +1385,56 @@ static void mce_do_trigger(struct work_s
+ #include <asm/intel-family.h>
+@@ -1397,6 +1398,56 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -125,7 +125,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
/*
* Notify the user(s) about new machine check events.
* Can be called from interrupt context, but not from machine check/NMI
-@@ -1391,19 +1442,8 @@ static DECLARE_WORK(mce_trigger_work, mc
+@@ -1404,19 +1455,8 @@ static DECLARE_WORK(mce_trigger_work, mc
*/
int mce_notify_irq(void)
{
@@ -146,7 +146,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
return 1;
}
return 0;
-@@ -2545,6 +2585,10 @@ static __init int mcheck_init_device(voi
+@@ -2561,6 +2601,10 @@ static __init int mcheck_init_device(voi
goto err_out;
}
diff --git a/patches/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch b/patches/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
deleted file mode 100644
index dd5b734d5373..000000000000
--- a/patches/x86-mm-cpa-avoid-wbinvd-for-PREEMPT.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: John Ogness <john.ogness@linutronix.de>
-Date: Mon, 30 Jan 2017 09:41:21 +0100
-Subject: [PATCH] x86/mm/cpa: avoid wbinvd() for PREEMPT
-
-Although wbinvd() is faster than flushing many individual pages, it
-blocks the memory bus for "long" periods of time (>100us), thus
-directly causing unusually large latencies on all CPUs, regardless
-of any CPU isolation features that may be active.
-
-For 1024 pages, flushing those pages individually can take up to
-2200us, but the task remains fully preemptible during that time.
-
-Cc: stable-rt@vger.kernel.org
-Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Signed-off-by: John Ogness <john.ogness@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/mm/pageattr.c | 8 ++++++++
- 1 file changed, 8 insertions(+)
-
---- a/arch/x86/mm/pageattr.c
-+++ b/arch/x86/mm/pageattr.c
-@@ -214,7 +214,15 @@ static void cpa_flush_array(unsigned lon
- int in_flags, struct page **pages)
- {
- unsigned int i, level;
-+#ifdef CONFIG_PREEMPT
-+ /*
-+ * Avoid wbinvd() because it causes latencies on all CPUs,
-+ * regardless of any CPU isolation that may be in effect.
-+ */
-+ unsigned long do_wbinvd = 0;
-+#else
- unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */
-+#endif
-
- BUG_ON(irqs_disabled());
-
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 96d6afe9c77f..789cd45e6075 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -17,17 +17,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -17,6 +17,7 @@ config X86_64
- ### Arch settings
- config X86
- def_bool y
+@@ -160,6 +160,7 @@ config X86
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_PREEMPT_LAZY
- select ACPI_LEGACY_TABLES_LOOKUP if ACPI
- select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
- select ANON_INODES
+ select HAVE_STACK_VALIDATION if X86_64
+ select HAVE_SYSCALL_TRACEPOINTS
+ select HAVE_UNSTABLE_SCHED_CLOCK
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -129,7 +129,7 @@ static long syscall_trace_enter(struct p
+@@ -130,7 +130,7 @@ static long syscall_trace_enter(struct p
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -145,7 +145,7 @@ static void exit_to_usermode_loop(struct
+@@ -146,7 +146,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -47,10 +47,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -308,8 +308,25 @@ END(ret_from_exception)
+@@ -332,8 +332,25 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
- need_resched:
+ .Lneed_resched:
+ # preempt count == 0 + NEED_RS set?
cmpl $0, PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
@@ -62,12 +62,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+ jne restore_all
+
-+ movl PER_CPU_VAR(current_task), %ebp
-+ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
-+ jnz restore_all
++ movl PER_CPU_VAR(current_task), %ebp
++ cmpl $0,TASK_TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ?
++ jnz restore_all
+
-+ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
-+ jz restore_all
++ testl $_TIF_NEED_RESCHED_LAZY, TASK_TI_flags(%ebp)
++ jz restore_all
+test_int_off:
+#endif
testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -546,7 +546,23 @@ GLOBAL(retint_user)
+@@ -544,7 +544,23 @@ GLOBAL(retint_user)
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1:
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
-@@ -79,17 +79,46 @@ static __always_inline void __preempt_co
+@@ -85,17 +85,46 @@ static __always_inline void __preempt_co
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
BLANK();
-@@ -91,4 +92,5 @@ void common(void) {
+@@ -92,4 +93,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index 22e3659d4c71..28194c7a053e 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -232,8 +232,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -242,8 +242,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API