-rw-r--r-- patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch | 25
-rw-r--r-- patches/0001-timers-Use-static-keys-for-migrate_enable-and-nohz_a.patch | 307
-rw-r--r-- patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch | 6
-rw-r--r-- patches/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch | 24
-rw-r--r-- patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch | 10
-rw-r--r-- patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch | 35
-rw-r--r-- patches/0005-hrtimer-Fix-hrtimer-function-description.patch | 60
-rw-r--r-- patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch | 61
-rw-r--r-- patches/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch | 2
-rw-r--r-- patches/0006-hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALT.patch | 41
-rw-r--r-- patches/0006-tracing-Make-traceprobe-parsing-code-reusable.patch | 4
-rw-r--r-- patches/0007-hrtimer-Cleanup-hrtimer_mode-enum.patch | 45
-rw-r--r-- patches/0008-tracing-hrtimer-Take-all-clock-bases-and-modes-into-.patch | 55
-rw-r--r-- patches/0009-tracing-hrtimer-Print-hrtimer-mode-in-hrtimer_start-.patch | 113
-rw-r--r-- patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch | 96
-rw-r--r-- patches/0010-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch | 79
-rw-r--r-- patches/0011-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch (renamed from patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch) | 26
-rw-r--r-- patches/0011-tracing-Remove-code-which-merges-duplicates.patch | 4
-rw-r--r-- patches/0012-hrtimer-Make-room-in-struct-hrtimer_cpu_base.patch | 33
-rw-r--r-- patches/0012-ring-buffer-Add-interface-for-setting-absolute-time-.patch | 10
-rw-r--r-- patches/0013-hrtimer-Reduce-conditional-code-hres_active.patch (renamed from patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch) | 82
-rw-r--r-- patches/0013-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch | 4
-rw-r--r-- patches/0014-hrtimer-Use-accesor-functions-instead-of-direct-acce.patch | 35
-rw-r--r-- patches/0014-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch | 12
-rw-r--r-- patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch | 119
-rw-r--r-- patches/0015-hrtimer-Make-the-remote-enqueue-check-unconditional.patch (renamed from patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch) | 82
-rw-r--r-- patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch | 77
-rw-r--r-- patches/0016-hrtimer-Make-hrtimer_cpu_base.next_timer-handling-un.patch | 98
-rw-r--r-- patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch | 303
-rw-r--r-- patches/0017-hrtimer-Make-hrtimer_reprogramm-unconditional.patch (renamed from patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch) | 51
-rw-r--r-- patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch | 82
-rw-r--r-- patches/0018-hrtimer-Reduce-conditional-code-and-make-hrtimer_for.patch | 104
-rw-r--r-- patches/0018-tracing-Add-per-element-variable-support-to-tracing_.patch | 10
-rw-r--r-- patches/0019-hrtimer-Unify-handling-of-hrtimer-remove.patch | 88
-rw-r--r-- patches/0020-hrtimer-Unify-handling-of-remote-enqueue.patch | 157
-rw-r--r-- patches/0020-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch | 2
-rw-r--r-- patches/0021-hrtimer-Make-remote-enqueue-decision-less-restrictiv.patch (renamed from patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch) | 22
-rw-r--r-- patches/0022-hrtimer-Remove-base-argument-from-hrtimer_reprogram.patch (renamed from patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch) | 25
-rw-r--r-- patches/0023-hrtimer-Split-hrtimer_start_range_ns.patch (renamed from patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch) | 33
-rw-r--r-- patches/0024-hrtimer-Split-__hrtimer_get_next_event.patch (renamed from patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch) | 27
-rw-r--r-- patches/0025-hrtimer-Use-irqsave-irqrestore-around-__run_hrtimer.patch | 144
-rw-r--r-- patches/0026-hrtimer-Add-clock-bases-and-hrtimer-mode-for-soft-ir.patch | 108
-rw-r--r-- patches/0027-hrtimer-Prepare-handling-of-hard-and-softirq-based-h.patch | 116
-rw-r--r-- patches/0028-hrtimer-Implement-support-for-softirq-based-hrtimers.patch | 508
-rw-r--r-- patches/0028-tracing-Add-variable-reference-handling-to-hist-trig.patch | 8
-rw-r--r-- patches/0029-hrtimer-Implement-SOFT-HARD-clock-base-selection.patch | 55
-rw-r--r-- patches/0030-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch (renamed from patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch) | 53
-rw-r--r-- patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch (renamed from patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch) | 14
-rw-r--r-- patches/0032-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch (renamed from patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch) | 45
-rw-r--r-- patches/0033-softirq-Remove-tasklet_hrtimer.patch (renamed from patches/0022-softirq-Remove-tasklet_hrtimer.patch) | 6
-rw-r--r-- patches/0034-ALSA-dummy-Replace-tasklet-with-softirq-hrtimer.patch | 98
-rw-r--r-- patches/0035-usb-gadget-NCM-Replace-tasklet-with-softirq-hrtimer.patch | 96
-rw-r--r-- patches/0036-net-mvpp2-Replace-tasklet-with-softirq-hrtimer.patch | 132
-rw-r--r-- patches/0039-tracing-Make-tracing_set_clock-non-static.patch | 4
-rw-r--r-- patches/HACK-printk-drop-the-logbuf_lock-more-often.patch | 10
-rw-r--r-- patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch | 6
-rw-r--r-- patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch | 6
-rw-r--r-- patches/RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch | 2
-rw-r--r-- patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch | 4
-rw-r--r-- patches/add_migrate_disable.patch | 16
-rw-r--r-- patches/apparmor-use-a-locallock-instead-preempt_disable.patch | 2
-rw-r--r-- patches/arch-arm64-Add-lazy-preempt-support.patch | 36
-rw-r--r-- patches/arm-enable-highmem-for-rt.patch | 6
-rw-r--r-- patches/arm-include-definition-for-cpumask_t.patch | 2
-rw-r--r-- patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch | 6
-rw-r--r-- patches/arm-preempt-lazy-support.patch | 20
-rw-r--r-- patches/arm-xen-don-t-inclide-rwlock.h-directly.patch | 26
-rw-r--r-- patches/arm64-xen--Make-XEN-depend-on-non-rt.patch | 2
-rw-r--r-- patches/at91_dont_enable_disable_clock.patch | 12
-rw-r--r-- patches/block-blk-mq-use-swait.patch | 10
-rw-r--r-- patches/block-mq-don-t-complete-requests-via-IPI.patch | 12
-rw-r--r-- patches/block-mq-drop-preempt-disable.patch | 6
-rw-r--r-- patches/block-mq-use-cpu_light.patch | 2
-rw-r--r-- patches/block-shorten-interrupt-disabled-regions.patch | 10
-rw-r--r-- patches/block-use-cpu-chill.patch | 6
-rw-r--r-- patches/bug-rt-dependend-variants.patch | 2
-rw-r--r-- patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch | 22
-rw-r--r-- patches/cgroups-use-simple-wait-in-css_release.patch | 10
-rw-r--r-- patches/clocksource-tclib-allow-higher-clockrates.patch | 20
-rw-r--r-- patches/completion-use-simple-wait-queues.patch | 115
-rw-r--r-- patches/cond-resched-lock-rt-tweak.patch | 2
-rw-r--r-- patches/cond-resched-softirq-rt.patch | 6
-rw-r--r-- patches/cpu-hotplug--Implement-CPU-pinning.patch | 22
-rw-r--r-- patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch | 14
-rw-r--r-- patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch | 176
-rw-r--r-- patches/cpumask-disable-offstack-on-rt.patch | 4
-rw-r--r-- patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch | 44
-rw-r--r-- patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch | 4
-rw-r--r-- patches/debugobjects-rt.patch | 2
-rw-r--r-- patches/dm-make-rt-aware.patch | 2
-rw-r--r-- patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch | 22
-rw-r--r-- patches/drivers-tty-fix-omap-lock-crap.patch | 4
-rw-r--r-- patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch | 12
-rw-r--r-- patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch | 10
-rw-r--r-- patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch | 8
-rw-r--r-- patches/fs-aio-simple-simple-work.patch | 8
-rw-r--r-- patches/fs-convert-two-mroe-BH_Uptodate_Lock-related-bitspin.patch | 66
-rw-r--r-- patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch | 2
-rw-r--r-- patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch | 6
-rw-r--r-- patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch | 26
-rw-r--r-- patches/fs-jbd-replace-bh_state-lock.patch | 4
-rw-r--r-- patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch | 20
-rw-r--r-- patches/fs-replace-bh_uptodate_lock-for-rt.patch | 52
-rw-r--r-- patches/ftrace-Fix-trace-header-alignment.patch | 2
-rw-r--r-- patches/ftrace-migrate-disable-tracing.patch | 6
-rw-r--r-- patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch | 5
-rw-r--r-- patches/futex-requeue-pi-fix.patch | 4
-rw-r--r-- patches/futex-workaround-migrate_disable-enable-in-different.patch | 4
-rw-r--r-- patches/genirq-disable-irqpoll-on-rt.patch | 4
-rw-r--r-- patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch | 14
-rw-r--r-- patches/genirq-force-threading.patch | 2
-rw-r--r-- patches/genirq-update-irq_set_irqchip_state-documentation.patch | 2
-rw-r--r-- patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 22
-rw-r--r-- patches/hotplug-light-get-online-cpus.patch | 10
-rw-r--r-- patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch | 4
-rw-r--r-- patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 192
-rw-r--r-- patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 26
-rw-r--r-- patches/hrtimer-soft-bases-timekeeping.patch | 31
-rw-r--r-- patches/hrtimers-prepare-full-preemption.patch | 24
-rw-r--r-- patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch | 29
-rw-r--r-- patches/iommu-amd--Use-WARN_ON_NORT.patch | 4
-rw-r--r-- patches/iommu-amd-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-for.patch | 43
-rw-r--r-- patches/iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch | 42
-rw-r--r-- patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch | 18
-rw-r--r-- patches/irqwork-Move-irq-safe-work-to-irq-context.patch | 6
-rw-r--r-- patches/irqwork-push_most_work_into_softirq_context.patch | 8
-rw-r--r-- patches/jump-label-rt.patch | 2
-rw-r--r-- patches/kconfig-disable-a-few-options-rt.patch | 4
-rw-r--r-- patches/kconfig-preempt-rt-full.patch | 6
-rw-r--r-- patches/kernel-SRCU-provide-a-static-initializer.patch | 20
-rw-r--r-- patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch | 4
-rw-r--r-- patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 109
-rw-r--r-- patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 8
-rw-r--r-- patches/kgb-serial-hackaround.patch | 4
-rw-r--r-- patches/list_bl-fixup-bogus-lockdep-warning.patch | 2
-rw-r--r-- patches/list_bl.h-make-list-head-locking-RT-safe.patch | 6
-rw-r--r-- patches/local-irq-rt-depending-variants.patch | 4
-rw-r--r-- patches/localversion.patch | 2
-rw-r--r-- patches/lockdep-disable-self-test.patch | 2
-rw-r--r-- patches/lockdep-no-softirq-accounting-on-rt.patch | 42
-rw-r--r-- patches/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch | 22
-rw-r--r-- patches/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch | 4
-rw-r--r-- patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch | 20
-rw-r--r-- patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch | 11
-rw-r--r-- patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch | 2
-rw-r--r-- patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch | 9
-rw-r--r-- patches/md-raid5-do-not-disable-interrupts.patch | 11
-rw-r--r-- patches/md-raid5-percpu-handling-rt-aware.patch | 10
-rw-r--r-- patches/mips-disable-highmem-on-rt.patch | 2
-rw-r--r-- patches/mm--rt--Fix-generic-kmap_atomic-for-RT.patch | 4
-rw-r--r-- patches/mm-bounce-local-irq-save-nort.patch | 2
-rw-r--r-- patches/mm-convert-swap-to-percpu-locked.patch | 6
-rw-r--r-- patches/mm-disable-sloub-rt.patch | 4
-rw-r--r-- patches/mm-enable-slub.patch | 84
-rw-r--r-- patches/mm-make-vmstat-rt-aware.patch | 28
-rw-r--r-- patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 14
-rw-r--r-- patches/mm-memcontrol-do_not_disable_irq.patch | 37
-rw-r--r-- patches/mm-page-alloc-use-local-lock-on-target-cpu.patch | 2
-rw-r--r-- patches/mm-page_alloc-reduce-lock-sections-further.patch | 16
-rw-r--r-- patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch | 34
-rw-r--r-- patches/mm-protect-activate-switch-mm.patch | 2
-rw-r--r-- patches/mm-rt-kmap-atomic-scheduling.patch | 14
-rw-r--r-- patches/mm-vmalloc-use-get-cpu-light.patch | 10
-rw-r--r-- patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch | 28
-rw-r--r-- patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch | 8
-rw-r--r-- patches/move_sched_delayed_work_to_helper.patch | 4
-rw-r--r-- patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch | 4
-rw-r--r-- patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch | 34
-rw-r--r-- patches/net-add-back-the-missing-serialization-in-ip_send_un.patch | 10
-rw-r--r-- patches/net-another-local-irq-disable-alloc-atomic-headache.patch | 6
-rw-r--r-- patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch | 2
-rw-r--r-- patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch | 14
-rw-r--r-- patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch | 2
-rw-r--r-- patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch | 15
-rw-r--r-- patches/net-make-devnet_rename_seq-a-mutex.patch | 12
-rw-r--r-- patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch | 14
-rw-r--r-- patches/net-prevent-abba-deadlock.patch | 2
-rw-r--r-- patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch | 4
-rw-r--r-- patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch | 2
-rw-r--r-- patches/net-take-the-tcp_sk_lock-lock-with-BH-disabled.patch | 6
-rw-r--r-- patches/net-use-cpu-chill.patch | 4
-rw-r--r-- patches/net_disable_NET_RX_BUSY_POLL.patch | 2
-rw-r--r-- patches/oleg-signal-rt-fix.patch | 6
-rw-r--r-- patches/panic-disable-random-on-rt.patch | 2
-rw-r--r-- patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch | 14
-rw-r--r-- patches/peter_zijlstra-frob-rcu.patch | 2
-rw-r--r-- patches/peterz-percpu-rwsem-rt.patch | 36
-rw-r--r-- patches/pid.h-include-atomic.h.patch | 2
-rw-r--r-- patches/ping-sysrq.patch | 4
-rw-r--r-- patches/posix-timers-thread-posix-cpu-timers-on-rt.patch | 20
-rw-r--r-- patches/power-disable-highmem-on-rt.patch | 2
-rw-r--r-- patches/power-use-generic-rwsem-on-rt.patch | 2
-rw-r--r-- patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch | 2
-rw-r--r-- patches/powerpc-preempt-lazy-support.patch | 18
-rw-r--r-- patches/preempt-lazy-support.patch | 66
-rw-r--r-- patches/preempt-nort-rt-variants.patch | 4
-rw-r--r-- patches/printk-kill.patch | 23
-rw-r--r-- patches/printk-rt-aware.patch | 8
-rw-r--r-- patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 12
-rw-r--r-- patches/radix-tree-use-local-locks.patch | 6
-rw-r--r-- patches/random-make-it-work-on-rt.patch | 6
-rw-r--r-- patches/rbtree-include-rcu.h-because-we-use-it.patch | 2
-rw-r--r-- patches/rcu-Eliminate-softirq-processing-from-rcutree.patch | 22
-rw-r--r-- patches/rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch | 2
-rw-r--r-- patches/rcu-disable-rcu-fast-no-hz-on-rt.patch | 2
-rw-r--r-- patches/rcu-make-RCU_BOOST-default-on-RT.patch | 2
-rw-r--r-- patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch | 50
-rw-r--r-- patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch | 2
-rw-r--r-- patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r-- patches/rt-introduce-cpu-chill.patch | 6
-rw-r--r-- patches/rt-local-irq-lock.patch | 2
-rw-r--r-- patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch | 2
-rw-r--r-- patches/rtmutex-Make-lock_killable-work.patch | 2
-rw-r--r-- patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch | 8
-rw-r--r-- patches/rtmutex-add-sleeping-lock-implementation.patch | 83
-rw-r--r-- patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch | 35
-rw-r--r-- patches/rtmutex-avoid-include-hell.patch | 2
-rw-r--r-- patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch | 16
-rw-r--r-- patches/rtmutex-futex-prepare-rt.patch | 28
-rw-r--r-- patches/rtmutex-lock-killable.patch | 2
-rw-r--r-- patches/rtmutex-trylock-is-okay-on-RT.patch | 2
-rw-r--r-- patches/rtmutex-wire-up-RT-s-locking.patch | 28
-rw-r--r-- patches/rtmutex_dont_include_rcu.patch | 8
-rw-r--r-- patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch | 2
-rw-r--r-- patches/sched-Remove-TASK_ALL.patch | 2
-rw-r--r-- patches/sched-delay-put-task.patch | 6
-rw-r--r-- patches/sched-disable-ttwu-queue.patch | 2
-rw-r--r-- patches/sched-might-sleep-do-not-account-rcu-depth.patch | 6
-rw-r--r-- patches/sched-mmdrop-delayed.patch | 16
-rw-r--r-- patches/sched-rt-mutex-wakeup.patch | 10
-rw-r--r-- patches/sched-ttwu-ensure-success-return-is-correct.patch | 2
-rw-r--r-- patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r-- patches/seqlock-prevent-rt-starvation.patch | 16
-rw-r--r-- patches/series | 66
-rw-r--r-- patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch | 6
-rw-r--r-- patches/skbufhead-raw-lock.patch | 22
-rw-r--r-- patches/slub-disable-SLUB_CPU_PARTIAL.patch | 2
-rw-r--r-- patches/slub-enable-irqs-for-no-wait.patch | 4
-rw-r--r-- patches/softirq-disable-softirq-stacks-for-rt.patch | 28
-rw-r--r-- patches/softirq-preempt-fix-3-re.patch | 32
-rw-r--r-- patches/softirq-split-locks.patch | 26
-rw-r--r-- patches/sparc64-use-generic-rwsem-spinlocks-rt.patch | 2
-rw-r--r-- patches/srcu-Prohibit-call_srcu-use-under-raw-spinlocks.patch | 74
-rw-r--r-- patches/srcu-replace-local_irqsave-with-a-locallock.patch | 8
-rw-r--r-- patches/srcu-use-cpu_online-instead-custom-check.patch | 10
-rw-r--r-- patches/suspend-prevernt-might-sleep-splats.patch | 6
-rw-r--r-- patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch | 6
-rw-r--r-- patches/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch | 52
-rw-r--r-- patches/timekeeping-split-jiffies-lock.patch | 2
-rw-r--r-- patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch | 2
-rw-r--r-- patches/timer-fd-avoid-live-lock.patch | 2
-rw-r--r-- patches/timers-prepare-for-full-preemption.patch | 14
-rw-r--r-- patches/tracing-account-for-preempt-off-in-preempt_schedule.patch | 2
-rw-r--r-- patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch | 2
-rw-r--r-- patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch | 4
-rw-r--r-- patches/wait.h-include-atomic.h.patch | 2
-rw-r--r-- patches/work-queue-work-around-irqsafe-timer-optimization.patch | 2
-rw-r--r-- patches/work-simple-Simple-work-queue-implemenation.patch | 2
-rw-r--r-- patches/workqueue-distangle-from-rq-lock.patch | 22
-rw-r--r-- patches/workqueue-prevent-deadlock-stall.patch | 20
-rw-r--r-- patches/workqueue-use-locallock.patch | 24
-rw-r--r-- patches/workqueue-use-rcu.patch | 60
-rw-r--r-- patches/x86-io-apic-migra-no-unmask.patch | 2
-rw-r--r-- patches/x86-kvm-require-const-tsc-for-rt.patch | 2
-rw-r--r-- patches/x86-mce-use-swait-queue-for-mce-wakeups.patch | 4
-rw-r--r-- patches/x86-preempt-lazy.patch | 34
-rw-r--r-- patches/x86-signal-delay-calling-signals-on-32bit.patch | 2
-rw-r--r-- patches/x86-stackprot-no-random-on-rt.patch | 4
-rw-r--r-- patches/x86-use-gen-rwsem-spinlocks-rt.patch | 2
269 files changed, 4322 insertions, 2804 deletions
diff --git a/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch b/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
deleted file mode 100644
index 01318ec4a771..000000000000
--- a/patches/0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:02 +0000
-Subject: [PATCH 01/25] hrtimer: Use predefined function for updating
- next_timer
-
-There already exist a function for updating the next_timer
-hrtimer_update_next_timer().
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -630,7 +630,7 @@ static void hrtimer_reprogram(struct hrt
- return;
-
- /* Update the pointer to the next expiring timer */
-- cpu_base->next_timer = timer;
-+ hrtimer_update_next_timer(cpu_base, timer);
-
- /*
- * If a hang was detected in the last timer interrupt then we
diff --git a/patches/0001-timers-Use-static-keys-for-migrate_enable-and-nohz_a.patch b/patches/0001-timers-Use-static-keys-for-migrate_enable-and-nohz_a.patch
new file mode 100644
index 000000000000..e8504be31111
--- /dev/null
+++ b/patches/0001-timers-Use-static-keys-for-migrate_enable-and-nohz_a.patch
@@ -0,0 +1,307 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Mon, 13 Nov 2017 20:23:44 +0100
+Subject: [PATCH 01/36] timers: Use static keys for migrate_enable and
+ nohz_active
+
+The fields migration_enabled and nohz_active are going to be moved into a
+bitfield. In a bitfield a change to one bit causes a RMW operation and,
+without holding a lock, a concurrent change on a second CPU could cause the
+loss of an update.
+To avoid that, convert both fields to static keys (static_branch).
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 -
+ kernel/time/hrtimer.c | 17 ++------
+ kernel/time/tick-internal.h | 21 +++++++---
+ kernel/time/tick-sched.c | 2
+ kernel/time/timer.c | 91 ++++++++++++++++++++++----------------------
+ 5 files changed, 69 insertions(+), 66 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -153,8 +153,6 @@ enum hrtimer_base_type {
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+- * @migration_enabled: The migration of hrtimers to other cpus is enabled
+- * @nohz_active: The nohz functionality is enabled
+ * @expires_next: absolute time of the next event which was scheduled
+ * via clock_set_next_event()
+ * @next_timer: Pointer to the first expiring timer
+@@ -178,8 +176,6 @@ struct hrtimer_cpu_base {
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+- bool migration_enabled;
+- bool nohz_active;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hres_active : 1,
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -178,23 +178,16 @@ hrtimer_check_target(struct hrtimer *tim
+ #endif
+ }
+
+-#ifdef CONFIG_NO_HZ_COMMON
+-static inline
+-struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+- int pinned)
+-{
+- if (pinned || !base->migration_enabled)
+- return base;
+- return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+-}
+-#else
+ static inline
+ struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+ int pinned)
+ {
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++ if (static_branch_unlikely(&timers_migration_enabled) && !pinned)
++ return &per_cpu(hrtimer_bases, get_nohz_timer_target());
++#endif
+ return base;
+ }
+-#endif
+
+ /*
+ * We switch the timer base to a power-optimized selected CPU target,
+@@ -971,7 +964,7 @@ void hrtimer_start_range_ns(struct hrtim
+ * Kick to reschedule the next tick to handle the new timer
+ * on dynticks target.
+ */
+- if (new_base->cpu_base->nohz_active)
++ if (is_timers_nohz_active())
+ wake_up_nohz_cpu(new_base->cpu_base->cpu);
+ } else {
+ hrtimer_reprogram(timer, new_base);
+--- a/kernel/time/tick-internal.h
++++ b/kernel/time/tick-internal.h
+@@ -150,14 +150,25 @@ static inline void tick_nohz_init(void)
+
+ #ifdef CONFIG_NO_HZ_COMMON
+ extern unsigned long tick_nohz_active;
++extern void timers_update_nohz(void);
++extern struct static_key_false timers_nohz_active;
++
++static inline bool is_timers_nohz_active(void)
++{
++ return static_branch_unlikely(&timers_nohz_active);
++}
++
++#ifdef CONFIG_SMP
++extern struct static_key_false timers_migration_enabled;
++#endif
+ #else
++static inline void timers_update_nohz(void) { }
+ #define tick_nohz_active (0)
+-#endif
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+-extern void timers_update_migration(bool update_nohz);
+-#else
+-static inline void timers_update_migration(bool update_nohz) { }
++static inline bool is_timers_nohz_active(void)
++{
++ return false;
++}
+ #endif
+
+ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
+--- a/kernel/time/tick-sched.c
++++ b/kernel/time/tick-sched.c
+@@ -1103,7 +1103,7 @@ static inline void tick_nohz_activate(st
+ ts->nohz_mode = mode;
+ /* One update is enough */
+ if (!test_and_set_bit(0, &tick_nohz_active))
+- timers_update_migration(true);
++ timers_update_nohz();
+ }
+
+ /**
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -200,8 +200,6 @@ struct timer_base {
+ unsigned long clk;
+ unsigned long next_expiry;
+ unsigned int cpu;
+- bool migration_enabled;
+- bool nohz_active;
+ bool is_idle;
+ bool must_forward_clk;
+ DECLARE_BITMAP(pending_map, WHEEL_SIZE);
+@@ -210,45 +208,59 @@ struct timer_base {
+
+ static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
+
+-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++#ifdef CONFIG_NO_HZ_COMMON
++
++DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
++static DEFINE_MUTEX(timer_keys_mutex);
++
++static void timer_update_keys(struct work_struct *work);
++static DECLARE_WORK(timer_update_work, timer_update_keys);
++
++#ifdef CONFIG_SMP
+ unsigned int sysctl_timer_migration = 1;
+
+-void timers_update_migration(bool update_nohz)
++DEFINE_STATIC_KEY_FALSE(timers_migration_enabled);
++
++static void timers_update_migration(void)
+ {
+ bool on = sysctl_timer_migration && tick_nohz_active;
+- unsigned int cpu;
+
+- /* Avoid the loop, if nothing to update */
+- if (this_cpu_read(timer_bases[BASE_STD].migration_enabled) == on)
+- return;
++ if (on)
++ static_branch_enable(&timers_migration_enabled);
++ else
++ static_branch_disable(&timers_migration_enabled);
++}
++#else
++static inline void timers_update_migration(void) { }
++#endif /* !CONFIG_SMP */
+
+- for_each_possible_cpu(cpu) {
+- per_cpu(timer_bases[BASE_STD].migration_enabled, cpu) = on;
+- per_cpu(timer_bases[BASE_DEF].migration_enabled, cpu) = on;
+- per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+- if (!update_nohz)
+- continue;
+- per_cpu(timer_bases[BASE_STD].nohz_active, cpu) = true;
+- per_cpu(timer_bases[BASE_DEF].nohz_active, cpu) = true;
+- per_cpu(hrtimer_bases.nohz_active, cpu) = true;
+- }
++static void timer_update_keys(struct work_struct *work)
++{
++ mutex_lock(&timer_keys_mutex);
++ timers_update_migration();
++ static_branch_enable(&timers_nohz_active);
++ mutex_unlock(&timer_keys_mutex);
++}
++
++void timers_update_nohz(void)
++{
++ schedule_work(&timer_update_work);
+ }
+
+ int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+ {
+- static DEFINE_MUTEX(mutex);
+ int ret;
+
+- mutex_lock(&mutex);
++ mutex_lock(&timer_keys_mutex);
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (!ret && write)
+- timers_update_migration(false);
+- mutex_unlock(&mutex);
++ timers_update_migration();
++ mutex_unlock(&timer_keys_mutex);
+ return ret;
+ }
+-#endif
++#endif /* NO_HZ_COMMON */
+
+ static unsigned long round_jiffies_common(unsigned long j, int cpu,
+ bool force_up)
+@@ -534,7 +546,7 @@ static void
+ static void
+ trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+ {
+- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
++ if (!is_timers_nohz_active())
+ return;
+
+ /*
+@@ -817,7 +829,7 @@ static inline struct timer_base *get_tim
+ * If the timer is deferrable and nohz is active then we need to use
+ * the deferrable base.
+ */
+- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
++ if (is_timers_nohz_active() &&
+ (tflags & TIMER_DEFERRABLE))
+ base = per_cpu_ptr(&timer_bases[BASE_DEF], cpu);
+ return base;
+@@ -831,7 +843,7 @@ static inline struct timer_base *get_tim
+ * If the timer is deferrable and nohz is active then we need to use
+ * the deferrable base.
+ */
+- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active &&
++ if (is_timers_nohz_active() &&
+ (tflags & TIMER_DEFERRABLE))
+ base = this_cpu_ptr(&timer_bases[BASE_DEF]);
+ return base;
+@@ -842,21 +854,20 @@ static inline struct timer_base *get_tim
+ return get_timer_cpu_base(tflags, tflags & TIMER_CPUMASK);
+ }
+
+-#ifdef CONFIG_NO_HZ_COMMON
+ static inline struct timer_base *
+ get_target_base(struct timer_base *base, unsigned tflags)
+ {
+-#ifdef CONFIG_SMP
+- if ((tflags & TIMER_PINNED) || !base->migration_enabled)
+- return get_timer_this_cpu_base(tflags);
+- return get_timer_cpu_base(tflags, get_nohz_timer_target());
+-#else
+- return get_timer_this_cpu_base(tflags);
++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
++ if (static_branch_unlikely(&timers_migration_enabled) &&
++ !(tflags & TIMER_PINNED))
++ return get_timer_cpu_base(tflags, get_nohz_timer_target());
+ #endif
++ return get_timer_this_cpu_base(tflags);
+ }
+
+ static inline void forward_timer_base(struct timer_base *base)
+ {
++#ifdef CONFIG_NO_HZ_COMMON
+ unsigned long jnow;
+
+ /*
+@@ -880,16 +891,8 @@ static inline void forward_timer_base(st
+ base->clk = jnow;
+ else
+ base->clk = base->next_expiry;
+-}
+-#else
+-static inline struct timer_base *
+-get_target_base(struct timer_base *base, unsigned tflags)
+-{
+- return get_timer_this_cpu_base(tflags);
+-}
+-
+-static inline void forward_timer_base(struct timer_base *base) { }
+ #endif
++}
+
+
+ /*
+@@ -1644,7 +1647,7 @@ static __latent_entropy void run_timer_s
+ base->must_forward_clk = false;
+
+ __run_timers(base);
+- if (IS_ENABLED(CONFIG_NO_HZ_COMMON) && base->nohz_active)
++ if (is_timers_nohz_active())
+ __run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
+ }
+
+@@ -1658,7 +1661,7 @@ void run_local_timers(void)
+ hrtimer_run_queues();
+ /* Raise the softirq only if required. */
+ if (time_before(jiffies, base->clk)) {
+- if (!IS_ENABLED(CONFIG_NO_HZ_COMMON) || !base->nohz_active)
++ if (!is_timers_nohz_active())
+ return;
+ /* CPU is awake, so check the deferrable base. */
+ base++;
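[The patch above replaces the per-cpu migration_enabled/nohz_active booleans
with global static keys, so the common case costs a patched NOP in the
instruction stream instead of a load plus conditional branch. A minimal
kernel-context sketch of the pattern; the feature name and slow-path helper
are hypothetical, while the jump-label and workqueue APIs are the ones the
patch itself uses:]

    #include <linux/jump_label.h>
    #include <linux/workqueue.h>

    static void do_feature_work(void) { }   /* hypothetical slow path */

    /* Key defaults to false: the branch body is skipped via a NOP
     * until the key is enabled by patching the kernel text. */
    static DEFINE_STATIC_KEY_FALSE(feature_enabled);

    static inline void hot_path(void)
    {
        if (static_branch_unlikely(&feature_enabled))
            do_feature_work();
    }

    /* Flipping a static key rewrites code and may sleep, so -- like
     * timers_update_nohz() above -- defer it to process context. */
    static void feature_update_keys(struct work_struct *work)
    {
        static_branch_enable(&feature_enabled);
    }
    static DECLARE_WORK(feature_update_work, feature_update_keys);

    static void feature_activate(void) /* safe from atomic context */
    {
        schedule_work(&feature_update_work);
    }

[This mirrors why the patch routes timers_update_nohz() through a work item:
tick_nohz_activate() runs in a context where static_branch_enable() must not
be called directly.]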
diff --git a/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch b/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch
index 7362d5417520..5ac5dac1e983 100644
--- a/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch
+++ b/patches/0002-hrtimer-Correct-blantanly-wrong-comment.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:03 +0000
-Subject: [PATCH 02/25] hrtimer: Correct blantanly wrong comment
+Date: Sun, 22 Oct 2017 23:39:39 +0200
+Subject: [PATCH 02/36] hrtimer: Correct blantanly wrong comment
The protection of a hrtimer which runs its callback against migration to a
different CPU has nothing to do with hard interrupt context.
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1204,9 +1204,9 @@ static void __run_hrtimer(struct hrtimer
+@@ -1197,9 +1197,9 @@ static void __run_hrtimer(struct hrtimer
timer->is_rel = false;
/*
diff --git a/patches/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch b/patches/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch
index 32fee49967ff..3c1a2903a46b 100644
--- a/patches/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch
+++ b/patches/0002-tracing-Reverse-the-order-of-trace_types_lock-and-ev.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -7677,6 +7677,7 @@ static int instance_mkdir(const char *na
+@@ -7687,6 +7687,7 @@ static int instance_mkdir(const char *na
struct trace_array *tr;
int ret;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_lock(&trace_types_lock);
ret = -EEXIST;
-@@ -7732,6 +7733,7 @@ static int instance_mkdir(const char *na
+@@ -7742,6 +7743,7 @@ static int instance_mkdir(const char *na
list_add(&tr->list, &ftrace_trace_arrays);
mutex_unlock(&trace_types_lock);
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
-@@ -7743,6 +7745,7 @@ static int instance_mkdir(const char *na
+@@ -7753,6 +7755,7 @@ static int instance_mkdir(const char *na
out_unlock:
mutex_unlock(&trace_types_lock);
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
-@@ -7755,6 +7758,7 @@ static int instance_rmdir(const char *na
+@@ -7765,6 +7768,7 @@ static int instance_rmdir(const char *na
int ret;
int i;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_lock(&trace_types_lock);
ret = -ENODEV;
-@@ -7800,6 +7804,7 @@ static int instance_rmdir(const char *na
+@@ -7810,6 +7814,7 @@ static int instance_rmdir(const char *na
out_unlock:
mutex_unlock(&trace_types_lock);
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!system)
return -ENODEV;
-@@ -2290,15 +2290,15 @@ static void __add_event_to_tracers(struc
+@@ -2294,15 +2294,15 @@ static void __add_event_to_tracers(struc
int trace_add_event_call(struct trace_event_call *call)
{
int ret;
@@ -99,7 +99,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2352,13 +2352,13 @@ int trace_remove_event_call(struct trace
+@@ -2356,13 +2356,13 @@ int trace_remove_event_call(struct trace
{
int ret;
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2425,8 +2425,8 @@ static int trace_module_notify(struct no
+@@ -2424,8 +2424,8 @@ static int trace_module_notify(struct no
{
struct module *mod = data;
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
switch (val) {
case MODULE_STATE_COMING:
trace_module_add_events(mod);
-@@ -2435,8 +2435,8 @@ static int trace_module_notify(struct no
+@@ -2434,8 +2434,8 @@ static int trace_module_notify(struct no
trace_module_remove_events(mod);
break;
}
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -2951,24 +2951,24 @@ create_event_toplevel_files(struct dentr
+@@ -2950,24 +2950,24 @@ create_event_toplevel_files(struct dentr
* creates the event hierachry in the @parent/events directory.
*
* Returns 0 on success.
@@ -165,7 +165,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2997,9 +2997,10 @@ early_event_add_tracer(struct dentry *pa
+@@ -2996,9 +2996,10 @@ early_event_add_tracer(struct dentry *pa
return ret;
}
@@ -177,7 +177,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Disable any event triggers and associated soft-disabled events */
clear_event_triggers(tr);
-@@ -3020,8 +3021,6 @@ int event_trace_del_tracer(struct trace_
+@@ -3019,8 +3020,6 @@ int event_trace_del_tracer(struct trace_
tr->event_dir = NULL;
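[The hunks above establish one nesting order everywhere: event_mutex first,
trace_types_lock second. Keeping the order identical on every path is what
rules out an ABBA deadlock. A minimal user-space illustration of the
invariant; the lock names are stand-ins, not the kernel objects:]

    #include <pthread.h>

    static pthread_mutex_t outer_lock = PTHREAD_MUTEX_INITIALIZER; /* ~event_mutex */
    static pthread_mutex_t inner_lock = PTHREAD_MUTEX_INITIALIZER; /* ~trace_types_lock */

    /* Every caller nests the locks the same way: outer, then inner.
     * If one path locked inner->outer while another locked
     * outer->inner, two threads could each hold one lock and wait
     * forever on the other. */
    static void instance_op(void)
    {
        pthread_mutex_lock(&outer_lock);
        pthread_mutex_lock(&inner_lock);
        /* ... create or remove a trace instance ... */
        pthread_mutex_unlock(&inner_lock);
        pthread_mutex_unlock(&outer_lock);
    }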
diff --git a/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch b/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
index 56c68b74fb40..2db66f54af8d 100644
--- a/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
+++ b/patches/0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
@@ -1,6 +1,6 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:03 +0000
-Subject: [PATCH 03/25] hrtimer: Fix kerneldoc for struct hrtimer_cpu_base
+Date: Sun, 22 Oct 2017 23:39:40 +0200
+Subject: [PATCH 03/36] hrtimer: Fix kerneldoc for struct hrtimer_cpu_base
The sequence '/**' marks the start of a struct description. Add the
missing second asterisk. While at it adapt the ordering of the struct
@@ -24,10 +24,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
-@@ -155,12 +155,12 @@ enum hrtimer_base_type {
+@@ -153,12 +153,12 @@ enum hrtimer_base_type {
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
- * @expires_next: absolute time of the next event which was scheduled
- * via clock_set_next_event()
- * @next_timer: Pointer to the first expiring timer
diff --git a/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch b/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
index 8cdaaaa06064..0eadcc20c1d5 100644
--- a/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
+++ b/patches/0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
@@ -1,6 +1,6 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:03 +0000
-Subject: [PATCH 04/25] hrtimer: Cleanup clock argument in
+Date: Sun, 22 Oct 2017 23:39:41 +0200
+Subject: [PATCH 04/36] hrtimer: Cleanup clock argument in
schedule_hrtimeout_range_clock()
schedule_hrtimeout_range_clock() uses an integer for the clock id
@@ -10,7 +10,8 @@ the variable as well to make it consistent.
While at it, clean up the description for the function parameters clock_id
and mode. The clock modes and the clock ids are not restricted as the
-comment suggests.
+comment suggests. Fix the mode description as well for the callers of
+schedule_hrtimeout_range_clock().
No functional change.
@@ -18,12 +19,12 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/hrtimer.h | 2 +-
- kernel/time/hrtimer.c | 8 ++++----
- 2 files changed, 5 insertions(+), 5 deletions(-)
+ kernel/time/hrtimer.c | 12 ++++++------
+ 2 files changed, 7 insertions(+), 7 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -466,7 +466,7 @@ extern int schedule_hrtimeout_range(ktim
+@@ -462,7 +462,7 @@ extern int schedule_hrtimeout_range(ktim
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
u64 delta,
const enum hrtimer_mode mode,
@@ -34,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Soft interrupt function to run the hrtimer queues: */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1671,12 +1671,12 @@ void __init hrtimers_init(void)
+@@ -1664,12 +1664,12 @@ void __init hrtimers_init(void)
* schedule_hrtimeout_range_clock - sleep until timeout
* @expires: timeout value (ktime_t)
* @delta: slack in expires timeout (ktime_t)
@@ -50,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct hrtimer_sleeper t;
-@@ -1697,7 +1697,7 @@ schedule_hrtimeout_range_clock(ktime_t *
+@@ -1690,7 +1690,7 @@ schedule_hrtimeout_range_clock(ktime_t *
return -EINTR;
}
@@ -59,3 +60,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_set_expires_range_ns(&t.timer, *expires, delta);
hrtimer_init_sleeper(&t, current);
+@@ -1712,7 +1712,7 @@ schedule_hrtimeout_range_clock(ktime_t *
+ * schedule_hrtimeout_range - sleep until timeout
+ * @expires: timeout value (ktime_t)
+ * @delta: slack in expires timeout (ktime_t)
+- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
++ * @mode: timer mode
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
+@@ -1751,7 +1751,7 @@ EXPORT_SYMBOL_GPL(schedule_hrtimeout_ran
+ /**
+ * schedule_hrtimeout - sleep until timeout
+ * @expires: timeout value (ktime_t)
+- * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
++ * @mode: timer mode
+ *
+ * Make the current task sleep until the given expiry time has
+ * elapsed. The routine will return immediately unless
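[With the clockid_t cleanup applied, a caller passes the clock explicitly as
the last argument. A hedged kernel-context sketch of a call site after this
patch; the expiry value and slack are made up, the signature is the one shown
in the hunk above, and callers must set the task state first:]

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>
    #include <linux/sched.h>

    /* Sleep until an absolute CLOCK_MONOTONIC expiry ~1ms out,
     * allowing 10us of slack; returns -EINTR if interrupted. */
    static int wait_for_deadline(void)
    {
        ktime_t expires = ktime_add_ns(ktime_get(), NSEC_PER_MSEC);

        set_current_state(TASK_INTERRUPTIBLE);
        return schedule_hrtimeout_range_clock(&expires, 10 * NSEC_PER_USEC,
                                              HRTIMER_MODE_ABS,
                                              CLOCK_MONOTONIC);
    }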
diff --git a/patches/0005-hrtimer-Fix-hrtimer-function-description.patch b/patches/0005-hrtimer-Fix-hrtimer-function-description.patch
new file mode 100644
index 000000000000..64022bce7961
--- /dev/null
+++ b/patches/0005-hrtimer-Fix-hrtimer-function-description.patch
@@ -0,0 +1,60 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:42 +0200
+Subject: [PATCH 05/36] hrtimer: Fix hrtimer function description
+
+hrtimer_start[_range_ns]() starts a timer reliably on the current CPU only
+when HRTIMER_MODE_PINNED is set. Furthermore, the HRTIMER_MODE_PINNED mode
+is not considered when a hrtimer is initialized.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 6 +++---
+ kernel/time/hrtimer.c | 9 +++++----
+ 2 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -361,11 +361,11 @@ extern void hrtimer_start_range_ns(struc
+ u64 range_ns, const enum hrtimer_mode mode);
+
+ /**
+- * hrtimer_start - (re)start an hrtimer on the current CPU
++ * hrtimer_start - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL)
++ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
++ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
+ */
+ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -926,12 +926,12 @@ static inline ktime_t hrtimer_update_low
+ }
+
+ /**
+- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
++ * hrtimer_start_range_ns - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL)
++ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
++ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
+ */
+ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode)
+@@ -1109,7 +1109,8 @@ static void __hrtimer_init(struct hrtime
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+- * @mode: timer mode abs/rel
++ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
++ * relative (HRTIMER_MODE_REL); pinned is not considered here!
+ */
+ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
diff --git a/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch b/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
deleted file mode 100644
index 57f987cfa13b..000000000000
--- a/patches/0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:04 +0000
-Subject: [PATCH 05/25] hrtimer: Switch for loop to _ffs() evaluation
-
-Looping over all clock bases to find active bits is suboptimal if not all
-bases are active.
-
-Avoid this by converting it to a __ffs() evaluation.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 18 ++++++++++--------
- 1 file changed, 10 insertions(+), 8 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -465,17 +465,18 @@ static inline void hrtimer_update_next_t
-
- static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
- {
-- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
- ktime_t expires, expires_next = KTIME_MAX;
-
- hrtimer_update_next_timer(cpu_base, NULL);
-- for (; active; base++, active >>= 1) {
-+ while (active) {
-+ unsigned int id = __ffs(active);
-+ struct hrtimer_clock_base *base;
- struct timerqueue_node *next;
- struct hrtimer *timer;
-
-- if (!(active & 0x01))
-- continue;
-+ active &= ~(1U << id);
-+ base = cpu_base->clock_base + id;
-
- next = timerqueue_getnext(&base->active);
- timer = container_of(next, struct hrtimer, node);
-@@ -1242,15 +1243,16 @@ static void __run_hrtimer(struct hrtimer
-
- static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
-- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
-
-- for (; active; base++, active >>= 1) {
-+ while (active) {
-+ unsigned int id = __ffs(active);
-+ struct hrtimer_clock_base *base;
- struct timerqueue_node *node;
- ktime_t basenow;
-
-- if (!(active & 0x01))
-- continue;
-+ active &= ~(1U << id);
-+ base = cpu_base->clock_base + id;
-
- basenow = ktime_add(now, base->offset);
-
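[The conversion above (dropped here and re-added later in the series as patch
0010) visits only the set bits instead of shifting through every clock base.
A runnable user-space equivalent, using the GCC builtin that corresponds to
the kernel's __ffs():]

    #include <stdio.h>

    int main(void)
    {
        unsigned int active = 0x29; /* bases 0, 3 and 5 are active */

        while (active) {
            unsigned int id = __builtin_ctz(active); /* ~ kernel __ffs() */

            active &= ~(1U << id);
            printf("processing clock base %u\n", id);
        }
        return 0;
    }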
diff --git a/patches/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch b/patches/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch
index a956e8bc8361..4757fb0e2f4d 100644
--- a/patches/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch
+++ b/patches/0005-tracing-Increase-tracing-map-KEYS_MAX-size.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
-@@ -5,7 +5,7 @@
+@@ -6,7 +6,7 @@
#define TRACING_MAP_BITS_MAX 17
#define TRACING_MAP_BITS_MIN 7
diff --git a/patches/0006-hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALT.patch b/patches/0006-hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALT.patch
new file mode 100644
index 000000000000..b57c607fe72f
--- /dev/null
+++ b/patches/0006-hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALT.patch
@@ -0,0 +1,41 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:43 +0200
+Subject: [PATCH 06/36] hrtimer: Ensure POSIX compliance (relative
+ CLOCK_REALTIME hrtimers)
+
+The POSIX specification defines that relative CLOCK_REALTIME timers are not
+affected by clock modifications. Those timers have to use CLOCK_MONOTONIC
+to ensure POSIX compliance.
+
+The introduction of the additional mode HRTIMER_MODE_PINNED broke this
+requirement for pinned timers. There is no user space visible impact
+because user space timers are not using the pinned mode, but for
+consistency reasons this needs to be fixed.
+
+Check whether the mode has the HRTIMER_MODE_REL bit set instead of
+comparing with HRTIMER_MODE_ABS.
+
+Fixes: 597d0275736d ("timers: Framework for identifying pinned timers")
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1097,7 +1097,12 @@ static void __hrtimer_init(struct hrtime
+
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
+
+- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
++ /*
++ * Posix magic: Relative CLOCK_REALTIME timers are not affected by
++ * clock modifications, so they need to become CLOCK_MONOTONIC to
++ * ensure Posix compliance.
++ */
++ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
+ clock_id = CLOCK_MONOTONIC;
+
+ base = hrtimer_clockid_to_base(clock_id);
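[The subtlety the fix addresses: once HRTIMER_MODE_PINNED exists, "not
HRTIMER_MODE_ABS" no longer means "relative". A small stand-alone program
demonstrating why testing the REL bit is the correct check; the enum values
are the ones used in this series:]

    #include <stdio.h>

    enum hrtimer_mode {
        HRTIMER_MODE_ABS        = 0x00,
        HRTIMER_MODE_REL        = 0x01,
        HRTIMER_MODE_PINNED     = 0x02,
        HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
        HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
    };

    int main(void)
    {
        enum hrtimer_mode mode = HRTIMER_MODE_ABS_PINNED;

        /* Old check: wrongly classifies ABS|PINNED as relative. */
        printf("old: %s\n", mode != HRTIMER_MODE_ABS ? "relative" : "absolute");
        /* Fixed check: test the REL bit itself. */
        printf("new: %s\n", mode & HRTIMER_MODE_REL ? "relative" : "absolute");
        return 0;
    }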
diff --git a/patches/0006-tracing-Make-traceprobe-parsing-code-reusable.patch b/patches/0006-tracing-Make-traceprobe-parsing-code-reusable.patch
index 0b0687f683a4..bf806dd8038b 100644
--- a/patches/0006-tracing-Make-traceprobe-parsing-code-reusable.patch
+++ b/patches/0006-tracing-Make-traceprobe-parsing-code-reusable.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -8271,6 +8271,92 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8281,6 +8281,92 @@ void ftrace_dump(enum ftrace_dump_mode o
}
EXPORT_SYMBOL_GPL(ftrace_dump);
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ring_buf_size;
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -1751,6 +1751,13 @@ void trace_printk_start_comm(void);
+@@ -1755,6 +1755,13 @@ void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
diff --git a/patches/0007-hrtimer-Cleanup-hrtimer_mode-enum.patch b/patches/0007-hrtimer-Cleanup-hrtimer_mode-enum.patch
new file mode 100644
index 000000000000..0fa1f2660438
--- /dev/null
+++ b/patches/0007-hrtimer-Cleanup-hrtimer_mode-enum.patch
@@ -0,0 +1,45 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:44 +0200
+Subject: [PATCH 07/36] hrtimer: Cleanup hrtimer_mode enum
+
+It's not obvious that the HRTIMER_MODE variants are bit combinations
+because all modes are hard coded constants.
+
+Change it so the bit meanings are clear and use the symbols for creating
+modes which combine bits.
+
+While at it get rid of the ugly tail comments.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 16 +++++++++++-----
+ 1 file changed, 11 insertions(+), 5 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -28,13 +28,19 @@ struct hrtimer_cpu_base;
+
+ /*
+ * Mode arguments of xxx_hrtimer functions:
++ *
++ * HRTIMER_MODE_ABS - Time value is absolute
++ * HRTIMER_MODE_REL - Time value is relative to now
++ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
++ * when starting the timer)
+ */
+ enum hrtimer_mode {
+- HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */
+- HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */
+- HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */
+- HRTIMER_MODE_ABS_PINNED = 0x02,
+- HRTIMER_MODE_REL_PINNED = 0x03,
++ HRTIMER_MODE_ABS = 0x00,
++ HRTIMER_MODE_REL = 0x01,
++ HRTIMER_MODE_PINNED = 0x02,
++
++ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
++ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+ };
+
+ /*
diff --git a/patches/0008-tracing-hrtimer-Take-all-clock-bases-and-modes-into-.patch b/patches/0008-tracing-hrtimer-Take-all-clock-bases-and-modes-into-.patch
new file mode 100644
index 000000000000..041086cf00bb
--- /dev/null
+++ b/patches/0008-tracing-hrtimer-Take-all-clock-bases-and-modes-into-.patch
@@ -0,0 +1,55 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:45 +0200
+Subject: [PATCH 08/36] tracing: hrtimer: Take all clock bases and modes into
+ account
+
+So far the hrtimer_init tracepoint took only CLOCK_MONOTONIC and
+CLOCK_REALTIME into account, as well as HRTIMER_MODE_ABS/REL. The check
+for detecting timer mode ABS or REL has not been valid since the
+introduction of HRTIMER_MODE_PINNED.
+
+HRTIMER_MODE_PINNED is not evaluated in the hrtimer_init() call, but for
+the sake of completeness print all given modes.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/trace/events/timer.h | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -136,6 +136,20 @@ DEFINE_EVENT(timer_class, timer_cancel,
+ TP_ARGS(timer)
+ );
+
++#define decode_clockid(type) \
++ __print_symbolic(type, \
++ { CLOCK_REALTIME, "CLOCK_REALTIME" }, \
++ { CLOCK_MONOTONIC, "CLOCK_MONOTONIC" }, \
++ { CLOCK_BOOTTIME, "CLOCK_BOOTTIME" }, \
++ { CLOCK_TAI, "CLOCK_TAI" })
++
++#define decode_hrtimer_mode(mode) \
++ __print_symbolic(mode, \
++ { HRTIMER_MODE_ABS, "ABS" }, \
++ { HRTIMER_MODE_REL, "REL" }, \
++ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
++ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
++
+ /**
+ * hrtimer_init - called when the hrtimer is initialized
+ * @hrtimer: pointer to struct hrtimer
+@@ -162,10 +176,8 @@ TRACE_EVENT(hrtimer_init,
+ ),
+
+ TP_printk("hrtimer=%p clockid=%s mode=%s", __entry->hrtimer,
+- __entry->clockid == CLOCK_REALTIME ?
+- "CLOCK_REALTIME" : "CLOCK_MONOTONIC",
+- __entry->mode == HRTIMER_MODE_ABS ?
+- "HRTIMER_MODE_ABS" : "HRTIMER_MODE_REL")
++ decode_clockid(__entry->clockid),
++ decode_hrtimer_mode(__entry->mode))
+ );
+
+ /**
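[__print_symbolic() resolves the numeric clockid/mode to a string when the
trace buffer is read, not when the event is recorded. A runnable user-space
analogue of the lookup it performs; the table mirrors decode_hrtimer_mode()
above, and the helper is illustrative, not the kernel macro:]

    #include <stdio.h>

    struct sym { int val; const char *name; };

    /* Map a recorded value to its symbolic name at print time. */
    static const char *decode(int val, const struct sym *tab, size_t n)
    {
        for (size_t i = 0; i < n; i++)
            if (tab[i].val == val)
                return tab[i].name;
        return "UNKNOWN";
    }

    int main(void)
    {
        static const struct sym modes[] = {
            { 0x00, "ABS" },
            { 0x01, "REL" },
            { 0x02, "ABS|PINNED" },
            { 0x03, "REL|PINNED" },
        };

        printf("mode=%s\n",
               decode(0x03, modes, sizeof(modes) / sizeof(modes[0])));
        return 0;
    }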
diff --git a/patches/0009-tracing-hrtimer-Print-hrtimer-mode-in-hrtimer_start-.patch b/patches/0009-tracing-hrtimer-Print-hrtimer-mode-in-hrtimer_start-.patch
new file mode 100644
index 000000000000..348092156048
--- /dev/null
+++ b/patches/0009-tracing-hrtimer-Print-hrtimer-mode-in-hrtimer_start-.patch
@@ -0,0 +1,113 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:46 +0200
+Subject: [PATCH 09/36] tracing: hrtimer: Print hrtimer mode in hrtimer_start
+ tracepoint
+
+The hrtimer_start tracepoint lacks the mode information. The mode is
+important because consecutive starts can switch from ABS to REL or from
+PINNED to non-PINNED.
+
+Add the mode information.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/trace/events/timer.h | 13 ++++++++-----
+ kernel/time/hrtimer.c | 16 +++++++++-------
+ 2 files changed, 17 insertions(+), 12 deletions(-)
+
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -186,15 +186,16 @@ TRACE_EVENT(hrtimer_init,
+ */
+ TRACE_EVENT(hrtimer_start,
+
+- TP_PROTO(struct hrtimer *hrtimer),
++ TP_PROTO(struct hrtimer *hrtimer, enum hrtimer_mode mode),
+
+- TP_ARGS(hrtimer),
++ TP_ARGS(hrtimer, mode),
+
+ TP_STRUCT__entry(
+ __field( void *, hrtimer )
+ __field( void *, function )
+ __field( s64, expires )
+ __field( s64, softexpires )
++ __field( enum hrtimer_mode, mode )
+ ),
+
+ TP_fast_assign(
+@@ -202,12 +203,14 @@ TRACE_EVENT(hrtimer_start,
+ __entry->function = hrtimer->function;
+ __entry->expires = hrtimer_get_expires(hrtimer);
+ __entry->softexpires = hrtimer_get_softexpires(hrtimer);
++ __entry->mode = mode;
+ ),
+
+- TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
+- __entry->hrtimer, __entry->function,
++ TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu "
++ "mode=%s", __entry->hrtimer, __entry->function,
+ (unsigned long long) __entry->expires,
+- (unsigned long long) __entry->softexpires)
++ (unsigned long long) __entry->softexpires,
++ decode_hrtimer_mode(__entry->mode))
+ );
+
+ /**
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -435,10 +435,11 @@ debug_init(struct hrtimer *timer, clocki
+ trace_hrtimer_init(timer, clockid, mode);
+ }
+
+-static inline void debug_activate(struct hrtimer *timer)
++static inline void debug_activate(struct hrtimer *timer,
++ enum hrtimer_mode mode)
+ {
+ debug_hrtimer_activate(timer);
+- trace_hrtimer_start(timer);
++ trace_hrtimer_start(timer, mode);
+ }
+
+ static inline void debug_deactivate(struct hrtimer *timer)
+@@ -830,9 +831,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
+ * Returns 1 when the new timer is the leftmost timer in the tree.
+ */
+ static int enqueue_hrtimer(struct hrtimer *timer,
+- struct hrtimer_clock_base *base)
++ struct hrtimer_clock_base *base,
++ enum hrtimer_mode mode)
+ {
+- debug_activate(timer);
++ debug_activate(timer, mode);
+
+ base->cpu_base->active_bases |= 1 << base->index;
+
+@@ -955,7 +957,7 @@ void hrtimer_start_range_ns(struct hrtim
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+- leftmost = enqueue_hrtimer(timer, new_base);
++ leftmost = enqueue_hrtimer(timer, new_base, mode);
+ if (!leftmost)
+ goto unlock;
+
+@@ -1224,7 +1226,7 @@ static void __run_hrtimer(struct hrtimer
+ */
+ if (restart != HRTIMER_NORESTART &&
+ !(timer->state & HRTIMER_STATE_ENQUEUED))
+- enqueue_hrtimer(timer, base);
++ enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
+
+ /*
+ * Separate the ->running assignment from the ->state assignment.
+@@ -1623,7 +1625,7 @@ static void migrate_hrtimer_list(struct
+ * sort out already expired timers and reprogram the
+ * event device.
+ */
+- enqueue_hrtimer(timer, new_base);
++ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
+ }
+ }
+
diff --git a/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch b/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
deleted file mode 100644
index 610f2a028705..000000000000
--- a/patches/0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
+++ /dev/null
@@ -1,96 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:06 +0000
-Subject: [PATCH 10/25] hrtimer: Make handling of hrtimer reprogramming and
- enqueuing not conditional
-
-The hrtimer_reprogramming, remote timer enqueuing and handling of the
-hrtimer_cpu_base struct member expires_next depend on the active high
-resolution timers. This makes the code harder to understand.
-
-To simplify the code, the hrtimer reprogramming is now executed
-independently except for the real reprogramming part. The expires_next
-stores now the first enqueued timer. Due to the adaption of the
-check_target function, remote enqueuing is now only possible when the
-expiry time is after the currently first expiry time independent of the
-active high resolution timers.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 31 ++++++++++++-------------------
- 1 file changed, 12 insertions(+), 19 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -154,10 +154,11 @@ struct hrtimer_clock_base *lock_hrtimer_
- }
-
- /*
-- * With high resolution timers enabled we do not migrate the timer
-- * when it is expiring before the next event on the target cpu because
-- * we cannot reprogram the target cpu hardware and we would cause it
-- * to fire late.
-+ * We do not migrate the timer when it is expiring before the next
-+ * event on the target cpu. When high resolution is enabled, we cannot
-+ * reprogram the target cpu hardware and we would cause it to fire
-+ * late. To keep it simple, we handle the high resolution enabled and
-+ * disabled case similar.
- *
- * Called with cpu_base->lock of target cpu held.
- */
-@@ -166,9 +167,6 @@ hrtimer_check_target(struct hrtimer *tim
- {
- ktime_t expires;
-
-- if (!new_base->cpu_base->hres_active)
-- return 0;
--
- expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
- return expires <= new_base->cpu_base->expires_next;
- }
-@@ -689,21 +687,24 @@ static void hrtimer_reprogram(struct hrt
-
- /* Update the pointer to the next expiring timer */
- hrtimer_update_next_timer(cpu_base, timer);
-+ cpu_base->expires_next = expires;
-
- /*
-+ * If hres is not active, hardware does not have to be
-+ * programmed yet.
-+ *
- * If a hang was detected in the last timer interrupt then we
- * do not schedule a timer which is earlier than the expiry
- * which we enforced in the hang detection. We want the system
- * to make progress.
- */
-- if (cpu_base->hang_detected)
-+ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
- return;
-
- /*
- * Program the timer hardware. We enforce the expiry for
- * events which are already in the past.
- */
-- cpu_base->expires_next = expires;
- tick_program_event(expires, 1);
- }
-
-@@ -943,16 +944,8 @@ void hrtimer_start_range_ns(struct hrtim
- if (!leftmost)
- goto unlock;
-
-- if (!hrtimer_is_hres_active(timer)) {
-- /*
-- * Kick to reschedule the next tick to handle the new timer
-- * on dynticks target.
-- */
-- if (new_base->cpu_base->nohz_active)
-- wake_up_nohz_cpu(new_base->cpu_base->cpu);
-- } else {
-- hrtimer_reprogram(timer, new_base);
-- }
-+ hrtimer_reprogram(timer, new_base);
-+
- unlock:
- unlock_hrtimer_base(timer, &flags);
- }
diff --git a/patches/0010-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch b/patches/0010-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
new file mode 100644
index 000000000000..97b6e8daf5a3
--- /dev/null
+++ b/patches/0010-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
@@ -0,0 +1,79 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:47 +0200
+Subject: [PATCH 10/36] hrtimer: Switch for loop to _ffs() evaluation
+
+Looping over all clock bases to find active bits is suboptimal if not all
+bases are active.
+
+Avoid this by converting it to a __ffs() evaluation. The functionality is
+factored out into a separate function and called via a macro, as suggested
+by Peter Zijlstra.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 31 +++++++++++++++++++++----------
+ 1 file changed, 21 insertions(+), 10 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -457,20 +457,34 @@ static inline void hrtimer_update_next_t
+ #endif
+ }
+
++static struct hrtimer_clock_base *
++__next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
++{
++ struct hrtimer_clock_base *base = NULL;
++
++ if (*active) {
++ unsigned int idx = __ffs(*active);
++ *active &= ~(1U << idx);
++ base = &cpu_base->clock_base[idx];
++ }
++
++ return base;
++}
++
++#define for_each_active_base(base, cpu_base, active) \
++ while ((base = __next_base((cpu_base), &(active))))
++
+ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+ {
+- struct hrtimer_clock_base *base = cpu_base->clock_base;
++ struct hrtimer_clock_base *base;
+ unsigned int active = cpu_base->active_bases;
+ ktime_t expires, expires_next = KTIME_MAX;
+
+ hrtimer_update_next_timer(cpu_base, NULL);
+- for (; active; base++, active >>= 1) {
++ for_each_active_base(base, cpu_base, active) {
+ struct timerqueue_node *next;
+ struct hrtimer *timer;
+
+- if (!(active & 0x01))
+- continue;
+-
+ next = timerqueue_getnext(&base->active);
+ timer = container_of(next, struct hrtimer, node);
+ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+@@ -1243,16 +1257,13 @@ static void __run_hrtimer(struct hrtimer
+
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
+ {
+- struct hrtimer_clock_base *base = cpu_base->clock_base;
++ struct hrtimer_clock_base *base;
+ unsigned int active = cpu_base->active_bases;
+
+- for (; active; base++, active >>= 1) {
++ for_each_active_base(base, cpu_base, active) {
+ struct timerqueue_node *node;
+ ktime_t basenow;
+
+- if (!(active & 0x01))
+- continue;
+-
+ basenow = ktime_add(now, base->offset);
+
+ while ((node = timerqueue_getnext(&base->active))) {
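
For illustration, a minimal user-space model of the conversion above,
with __builtin_ctz() standing in for the kernel's __ffs(): the loop
consumes one set bit per iteration and therefore visits only the bases
that are actually active:

    #include <stdio.h>

    int main(void)
    {
            unsigned int active = 0x15;     /* bases 0, 2 and 4 active */

            /* Same shape as for_each_active_base(): pick the lowest
             * set bit, clear it, process that base. */
            while (active) {
                    unsigned int idx = __builtin_ctz(active);

                    active &= ~(1U << idx);
                    printf("processing clock base %u\n", idx);
            }
            return 0;
    }
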
diff --git a/patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch b/patches/0011-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
index df4d12e75c08..4e4d6cd57c32 100644
--- a/patches/0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
+++ b/patches/0011-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
@@ -1,6 +1,6 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:04 +0000
-Subject: [PATCH 06/25] hrtimer: Store running timer in hrtimer_clock_base
+Date: Sun, 22 Oct 2017 23:39:48 +0200
+Subject: [PATCH 11/36] hrtimer: Store running timer in hrtimer_clock_base
The pointer to the currently running timer is stored in hrtimer_cpu_base
before the base lock is dropped and the callback is invoked.
@@ -21,7 +21,9 @@ kernels. Instead of having huge gaps due to alignment, remove the alignment
and let the compiler pack cpu base for 32bit. The resulting cache access
patterns are fortunately not really different from the current
behaviour. On 64bit kernels the 64byte alignment stays and the behaviour is
-unchanged.
+unchanged. This was determined by analyzing the resulting layout and
+looking at the number of cache lines involved for the frequently used
+clocks.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -32,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -112,9 +112,9 @@ struct hrtimer_sleeper {
+@@ -118,9 +118,9 @@ struct hrtimer_sleeper {
};
#ifdef CONFIG_64BIT
@@ -44,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
/**
-@@ -123,18 +123,22 @@ struct hrtimer_sleeper {
+@@ -129,18 +129,22 @@ struct hrtimer_sleeper {
* @index: clock type index for per_cpu support when moving a
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
@@ -69,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
enum hrtimer_base_type {
HRTIMER_BASE_MONOTONIC,
-@@ -148,8 +152,6 @@ enum hrtimer_base_type {
+@@ -154,8 +158,6 @@ enum hrtimer_base_type {
* struct hrtimer_cpu_base - the per cpu clock bases
* @lock: lock protecting the base and associated clock bases
* and timers
@@ -78,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @cpu: cpu number
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
-@@ -173,8 +175,6 @@ enum hrtimer_base_type {
+@@ -177,8 +179,6 @@ enum hrtimer_base_type {
*/
struct hrtimer_cpu_base {
raw_spinlock_t lock;
@@ -87,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned int cpu;
unsigned int active_bases;
unsigned int clock_was_set_seq;
-@@ -196,8 +196,6 @@ struct hrtimer_cpu_base {
+@@ -198,8 +198,6 @@ struct hrtimer_cpu_base {
static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
{
@@ -96,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
timer->node.expires = time;
timer->_softexpires = time;
}
-@@ -422,7 +420,7 @@ static inline int hrtimer_is_queued(stru
+@@ -424,7 +422,7 @@ static inline int hrtimer_is_queued(stru
*/
static inline int hrtimer_callback_running(struct hrtimer *timer)
{
@@ -123,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.clock_base = { { .cpu_base = &migration_cpu_base, }, },
};
-@@ -1136,19 +1134,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
+@@ -1150,19 +1148,19 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
*/
bool hrtimer_active(const struct hrtimer *timer)
{
@@ -149,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
}
-@@ -1182,16 +1180,16 @@ static void __run_hrtimer(struct hrtimer
+@@ -1196,16 +1194,16 @@ static void __run_hrtimer(struct hrtimer
lockdep_assert_held(&cpu_base->lock);
debug_deactivate(timer);
@@ -169,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
fn = timer->function;
-@@ -1232,13 +1230,13 @@ static void __run_hrtimer(struct hrtimer
+@@ -1246,13 +1244,13 @@ static void __run_hrtimer(struct hrtimer
* Separate the ->running assignment from the ->state assignment.
*
* As with a regular write barrier, this ensures the read side in
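
The padding argument from the changelog can be reproduced with a toy
structure. The fields below are stand-ins, not the real hrtimer_cpu_base
layout: on a 32bit build the packed variant stays at 44 bytes, while the
aligned one is padded up to a full 64 byte cache line:

    #include <stdio.h>

    struct base_packed {
            int lock;
            unsigned int cpu, active_bases;
            void *clock_base[8];
    };

    struct base_aligned {
            int lock;
            unsigned int cpu, active_bases;
            void *clock_base[8];
    } __attribute__((aligned(64)));

    int main(void)
    {
            /* 32bit: packed=44 aligned=64; 64bit: packed=80 aligned=128 */
            printf("packed=%zu aligned=%zu\n",
                   sizeof(struct base_packed), sizeof(struct base_aligned));
            return 0;
    }
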
diff --git a/patches/0011-tracing-Remove-code-which-merges-duplicates.patch b/patches/0011-tracing-Remove-code-which-merges-duplicates.patch
index f668553f0471..a8d8c772b0e3 100644
--- a/patches/0011-tracing-Remove-code-which-merges-duplicates.patch
+++ b/patches/0011-tracing-Remove-code-which-merges-duplicates.patch
@@ -164,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cmp_entries_fn = cmp_entries_key;
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
-@@ -214,11 +214,6 @@ struct tracing_map {
+@@ -215,11 +215,6 @@ struct tracing_map {
* Element allocation occurs before tracing begins, when the
* tracing_map_init() call is made by client code.
*
@@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @elt_free: When a tracing_map_elt is freed, this function is called
* and allows client-allocated per-element data to be freed.
*
-@@ -232,8 +227,6 @@ struct tracing_map {
+@@ -233,8 +228,6 @@ struct tracing_map {
*/
struct tracing_map_ops {
int (*elt_alloc)(struct tracing_map_elt *elt);
diff --git a/patches/0012-hrtimer-Make-room-in-struct-hrtimer_cpu_base.patch b/patches/0012-hrtimer-Make-room-in-struct-hrtimer_cpu_base.patch
new file mode 100644
index 000000000000..ed464bdd0b67
--- /dev/null
+++ b/patches/0012-hrtimer-Make-room-in-struct-hrtimer_cpu_base.patch
@@ -0,0 +1,33 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:50 +0200
+Subject: [PATCH 12/36] hrtimer: Make room in struct hrtimer_cpu_base
+
+The upcoming softirq based hrtimers support requires an additional field in
+the hrtimer_cpu_base struct, which would grow the struct size beyond a
+cache line.
+
+The struct members nr_retries and nr_hangs of hrtimer_cpu_base are solely
+used for diagnostic output and have no requirement to be unsigned int.
+
+Make them unsigned short to create room for the new struct member. No
+functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -189,8 +189,8 @@ struct hrtimer_cpu_base {
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
+ unsigned int nr_events;
+- unsigned int nr_retries;
+- unsigned int nr_hangs;
++ unsigned short nr_retries;
++ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+ #endif
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
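
A compile-time check of that claim, using a toy model which keeps only
the diagnostic fields (the surrounding hrtimer_cpu_base members are
omitted):

    struct diag_old {
            unsigned int nr_events;
            unsigned int nr_retries;
            unsigned int nr_hangs;
            unsigned int max_hang_time;
    };

    struct diag_new {
            unsigned int nr_events;
            unsigned short nr_retries;  /* diagnostics only, short suffices */
            unsigned short nr_hangs;
            unsigned int max_hang_time;
    };

    _Static_assert(sizeof(struct diag_new) == sizeof(struct diag_old) - 4,
                   "narrowing the counters must free four bytes");
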
diff --git a/patches/0012-ring-buffer-Add-interface-for-setting-absolute-time-.patch b/patches/0012-ring-buffer-Add-interface-for-setting-absolute-time-.patch
index d8c4e03092db..dea5dbfe2bcd 100644
--- a/patches/0012-ring-buffer-Add-interface-for-setting-absolute-time-.patch
+++ b/patches/0012-ring-buffer-Add-interface-for-setting-absolute-time-.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
-@@ -180,6 +180,8 @@ void ring_buffer_normalize_time_stamp(st
+@@ -181,6 +181,8 @@ void ring_buffer_normalize_time_stamp(st
int cpu, u64 *ts);
void ring_buffer_set_clock(struct ring_buffer *buffer,
u64 (*clock)(void));
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2266,7 +2266,7 @@ trace_event_buffer_lock_reserve(struct r
+@@ -2269,7 +2269,7 @@ trace_event_buffer_lock_reserve(struct r
*current_rb = trace_file->tr->trace_buffer.buffer;
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
(EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
(entry = this_cpu_read(trace_buffered_event))) {
/* Try to use the per cpu buffer first */
-@@ -6287,6 +6287,44 @@ static int tracing_clock_open(struct ino
+@@ -6297,6 +6297,44 @@ static int tracing_clock_open(struct ino
return ret;
}
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct trace_iterator iter;
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -271,6 +271,7 @@ struct trace_array {
+@@ -273,6 +273,7 @@ struct trace_array {
/* function tracing enabled */
int function_enabled;
#endif
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
enum {
-@@ -284,6 +285,8 @@ extern struct mutex trace_types_lock;
+@@ -286,6 +287,8 @@ extern struct mutex trace_types_lock;
extern int trace_array_get(struct trace_array *tr);
extern void trace_array_put(struct trace_array *tr);
diff --git a/patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch b/patches/0013-hrtimer-Reduce-conditional-code-hres_active.patch
index 4eb2fb772577..7fc249a0ec88 100644
--- a/patches/0007-hrtimer-Reduce-conditional-code-hres_active.patch
+++ b/patches/0013-hrtimer-Reduce-conditional-code-hres_active.patch
@@ -1,28 +1,42 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:05 +0000
-Subject: [PATCH 07/25] hrtimer: Reduce conditional code (hres_active)
+Date: Sun, 22 Oct 2017 23:39:51 +0200
+Subject: [PATCH 13/36] hrtimer: Reduce conditional code (hres_active)
The hrtimer_cpu_base struct has the CONFIG_HIGH_RES_TIMERS conditional
struct member hres_active. All related functions to this member are
conditional as well.
-There is no functional change, when the hres_active member is unconditional
-with all related functions and is set to zero during initialization. This
-makes the code easier to read.
+There is no functional change when the hres_active member becomes
+unconditional, along with all related functions, and is set to zero
+during initialization.
+The conditional code sections can be avoided by adding IS_ENABLED(HIGHRES)
+conditionals into common functions, which ensures dead code elimination.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/hrtimer.h | 17 ++++++-----------
- kernel/time/hrtimer.c | 30 ++++++++++++++----------------
- 2 files changed, 20 insertions(+), 27 deletions(-)
+ include/linux/hrtimer.h | 20 ++++++++------------
+ kernel/time/hrtimer.c | 31 +++++++++++++++----------------
+ 2 files changed, 23 insertions(+), 28 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -180,9 +180,9 @@ struct hrtimer_cpu_base {
+@@ -161,8 +161,8 @@ enum hrtimer_base_type {
+ * @cpu: cpu number
+ * @active_bases: Bitfield to mark bases with active timers
+ * @clock_was_set_seq: Sequence counter of clock was set events
+- * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hres_active: State of high resolution mode
++ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue
+@@ -182,9 +182,9 @@ struct hrtimer_cpu_base {
+ unsigned int cpu;
+ unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
+ unsigned int hres_active : 1;
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int in_hrtirq : 1,
@@ -30,29 +44,30 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hang_detected : 1;
ktime_t expires_next;
struct hrtimer *next_timer;
-@@ -264,16 +264,16 @@ static inline ktime_t hrtimer_cb_get_tim
+@@ -266,16 +266,17 @@ static inline ktime_t hrtimer_cb_get_tim
return timer->base->get_time();
}
--#ifdef CONFIG_HIGH_RES_TIMERS
--struct clock_event_device;
--
--extern void hrtimer_interrupt(struct clock_event_device *dev);
--
- static inline int hrtimer_is_hres_active(struct hrtimer *timer)
- {
- return timer->base->cpu_base->hres_active;
- }
-
-+#ifdef CONFIG_HIGH_RES_TIMERS
-+struct clock_event_device;
-+
-+extern void hrtimer_interrupt(struct clock_event_device *dev);
++static inline int hrtimer_is_hres_active(struct hrtimer *timer)
++{
++ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
++ timer->base->cpu_base->hres_active : 0;
++}
+
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ struct clock_event_device;
+
+ extern void hrtimer_interrupt(struct clock_event_device *dev);
+
+-static inline int hrtimer_is_hres_active(struct hrtimer *timer)
+-{
+- return timer->base->cpu_base->hres_active;
+-}
+-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
-@@ -296,11 +296,6 @@ extern unsigned int hrtimer_resolution;
+@@ -298,11 +299,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
@@ -66,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -505,6 +505,19 @@ static inline ktime_t hrtimer_update_bas
+@@ -512,6 +512,20 @@ static inline ktime_t hrtimer_update_bas
offs_real, offs_boot, offs_tai);
}
@@ -75,7 +90,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ */
+static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+{
-+ return cpu_base->hres_active;
++ return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ?
++ cpu_base->hres_active : 0;
+}
+
+static inline int hrtimer_hres_active(void)
@@ -86,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS
-@@ -534,19 +547,6 @@ static inline int hrtimer_is_hres_enable
+@@ -541,19 +555,6 @@ static inline int hrtimer_is_hres_enable
}
/*
@@ -106,7 +122,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Reprogram the event source with checking both queues for the
* next event
* Called with interrupts disabled and base->lock held
-@@ -654,7 +654,6 @@ static void hrtimer_reprogram(struct hrt
+@@ -661,7 +662,6 @@ static void hrtimer_reprogram(struct hrt
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
base->expires_next = KTIME_MAX;
@@ -114,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -713,8 +712,6 @@ void clock_was_set_delayed(void)
+@@ -720,8 +720,6 @@ void clock_was_set_delayed(void)
#else
@@ -123,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline void hrtimer_switch_to_hres(void) { }
static inline void
-@@ -1592,6 +1589,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1602,6 +1600,7 @@ int hrtimers_prepare_cpu(unsigned int cp
}
cpu_base->cpu = cpu;
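
The IS_ENABLED() based dead code elimination used above can be modeled
in user space. This is a rough sketch of the macro from
include/linux/kconfig.h, not a verbatim copy: a config symbol #defined
to 1 turns the conditional into a compile-time 1, an undefined symbol
into 0, so the guarded branch vanishes without any #ifdef:

    #define __ARG_PLACEHOLDER_1 0,
    #define __take_second_arg(ignored, val, ...) val

    /* IS_ENABLED(CONFIG_FOO): 1 if CONFIG_FOO is #defined to 1, else 0 */
    #define IS_ENABLED(cfg)    __is_enabled(cfg)
    #define __is_enabled(val)  ___is_enabled(__ARG_PLACEHOLDER_##val)
    #define ___is_enabled(arg) __take_second_arg(arg 1, 0)

    #define CONFIG_HIGH_RES_TIMERS 1  /* drop this line for the =n case */

    struct cpu_base { unsigned int hres_active; };

    static inline int hres_active(struct cpu_base *b)
    {
            /* Constant condition: with the symbol undefined the whole
             * function folds to 'return 0'. */
            return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? b->hres_active : 0;
    }
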
diff --git a/patches/0013-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch b/patches/0013-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
index 8c4281b3bec6..888a6e06a64e 100644
--- a/patches/0013-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
+++ b/patches/0013-ring-buffer-Redefine-the-unimplemented-RINGBUF_TIME_.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
-@@ -36,10 +36,12 @@ struct ring_buffer_event {
+@@ -37,10 +37,12 @@ struct ring_buffer_event {
* array[0] = time delta (28 .. 59)
* size = 8 bytes
*
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
* Data record
-@@ -56,12 +58,12 @@ enum ring_buffer_type {
+@@ -57,12 +59,12 @@ enum ring_buffer_type {
RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
diff --git a/patches/0014-hrtimer-Use-accesor-functions-instead-of-direct-acce.patch b/patches/0014-hrtimer-Use-accesor-functions-instead-of-direct-acce.patch
new file mode 100644
index 000000000000..335770ad645b
--- /dev/null
+++ b/patches/0014-hrtimer-Use-accesor-functions-instead-of-direct-acce.patch
@@ -0,0 +1,35 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:52 +0200
+Subject: [PATCH 14/36] hrtimer: Use accessor functions instead of direct access
+
+__hrtimer_hres_active() is now available unconditionally. Replace the
+direct access to hrtimer_cpu_base.hres_active.
+
+No functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -564,7 +564,7 @@ hrtimer_force_reprogram(struct hrtimer_c
+ {
+ ktime_t expires_next;
+
+- if (!cpu_base->hres_active)
++ if (!__hrtimer_hres_active(cpu_base))
+ return;
+
+ expires_next = __hrtimer_get_next_event(cpu_base);
+@@ -673,7 +673,7 @@ static void retrigger_next_event(void *a
+ {
+ struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
+
+- if (!base->hres_active)
++ if (!__hrtimer_hres_active(base))
+ return;
+
+ raw_spin_lock(&base->lock);
diff --git a/patches/0014-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch b/patches/0014-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
index 432868e3b171..31000ee7b308 100644
--- a/patches/0014-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
+++ b/patches/0014-tracing-Give-event-triggers-access-to-ring_buffer_ev.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -403,11 +403,13 @@ enum event_trigger_type {
+@@ -402,11 +402,13 @@ enum event_trigger_type {
extern int filter_match_preds(struct event_filter *filter, void *rec);
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);
-@@ -427,7 +429,7 @@ trace_trigger_soft_disabled(struct trace
+@@ -426,7 +428,7 @@ trace_trigger_soft_disabled(struct trace
if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
@@ -47,7 +47,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (eflags & EVENT_FILE_FL_PID_FILTER)
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -1292,7 +1292,7 @@ static inline bool
+@@ -1296,7 +1296,7 @@ static inline bool
unsigned long eflags = file->flags;
if (eflags & EVENT_FILE_FL_TRIGGER_COND)
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags) ||
(unlikely(file->flags & EVENT_FILE_FL_FILTERED) &&
-@@ -1329,7 +1329,7 @@ event_trigger_unlock_commit(struct trace
+@@ -1333,7 +1333,7 @@ event_trigger_unlock_commit(struct trace
trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);
if (tt)
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1362,7 +1362,7 @@ event_trigger_unlock_commit_regs(struct
+@@ -1366,7 +1366,7 @@ event_trigger_unlock_commit_regs(struct
irq_flags, pc, regs);
if (tt)
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
#define FILTER_PRED_INVALID ((unsigned short)-1)
-@@ -1587,7 +1587,8 @@ extern int register_trigger_hist_enable_
+@@ -1591,7 +1591,8 @@ extern int register_trigger_hist_enable_
*/
struct event_trigger_ops {
void (*func)(struct event_trigger_data *data,
diff --git a/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch b/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
deleted file mode 100644
index 688dabb3543d..000000000000
--- a/patches/0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
+++ /dev/null
@@ -1,119 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:08 +0000
-Subject: [PATCH 15/25] hrtimer: Add clock bases for soft irq context
-
-hrtimer callback functions are always executed in hard interrupt
-context. Users of hrtimer which need their timer function to be executed
-in soft interrupt context, make use of tasklets to get the proper context.
-
-Add additional clock bases for timers which must expire in softirq context,
-so the detour via the tasklet can be avoided. This is also required for RT,
-where the majority of hrtimer is moved into softirq context.
-
-Keep the new clockids internal to hrtimer for now, so they can't be
-accessed from other code until the rest of the changes is in place.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/hrtimer.h | 4 +++
- kernel/time/hrtimer.c | 56 ++++++++++++++++++++++++++++++++++++++++++------
- 2 files changed, 54 insertions(+), 6 deletions(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -145,6 +145,10 @@ enum hrtimer_base_type {
- HRTIMER_BASE_REALTIME,
- HRTIMER_BASE_BOOTTIME,
- HRTIMER_BASE_TAI,
-+ HRTIMER_BASE_MONOTONIC_SOFT,
-+ HRTIMER_BASE_REALTIME_SOFT,
-+ HRTIMER_BASE_BOOTTIME_SOFT,
-+ HRTIMER_BASE_TAI_SOFT,
- HRTIMER_MAX_CLOCK_BASES,
- };
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -60,6 +60,18 @@
- #include "tick-internal.h"
-
- /*
-+ * Clock ids for timers which expire in softirq context. These clock ids
-+ * are kernel internal and never exported to user space. Kept internal
-+ * until the rest of the functionality is in place.
-+ */
-+#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
-+
-+#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
-+
-+/*
- * The timer bases:
- *
- * There are more clockids than hrtimer bases. Thus, we index
-@@ -92,17 +104,43 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
- .clockid = CLOCK_TAI,
- .get_time = &ktime_get_clocktai,
- },
-+ {
-+ .index = HRTIMER_BASE_MONOTONIC_SOFT,
-+ .clockid = CLOCK_MONOTONIC_SOFT,
-+ .get_time = &ktime_get,
-+ },
-+ {
-+ .index = HRTIMER_BASE_REALTIME_SOFT,
-+ .clockid = CLOCK_REALTIME_SOFT,
-+ .get_time = &ktime_get_real,
-+ },
-+ {
-+ .index = HRTIMER_BASE_BOOTTIME_SOFT,
-+ .clockid = CLOCK_BOOTTIME_SOFT,
-+ .get_time = &ktime_get_boottime,
-+ },
-+ {
-+ .index = HRTIMER_BASE_TAI_SOFT,
-+ .clockid = CLOCK_TAI_SOFT,
-+ .get_time = &ktime_get_clocktai,
-+ },
- }
- };
-
--static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
-+#define MAX_CLOCKS_HRT (MAX_CLOCKS * 2)
-+
-+static const int hrtimer_clock_to_base_table[MAX_CLOCKS_HRT] = {
- /* Make sure we catch unsupported clockids */
-- [0 ... MAX_CLOCKS - 1] = HRTIMER_MAX_CLOCK_BASES,
-+ [0 ... MAX_CLOCKS_HRT - 1] = HRTIMER_MAX_CLOCK_BASES,
-
-- [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
-- [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
-- [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
-- [CLOCK_TAI] = HRTIMER_BASE_TAI,
-+ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
-+ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
-+ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
-+ [CLOCK_TAI] = HRTIMER_BASE_TAI,
-+ [CLOCK_REALTIME_SOFT] = HRTIMER_BASE_REALTIME_SOFT,
-+ [CLOCK_MONOTONIC_SOFT] = HRTIMER_BASE_MONOTONIC_SOFT,
-+ [CLOCK_BOOTTIME_SOFT] = HRTIMER_BASE_BOOTTIME_SOFT,
-+ [CLOCK_TAI_SOFT] = HRTIMER_BASE_TAI_SOFT,
- };
-
- /*
-@@ -1652,6 +1690,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
-
- void __init hrtimers_init(void)
- {
-+ /*
-+ * It is necessary, that the soft base mask is a single
-+ * bit.
-+ */
-+ BUILD_BUG_ON_NOT_POWER_OF_2(HRTIMER_BASE_SOFT_MASK);
-+
- hrtimers_prepare_cpu(smp_processor_id());
- }
-
diff --git a/patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch b/patches/0015-hrtimer-Make-the-remote-enqueue-check-unconditional.patch
index 07a336bf833a..145827479e79 100644
--- a/patches/0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch
+++ b/patches/0015-hrtimer-Make-the-remote-enqueue-check-unconditional.patch
@@ -1,69 +1,79 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:05 +0000
-Subject: [PATCH 08/25] hrtimer: Reduce conditional code (expires_next,
- next_timer)
+Date: Sun, 22 Oct 2017 23:39:53 +0200
+Subject: [PATCH 15/36] hrtimer: Make the remote enqueue check unconditional
-The hrtimer_cpu_base struct member expires_next and next_timer are
-conditional members (CONFIG_HIGH_RES_TIMERS). This makes the hrtimer code
-more complex and harder to understand than it actually is.
+hrtimer_cpu_base.expires_next is used to cache the next event armed in the
+timer hardware. The value is used to check whether an hrtimer can be
+enqueued remotely. If the new hrtimer is expiring before expires_next, then
+remote enqueue is not possible as the remote hrtimer hardware cannot be
+accessed for reprogramming to an earlier expiry time.
-Reduce the conditionals related to those two struct members.
+The remote enqueue check is currently conditional on
+CONFIG_HIGH_RES_TIMERS=y and hrtimer_cpu_base.hres_active. There is no
+compelling reason to make this conditional.
+
+Move hrtimer_cpu_base.expires_next out of the CONFIG_HIGH_RES_TIMERS=y
+guarded area and remove the conditionals in hrtimer_check_target().
+
+The check is currently a NOOP for the CONFIG_HIGH_RES_TIMERS=n and the
+!hrtimer_cpu_base.hres_active cases, because in both nothing updates
+hrtimer_cpu_base.expires_next yet. This will be changed with later patches
+which further reduce the #ifdef zoo in this code.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- include/linux/hrtimer.h | 10 +++++-----
- kernel/time/hrtimer.c | 24 +++++-------------------
- 2 files changed, 10 insertions(+), 24 deletions(-)
+ include/linux/hrtimer.h | 6 +++---
+ kernel/time/hrtimer.c | 26 ++++++--------------------
+ 2 files changed, 9 insertions(+), 23 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -160,13 +160,13 @@ enum hrtimer_base_type {
- * @in_hrtirq: hrtimer_interrupt() is currently executing
+@@ -164,13 +164,13 @@ enum hrtimer_base_type {
* @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
* @hang_detected: The last hrtimer interrupt detected a hang
- * @expires_next: absolute time of the next event, is required for remote
- * hrtimer enqueue
-- * @next_timer: Pointer to the first expiring timer
+ * @next_timer: Pointer to the first expiring timer
* @nr_events: Total number of hrtimer interrupt events
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue
-+ * @next_timer: Pointer to the first expiring timer
* @clock_base: array of clock bases for this cpu
*
* Note: next_timer is just an optimization for __remove_hrtimer().
-@@ -184,13 +184,13 @@ struct hrtimer_cpu_base {
+@@ -186,13 +186,13 @@ struct hrtimer_cpu_base {
#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int in_hrtirq : 1,
hang_detected : 1;
- ktime_t expires_next;
-- struct hrtimer *next_timer;
+ struct hrtimer *next_timer;
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+ ktime_t expires_next;
-+ struct hrtimer *next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -154,16 +154,16 @@ struct hrtimer_clock_base *lock_hrtimer_
+@@ -154,26 +154,21 @@ struct hrtimer_clock_base *lock_hrtimer_
}
/*
- * With HIGHRES=y we do not migrate the timer when it is expiring
- * before the next event on the target cpu because we cannot reprogram
- * the target cpu hardware and we would cause it to fire late.
-+ * With high resolution timers enabled we do not migrate the timer
-+ * when it is expiring before the next event on the target cpu because
-+ * we cannot reprogram the target cpu hardware and we would cause it
-+ * to fire late.
++ * We do not migrate the timer when it is expiring before the next
++ * event on the target cpu. When high resolution is enabled, we cannot
++ * reprogram the target cpu hardware and we would cause it to fire
++ * late. To keep it simple, we handle the high resolution enabled and
++ * disabled cases the same way.
*
* Called with cpu_base->lock of target cpu held.
*/
@@ -73,9 +83,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#ifdef CONFIG_HIGH_RES_TIMERS
ktime_t expires;
- if (!new_base->cpu_base->hres_active)
-@@ -171,9 +171,6 @@ hrtimer_check_target(struct hrtimer *tim
-
+- if (!new_base->cpu_base->hres_active)
+- return 0;
+-
expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
return expires <= new_base->cpu_base->expires_next;
-#else
@@ -83,18 +93,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-#endif
}
- #ifdef CONFIG_NO_HZ_COMMON
-@@ -456,9 +453,7 @@ static inline void debug_deactivate(stru
- static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
- struct hrtimer *timer)
- {
--#ifdef CONFIG_HIGH_RES_TIMERS
- cpu_base->next_timer = timer;
--#endif
- }
-
- static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
-@@ -649,14 +644,6 @@ static void hrtimer_reprogram(struct hrt
+ static inline
+@@ -657,14 +652,6 @@ static void hrtimer_reprogram(struct hrt
}
/*
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Retrigger next event is called after clock was set
*
* Called with interrupts disabled via on_each_cpu()
-@@ -721,7 +708,6 @@ static inline int hrtimer_reprogram(stru
+@@ -729,7 +716,6 @@ static inline int hrtimer_reprogram(stru
{
return 0;
}
@@ -117,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void retrigger_next_event(void *arg) { }
#endif /* CONFIG_HIGH_RES_TIMERS */
-@@ -1590,7 +1576,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1601,7 +1587,7 @@ int hrtimers_prepare_cpu(unsigned int cp
cpu_base->cpu = cpu;
cpu_base->hres_active = 0;
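
Boiled down to a sketch with simplified stand-in types, the now
unconditional check reads as follows; a return value of 1 means the
timer would expire before the event already armed on the target CPU, so
remote enqueue has to be refused:

    typedef long long ktime_t;          /* stand-in */

    struct cpu_base   { ktime_t expires_next; };
    struct clock_base { struct cpu_base *cpu_base; ktime_t offset; };

    static int check_target(ktime_t timer_expires, struct clock_base *new_base)
    {
            ktime_t expires = timer_expires - new_base->offset;

            return expires <= new_base->cpu_base->expires_next;
    }
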
diff --git a/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch b/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
deleted file mode 100644
index e2da5a4b28cc..000000000000
--- a/patches/0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
+++ /dev/null
@@ -1,77 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:08 +0000
-Subject: [PATCH 16/25] hrtimer: Allow function reuse for softirq based hrtimer
-
-The softirq based hrtimer can utilize most of the existing hrtimer
-functions, but need to operate on a different data set. Add an active_mask
-argument to various functions so the hard and soft bases can be
-selected. Fixup the existing callers and hand in the ACTIVE_HARD mask.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 20 +++++++++++++++-----
- 1 file changed, 15 insertions(+), 5 deletions(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -72,6 +72,14 @@
- #define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
-
- /*
-+ * Masks for selecting the soft and hard context timers from
-+ * cpu_base->active
-+ */
-+#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
-+#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
-+#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
-+
-+/*
- * The timer bases:
- *
- * There are more clockids than hrtimer bases. Thus, we index
-@@ -527,11 +535,12 @@ static ktime_t __hrtimer_next_event_base
-
- static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
- {
-- unsigned int active = cpu_base->active_bases;
-+ unsigned int active;
- ktime_t expires_next = KTIME_MAX;
-
- hrtimer_update_next_timer(cpu_base, NULL);
-
-+ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
- expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
-
- return expires_next;
-@@ -1264,9 +1273,10 @@ static void __run_hrtimer(struct hrtimer
- base->running = NULL;
- }
-
--static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
-+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
-+ unsigned int active_mask)
- {
-- unsigned int active = cpu_base->active_bases;
-+ unsigned int active = cpu_base->active_bases & active_mask;
-
- while (active) {
- unsigned int id = __ffs(active);
-@@ -1333,7 +1343,7 @@ void hrtimer_interrupt(struct clock_even
- */
- cpu_base->expires_next = KTIME_MAX;
-
-- __hrtimer_run_queues(cpu_base, now);
-+ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
-
- /* Reevaluate the clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
-@@ -1438,7 +1448,7 @@ void hrtimer_run_queues(void)
-
- raw_spin_lock(&cpu_base->lock);
- now = hrtimer_update_base(cpu_base);
-- __hrtimer_run_queues(cpu_base, now);
-+ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
- raw_spin_unlock(&cpu_base->lock);
- }
-
diff --git a/patches/0016-hrtimer-Make-hrtimer_cpu_base.next_timer-handling-un.patch b/patches/0016-hrtimer-Make-hrtimer_cpu_base.next_timer-handling-un.patch
new file mode 100644
index 000000000000..d094edb672d5
--- /dev/null
+++ b/patches/0016-hrtimer-Make-hrtimer_cpu_base.next_timer-handling-un.patch
@@ -0,0 +1,98 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:54 +0200
+Subject: [PATCH 16/36] hrtimer: Make hrtimer_cpu_base.next_timer handling
+ unconditional
+
+hrtimer_cpu_base.next_timer stores the pointer to the next expiring timer
+in a cpu base.
+
+This pointer cannot be dereferenced; it is solely used to check whether
+the hrtimer being removed is the first timer to expire in the
+CPU base. If this is the case, then the timer hardware needs to be
+reprogrammed to avoid an extra interrupt for nothing.
+
+Again, this is conditional functionality, but there is no compelling reason
+to make this conditional. As a preparation, hrtimer_cpu_base.next_timer
+needs to be available unconditionally. Aside from that, the upcoming support
+for softirq based hrtimers requires access to this pointer unconditionally.
+
+Make the update of hrtimer_cpu_base.next_timer unconditional and remove the
+ifdef cruft. The impact on CONFIG_HIGH_RES_TIMERS=n && CONFIG_NOHZ=n is
+marginal as it's just a store on an already dirtied cacheline.
+
+No functional change.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 4 ++--
+ kernel/time/hrtimer.c | 12 ++----------
+ 2 files changed, 4 insertions(+), 12 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -164,13 +164,13 @@ enum hrtimer_base_type {
+ * @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
+- * @next_timer: Pointer to the first expiring timer
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+ * hrtimer enqueue
++ * @next_timer: Pointer to the first expiring timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+@@ -186,13 +186,13 @@ struct hrtimer_cpu_base {
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int in_hrtirq : 1,
+ hang_detected : 1;
+- struct hrtimer *next_timer;
+ unsigned int nr_events;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
+ unsigned int max_hang_time;
+ #endif
+ ktime_t expires_next;
++ struct hrtimer *next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ } ____cacheline_aligned;
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -442,14 +442,6 @@ static inline void debug_deactivate(stru
+ }
+
+ #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+-static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+- struct hrtimer *timer)
+-{
+-#ifdef CONFIG_HIGH_RES_TIMERS
+- cpu_base->next_timer = timer;
+-#endif
+-}
+-
+ static struct hrtimer_clock_base *
+ __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
+ {
+@@ -473,7 +465,7 @@ static ktime_t __hrtimer_get_next_event(
+ unsigned int active = cpu_base->active_bases;
+ ktime_t expires, expires_next = KTIME_MAX;
+
+- hrtimer_update_next_timer(cpu_base, NULL);
++ cpu_base->next_timer = NULL;
+ for_each_active_base(base, cpu_base, active) {
+ struct timerqueue_node *next;
+ struct hrtimer *timer;
+@@ -483,7 +475,7 @@ static ktime_t __hrtimer_get_next_event(
+ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+ if (expires < expires_next) {
+ expires_next = expires;
+- hrtimer_update_next_timer(cpu_base, timer);
++ cpu_base->next_timer = timer;
+ }
+ }
+ /*
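
A compressed model of what becomes unconditional here: the next-event
scan always records the earliest expiring timer, so later removal paths
can compare against next_timer without any #ifdef. The types and the
fixed four-base array are simplifications, not the kernel's data
structures:

    #include <stddef.h>

    typedef long long ktime_t;
    #define KTIME_MAX ((ktime_t)~(1ULL << 63))

    struct toy_timer { ktime_t expires; };

    struct toy_cpu_base {
            struct toy_timer *next_timer;   /* now always maintained */
            struct toy_timer *first[4];     /* head of each clock base */
    };

    static ktime_t get_next_event(struct toy_cpu_base *cb)
    {
            ktime_t expires_next = KTIME_MAX;
            int i;

            cb->next_timer = NULL;
            for (i = 0; i < 4; i++) {
                    struct toy_timer *t = cb->first[i];

                    if (t && t->expires < expires_next) {
                            expires_next = t->expires;
                            cb->next_timer = t; /* unconditional update */
                    }
            }
            return expires_next;
    }
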
diff --git a/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch b/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
deleted file mode 100644
index 219d41b13810..000000000000
--- a/patches/0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
+++ /dev/null
@@ -1,303 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:09 +0000
-Subject: [PATCH 17/25] hrtimer: Implementation of softirq hrtimer handling
-
-hrtimers are executed always in hard irq context. If a hrtimer callback
-function needs to be exectued in softirq context, the detour using tasklets
-is required. To facilitate this, also in regards to real time specific
-handling of hrtimers, new clock ids ease the use of hrtimers in softirq
-context.
-
-Every clock ID is available for soft and hard hrtimers. The hrtimers are
-handled the same way when they are enqueued. When the hrtimer_interrupt
-raises, a check is implemented, if the HRTIMER_SOFTIRQ has to be raised as
-well. If it is raised, the soft hrtimers are not taken into account when
-for example _hrtimer_get_next_event() is called. At the end of the softirq,
-all hrtimer_cpu_base struct members are updated, so that the soft hrtimers
-are also taken into account.
-
-Suggested-by: Thomas Gleixner <tglx@linutronix.de>
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/hrtimer.h | 8 ++-
- kernel/time/hrtimer.c | 125 ++++++++++++++++++++++++++++++++++++++++++++----
- 2 files changed, 122 insertions(+), 11 deletions(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -161,6 +161,8 @@ enum hrtimer_base_type {
- * @clock_was_set_seq: Sequence counter of clock was set events
- * @migration_enabled: The migration of hrtimers to other cpus is enabled
- * @nohz_active: The nohz functionality is enabled
-+ * @softirq_activated: displays, if the softirq is raised - update of softirq
-+ * related settings is not required then.
- * @in_hrtirq: hrtimer_interrupt() is currently executing
- * @hres_active: State of high resolution mode
- * @hang_detected: The last hrtimer interrupt detected a hang
-@@ -169,8 +171,10 @@ enum hrtimer_base_type {
- * @nr_hangs: Total number of hrtimer interrupt hangs
- * @max_hang_time: Maximum time spent in hrtimer_interrupt
- * @expires_next: absolute time of the next event, is required for remote
-- * hrtimer enqueue
-+ * hrtimer enqueue; it is the total first expiry time (hard
-+ * and soft hrtimer are taken into account)
- * @next_timer: Pointer to the first expiring timer
-+ * @softirq_expires_next: Time to check, if soft queues needs also to be expired
- * @clock_base: array of clock bases for this cpu
- *
- * Note: next_timer is just an optimization for __remove_hrtimer().
-@@ -184,6 +188,7 @@ struct hrtimer_cpu_base {
- unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
-+ bool softirq_activated;
- unsigned int hres_active : 1,
- in_hrtirq : 1,
- hang_detected : 1;
-@@ -195,6 +200,7 @@ struct hrtimer_cpu_base {
- #endif
- ktime_t expires_next;
- struct hrtimer *next_timer;
-+ ktime_t softirq_expires_next;
- struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
- } ____cacheline_aligned;
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -499,7 +499,6 @@ static inline void hrtimer_update_next_t
- cpu_base->next_timer = timer;
- }
-
--#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
- static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
- unsigned int active,
- ktime_t expires_next)
-@@ -540,12 +539,23 @@ static ktime_t __hrtimer_get_next_event(
-
- hrtimer_update_next_timer(cpu_base, NULL);
-
-+ if (!cpu_base->softirq_activated) {
-+ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
-+ expires_next = __hrtimer_next_event_base(cpu_base, active,
-+ expires_next);
-+ cpu_base->softirq_expires_next = expires_next;
-+ }
-+
- active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
- expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
-
-+ /*
-+ * cpu_base->expires_next is not updated here. It is set only
-+ * in hrtimer_reprogramming path!
-+ */
-+
- return expires_next;
- }
--#endif
-
- static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
- {
-@@ -969,6 +979,49 @@ static inline ktime_t hrtimer_update_low
- return tim;
- }
-
-+static void hrtimer_reprogram_softirq(struct hrtimer *timer)
-+{
-+ struct hrtimer_clock_base *base = timer->base;
-+ struct hrtimer_cpu_base *cpu_base = base->cpu_base;
-+ ktime_t expires;
-+
-+ /*
-+ * The softirq timer is not rearmed, when the softirq was raised
-+ * and has not yet run to completion.
-+ */
-+ if (cpu_base->softirq_activated)
-+ return;
-+
-+ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-+
-+ if (!ktime_before(expires, cpu_base->softirq_expires_next))
-+ return;
-+
-+ cpu_base->softirq_expires_next = expires;
-+
-+ if (!ktime_before(expires, cpu_base->expires_next))
-+ return;
-+ hrtimer_reprogram(timer);
-+}
-+
-+static void hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base,
-+ bool reprogram)
-+{
-+ ktime_t expires;
-+
-+ expires = __hrtimer_get_next_event(cpu_base);
-+
-+ if (!reprogram || !ktime_before(expires, cpu_base->expires_next))
-+ return;
-+ /*
-+ * next_timer can be used here, because
-+ * hrtimer_get_next_event() updated the next
-+ * timer. expires_next is only set when reprogramming function
-+ * is called.
-+ */
-+ hrtimer_reprogram(cpu_base->next_timer);
-+}
-+
- static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- u64 delta_ns, const enum hrtimer_mode mode,
- struct hrtimer_clock_base *base)
-@@ -1007,9 +1060,12 @@ void hrtimer_start_range_ns(struct hrtim
-
- base = lock_hrtimer_base(timer, &flags);
-
-- if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
-- hrtimer_reprogram(timer);
--
-+ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base)) {
-+ if (timer->base->index < HRTIMER_BASE_MONOTONIC_SOFT)
-+ hrtimer_reprogram(timer);
-+ else
-+ hrtimer_reprogram_softirq(timer);
-+ }
- unlock_hrtimer_base(timer, &flags);
- }
- EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
-@@ -1206,7 +1262,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
-
- static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
- struct hrtimer_clock_base *base,
-- struct hrtimer *timer, ktime_t *now)
-+ struct hrtimer *timer, ktime_t *now,
-+ bool hardirq)
- {
- enum hrtimer_restart (*fn)(struct hrtimer *);
- int restart;
-@@ -1241,11 +1298,19 @@ static void __run_hrtimer(struct hrtimer
- * protected against migration to a different CPU even if the lock
- * is dropped.
- */
-- raw_spin_unlock(&cpu_base->lock);
-+ if (hardirq)
-+ raw_spin_unlock(&cpu_base->lock);
-+ else
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+
- trace_hrtimer_expire_entry(timer, now);
- restart = fn(timer);
- trace_hrtimer_expire_exit(timer);
-- raw_spin_lock(&cpu_base->lock);
-+
-+ if (hardirq)
-+ raw_spin_lock(&cpu_base->lock);
-+ else
-+ raw_spin_lock_irq(&cpu_base->lock);
-
- /*
- * Note: We clear the running state after enqueue_hrtimer and
-@@ -1309,11 +1374,28 @@ static void __hrtimer_run_queues(struct
- if (basenow < hrtimer_get_softexpires_tv64(timer))
- break;
-
-- __run_hrtimer(cpu_base, base, timer, &basenow);
-+ __run_hrtimer(cpu_base, base, timer, &basenow,
-+ active_mask == HRTIMER_ACTIVE_HARD);
- }
- }
- }
-
-+static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
-+{
-+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-+ ktime_t now;
-+
-+ raw_spin_lock_irq(&cpu_base->lock);
-+
-+ now = hrtimer_update_base(cpu_base);
-+ __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_SOFT);
-+
-+ cpu_base->softirq_activated = 0;
-+ hrtimer_update_softirq_timer(cpu_base, true);
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+}
-+
- #ifdef CONFIG_HIGH_RES_TIMERS
-
- /*
-@@ -1343,9 +1425,15 @@ void hrtimer_interrupt(struct clock_even
- */
- cpu_base->expires_next = KTIME_MAX;
-
-+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
-+ cpu_base->softirq_expires_next = KTIME_MAX;
-+ cpu_base->softirq_activated = 1;
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ }
-+
- __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
-
-- /* Reevaluate the clock bases for the next expiry */
-+ /* Reevaluate the hard interrupt clock bases for the next expiry */
- expires_next = __hrtimer_get_next_event(cpu_base);
- /*
- * Store the new expiry value so the migration code can verify
-@@ -1448,6 +1536,13 @@ void hrtimer_run_queues(void)
-
- raw_spin_lock(&cpu_base->lock);
- now = hrtimer_update_base(cpu_base);
-+
-+ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
-+ cpu_base->softirq_expires_next = KTIME_MAX;
-+ cpu_base->softirq_activated = 1;
-+ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-+ }
-+
- __hrtimer_run_queues(cpu_base, now, HRTIMER_ACTIVE_HARD);
- raw_spin_unlock(&cpu_base->lock);
- }
-@@ -1629,6 +1724,7 @@ int hrtimers_prepare_cpu(unsigned int cp
- cpu_base->cpu = cpu;
- cpu_base->hres_active = 0;
- cpu_base->expires_next = KTIME_MAX;
-+ cpu_base->softirq_expires_next = KTIME_MAX;
- return 0;
- }
-
-@@ -1672,6 +1768,7 @@ int hrtimers_dead_cpu(unsigned int scpu)
- BUG_ON(cpu_online(scpu));
- tick_cancel_sched_timer(scpu);
-
-+ local_bh_disable();
- local_irq_disable();
- old_base = &per_cpu(hrtimer_bases, scpu);
- new_base = this_cpu_ptr(&hrtimer_bases);
-@@ -1687,12 +1784,19 @@ int hrtimers_dead_cpu(unsigned int scpu)
- &new_base->clock_base[i]);
- }
-
-+ /*
-+ * The migration might have changed the first expiring softirq
-+ * timer on this CPU. Update it.
-+ */
-+ hrtimer_update_softirq_timer(new_base, false);
-+
- raw_spin_unlock(&old_base->lock);
- raw_spin_unlock(&new_base->lock);
-
- /* Check, if we got expired work to do */
- __hrtimer_peek_ahead_timers();
- local_irq_enable();
-+ local_bh_enable();
- return 0;
- }
-
-@@ -1707,6 +1811,7 @@ void __init hrtimers_init(void)
- BUILD_BUG_ON_NOT_POWER_OF_2(HRTIMER_BASE_SOFT_MASK);
-
- hrtimers_prepare_cpu(smp_processor_id());
-+ open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
- }
-
- /**
diff --git a/patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch b/patches/0017-hrtimer-Make-hrtimer_reprogramm-unconditional.patch
index e5d6ac5f2f9f..182cf6b1db28 100644
--- a/patches/0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch
+++ b/patches/0017-hrtimer-Make-hrtimer_reprogramm-unconditional.patch
@@ -1,29 +1,29 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:05 +0000
-Subject: [PATCH 09/25] hrtimer: Reduce conditional code (hrtimer_reprogram())
+Date: Sun, 22 Oct 2017 23:39:55 +0200
+Subject: [PATCH 17/36] hrtimer: Make hrtimer_reprogram() unconditional
-The hrtimer_reprogram() is currently required only when
-CONFIG_HIGH_RES_TIMERS is set. Additional bitfields of hrtimer_cpu_base
-struct are high resolution timer specific as well.
+hrtimer_reprogram() needs to be available unconditionally for softirq based
+hrtimers. Move the function and all required struct members out of the
+CONFIG_HIGH_RES_TIMERS #ifdef.
-To simplify the hrtimer code, the behaviour of CONFIG_HIGH_RES_TIMERS and
-!CONFIG_HIGH_RES_TIMERS should be similar. As preparation for this, the
-function hrtimer_reprogram() and required hrtimer_cpu_base struct members
-are moved outside the conditional area.
+There is no functional change because hrtimer_reprogram() is only invoked
+when hrtimer_cpu_base.hres_active is true. Making it unconditional
+increases the text size for the CONFIG_HIGH_RES_TIMERS=n case, but avoids
+replication of that code for the upcoming softirq based hrtimers support.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/hrtimer.h | 6 +-
- kernel/time/hrtimer.c | 131 +++++++++++++++++++++++-------------------------
- 2 files changed, 66 insertions(+), 71 deletions(-)
+ kernel/time/hrtimer.c | 129 +++++++++++++++++++++++-------------------------
+ 2 files changed, 65 insertions(+), 70 deletions(-)
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -180,10 +180,10 @@ struct hrtimer_cpu_base {
+@@ -182,10 +182,10 @@ struct hrtimer_cpu_base {
+ unsigned int cpu;
+ unsigned int active_bases;
unsigned int clock_was_set_seq;
- bool migration_enabled;
- bool nohz_active;
- unsigned int hres_active : 1;
-#ifdef CONFIG_HIGH_RES_TIMERS
- unsigned int in_hrtirq : 1,
@@ -32,25 +32,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hang_detected : 1;
+#ifdef CONFIG_HIGH_RES_TIMERS
unsigned int nr_events;
- unsigned int nr_retries;
- unsigned int nr_hangs;
+ unsigned short nr_retries;
+ unsigned short nr_hangs;
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -449,13 +449,13 @@ static inline void debug_deactivate(stru
- trace_hrtimer_cancel(timer);
- }
-
--#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
- static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
- struct hrtimer *timer)
- {
- cpu_base->next_timer = timer;
- }
-
-+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
- static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
- {
- unsigned int active = cpu_base->active_bases;
@@ -582,68 +582,6 @@ hrtimer_force_reprogram(struct hrtimer_c
}
@@ -97,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- return;
-
- /* Update the pointer to the next expiring timer */
-- hrtimer_update_next_timer(cpu_base, timer);
+- cpu_base->next_timer = timer;
-
- /*
- * If a hang was detected in the last timer interrupt then we
@@ -176,7 +161,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return;
+
+ /* Update the pointer to the next expiring timer */
-+ hrtimer_update_next_timer(cpu_base, timer);
++ cpu_base->next_timer = timer;
+
+ /*
+ * If a hang was detected in the last timer interrupt then we
diff --git a/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch b/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
deleted file mode 100644
index 5b0372fd610f..000000000000
--- a/patches/0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:09 +0000
-Subject: [PATCH 18/25] hrtimer: Enable soft and hard hrtimer
-
-Move the definition of the clock ids, to be available not only
-internal. The transition between clock id and hrtimer base is now
-expanded by the soft hrtimer bases and the corresponding clock
-ids. Update all hard hrtimer restricted queries to handle soft and
-hard hrtimers similarly.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/linux/hrtimer.h | 11 +++++++++++
- kernel/time/hrtimer.c | 22 +++++++---------------
- 2 files changed, 18 insertions(+), 15 deletions(-)
-
---- a/include/linux/hrtimer.h
-+++ b/include/linux/hrtimer.h
-@@ -23,6 +23,17 @@
- #include <linux/timer.h>
- #include <linux/timerqueue.h>
-
-+/*
-+ * Clock ids for hrtimers which expire in softirq context. These clock ids
-+ * are kernel internal and never exported to user space.
-+ */
-+#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
-+
-+#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
-+#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
-+
- struct hrtimer_clock_base;
- struct hrtimer_cpu_base;
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -60,18 +60,6 @@
- #include "tick-internal.h"
-
- /*
-- * Clock ids for timers which expire in softirq context. These clock ids
-- * are kernel internal and never exported to user space. Kept internal
-- * until the rest of the functionality is in place.
-- */
--#define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
--
--#define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
--#define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
--#define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
--#define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
--
--/*
- * Masks for selecting the soft and hard context timers from
- * cpu_base->active
- */
-@@ -1173,7 +1161,7 @@ u64 hrtimer_get_next_event(void)
-
- static inline int hrtimer_clockid_to_base(clockid_t clock_id)
- {
-- if (likely(clock_id < MAX_CLOCKS)) {
-+ if (likely(clock_id < MAX_CLOCKS_HRT)) {
- int base = hrtimer_clock_to_base_table[clock_id];
-
- if (likely(base != HRTIMER_MAX_CLOCK_BASES))
-@@ -1193,8 +1181,12 @@ static void __hrtimer_init(struct hrtime
-
- cpu_base = raw_cpu_ptr(&hrtimer_bases);
-
-- if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
-- clock_id = CLOCK_MONOTONIC;
-+ if (mode != HRTIMER_MODE_ABS) {
-+ if (clock_id == CLOCK_REALTIME)
-+ clock_id = CLOCK_MONOTONIC;
-+ else if (clock_id == CLOCK_REALTIME_SOFT)
-+ clock_id = CLOCK_MONOTONIC_SOFT;
-+ }
-
- base = hrtimer_clockid_to_base(clock_id);
- timer->base = &cpu_base->clock_base[base];
diff --git a/patches/0018-hrtimer-Reduce-conditional-code-and-make-hrtimer_for.patch b/patches/0018-hrtimer-Reduce-conditional-code-and-make-hrtimer_for.patch
new file mode 100644
index 000000000000..e2e440513aed
--- /dev/null
+++ b/patches/0018-hrtimer-Reduce-conditional-code-and-make-hrtimer_for.patch
@@ -0,0 +1,104 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:56 +0200
+Subject: [PATCH 18/36] hrtimer: Reduce conditional code and make
+ hrtimer_force_reprogram() unconditional
+
+hrtimer_force_reprogram() needs to be available unconditionally for softirq
+based hrtimers. Move the function and all required struct members out of
+the CONFIG_HIGH_RES_TIMERS #ifdef.
+
+There is no functional change because hrtimer_force_reprogram() is
+only invoked when hrtimer_cpu_base.hres_active is true and
+CONFIG_HIGH_RES_TIMERS=y. Making it unconditional increases the text
+size for the CONFIG_HIGH_RES_TIMERS=n case slightly, but avoids
+replication of that code for the upcoming softirq based hrtimers
+support. Most of the code gets eliminated in the
+CONFIG_HIGH_RES_TIMERS=n case by the compiler.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 58 ++++++++++++++++++++++++--------------------------
+ 1 file changed, 28 insertions(+), 30 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -513,34 +513,6 @@ static inline int hrtimer_hres_active(vo
+ return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
+ }
+
+-/* High resolution timer related functions */
+-#ifdef CONFIG_HIGH_RES_TIMERS
+-
+-/*
+- * High resolution timer enabled ?
+- */
+-static bool hrtimer_hres_enabled __read_mostly = true;
+-unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+-EXPORT_SYMBOL_GPL(hrtimer_resolution);
+-
+-/*
+- * Enable / Disable high resolution mode
+- */
+-static int __init setup_hrtimer_hres(char *str)
+-{
+- return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
+-}
+-
+-__setup("highres=", setup_hrtimer_hres);
+-
+-/*
+- * hrtimer_high_res_enabled - query, if the highres mode is enabled
+- */
+-static inline int hrtimer_is_hres_enabled(void)
+-{
+- return hrtimer_hres_enabled;
+-}
+-
+ /*
+ * Reprogram the event source with checking both queues for the
+ * next event
+@@ -581,6 +553,34 @@ hrtimer_force_reprogram(struct hrtimer_c
+ tick_program_event(cpu_base->expires_next, 1);
+ }
+
++/* High resolution timer related functions */
++#ifdef CONFIG_HIGH_RES_TIMERS
++
++/*
++ * High resolution timer enabled ?
++ */
++static bool hrtimer_hres_enabled __read_mostly = true;
++unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
++EXPORT_SYMBOL_GPL(hrtimer_resolution);
++
++/*
++ * Enable / Disable high resolution mode
++ */
++static int __init setup_hrtimer_hres(char *str)
++{
++ return (kstrtobool(str, &hrtimer_hres_enabled) == 0);
++}
++
++__setup("highres=", setup_hrtimer_hres);
++
++/*
++ * hrtimer_high_res_enabled - query, if the highres mode is enabled
++ */
++static inline int hrtimer_is_hres_enabled(void)
++{
++ return hrtimer_hres_enabled;
++}
++
+ /*
+ * Retrigger next event is called after clock was set
+ *
+@@ -639,8 +639,6 @@ void clock_was_set_delayed(void)
+
+ static inline int hrtimer_is_hres_enabled(void) { return 0; }
+ static inline void hrtimer_switch_to_hres(void) { }
+-static inline void
+-hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
+ static inline void retrigger_next_event(void *arg) { }
+
+ #endif /* CONFIG_HIGH_RES_TIMERS */
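The pattern introduced here recurs throughout the series: the function body becomes unconditional, and a predicate that is compile-time false for CONFIG_HIGH_RES_TIMERS=n lets the compiler discard the hardware path. A minimal userspace sketch of that effect, with illustrative names only (not the kernel code):

#include <stdio.h>

/* Stand-in for CONFIG_HIGH_RES_TIMERS=n; an assumption of this sketch. */
#define HIGH_RES_ENABLED 0

static int hres_active(void)
{
	return HIGH_RES_ENABLED;	/* compile-time constant 0 */
}

static void force_reprogram(long expires_next)
{
	/* the bookkeeping is now unconditional ... */
	printf("expires_next = %ld\n", expires_next);

	/* ... while the hardware path below is dead code the compiler
	 * eliminates when hres_active() folds to 0 */
	if (!hres_active())
		return;
	printf("program clock event device\n");
}

int main(void)
{
	force_reprogram(1000);
	return 0;
}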
diff --git a/patches/0018-tracing-Add-per-element-variable-support-to-tracing_.patch b/patches/0018-tracing-Add-per-element-variable-support-to-tracing_.patch
index e1bf618e80dc..8e0ee7c7b958 100644
--- a/patches/0018-tracing-Add-per-element-variable-support-to-tracing_.patch
+++ b/patches/0018-tracing-Add-per-element-variable-support-to-tracing_.patch
@@ -171,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/tracing_map.h
+++ b/kernel/trace/tracing_map.h
-@@ -9,6 +9,7 @@
+@@ -10,6 +10,7 @@
#define TRACING_MAP_VALS_MAX 3
#define TRACING_MAP_FIELDS_MAX (TRACING_MAP_KEYS_MAX + \
TRACING_MAP_VALS_MAX)
@@ -179,7 +179,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define TRACING_MAP_SORT_KEYS_MAX 2
typedef int (*tracing_map_cmp_fn_t) (void *val_a, void *val_b);
-@@ -136,6 +137,8 @@ struct tracing_map_field {
+@@ -137,6 +138,8 @@ struct tracing_map_field {
struct tracing_map_elt {
struct tracing_map *map;
struct tracing_map_field *fields;
@@ -188,7 +188,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *key;
void *private_data;
};
-@@ -191,6 +194,7 @@ struct tracing_map {
+@@ -192,6 +195,7 @@ struct tracing_map {
int key_idx[TRACING_MAP_KEYS_MAX];
unsigned int n_keys;
struct tracing_map_sort_key sort_key;
@@ -196,7 +196,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic64_t hits;
atomic64_t drops;
};
-@@ -240,6 +244,7 @@ tracing_map_create(unsigned int map_bits
+@@ -241,6 +245,7 @@ tracing_map_create(unsigned int map_bits
extern int tracing_map_init(struct tracing_map *map);
extern int tracing_map_add_sum_field(struct tracing_map *map);
@@ -204,7 +204,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int tracing_map_add_key_field(struct tracing_map *map,
unsigned int offset,
tracing_map_cmp_fn_t cmp_fn);
-@@ -259,7 +264,13 @@ extern int tracing_map_cmp_none(void *va
+@@ -260,7 +265,13 @@ extern int tracing_map_cmp_none(void *va
extern void tracing_map_update_sum(struct tracing_map_elt *elt,
unsigned int i, u64 n);
diff --git a/patches/0019-hrtimer-Unify-handling-of-hrtimer-remove.patch b/patches/0019-hrtimer-Unify-handling-of-hrtimer-remove.patch
new file mode 100644
index 000000000000..b35482983804
--- /dev/null
+++ b/patches/0019-hrtimer-Unify-handling-of-hrtimer-remove.patch
@@ -0,0 +1,88 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:57 +0200
+Subject: [PATCH 19/36] hrtimer: Unify handling of hrtimer remove
+
+When the first hrtimer on the current CPU is removed,
+hrtimer_force_reprogram() is invoked but only when
+CONFIG_HIGH_RES_TIMERS=y and hrtimer_cpu_base.hres_active is set.
+
+hrtimer_force_reprogram() updates hrtimer_cpu_base.expires_next and
+reprograms the clock event device. When CONFIG_HIGH_RES_TIMERS=y and
+hrtimer_cpu_base.hres_active is set, a pointless hrtimer interrupt can be
+prevented.
+
+hrtimer_check_target() makes the 'can remote enqueue' decision. As soon as
+hrtimer_check_target() is unconditionally available and
+hrtimer_cpu_base.expires_next is updated by hrtimer_reprogram(),
+hrtimer_force_reprogram() needs to be available unconditionally as well to
+prevent the following scenario with CONFIG_HIGH_RES_TIMERS=n:
+
+- the first hrtimer on this CPU is removed and hrtimer_force_reprogram() is
+ not executed
+
+- CPU goes idle (next timer is calculated and hrtimers are taken into
+ account)
+
+- a hrtimer is enqueued remotely on the idle CPU: hrtimer_check_target()
+  compares the expiry value and hrtimer_cpu_base.expires_next. The expiry
+  value is after expires_next, so the hrtimer is enqueued. This timer will
+  fire late if it expires before the effective first hrtimer on this CPU,
+  because the comparison was made against an outdated expires_next value.
+
+To prevent this scenario, make hrtimer_force_reprogram() unconditional
+except for the effective reprogramming part, which gets eliminated by the
+compiler in the CONFIG_HIGH_RES_TIMERS=n case.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 10 ++++------
+ 1 file changed, 4 insertions(+), 6 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -523,9 +523,6 @@ hrtimer_force_reprogram(struct hrtimer_c
+ {
+ ktime_t expires_next;
+
+- if (!__hrtimer_hres_active(cpu_base))
+- return;
+-
+ expires_next = __hrtimer_get_next_event(cpu_base);
+
+ if (skip_equal && expires_next == cpu_base->expires_next)
+@@ -534,6 +531,9 @@ hrtimer_force_reprogram(struct hrtimer_c
+ cpu_base->expires_next = expires_next;
+
+ /*
++ * If hres is not active, hardware does not have to be
++ * reprogrammed yet.
++ *
+ * If a hang was detected in the last timer interrupt then we
+ * leave the hang delay active in the hardware. We want the
+ * system to make progress. That also prevents the following
+@@ -547,7 +547,7 @@ hrtimer_force_reprogram(struct hrtimer_c
+ * set. So we'd effectivly block all timers until the T2 event
+ * fires.
+ */
+- if (cpu_base->hang_detected)
++ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ tick_program_event(cpu_base->expires_next, 1);
+@@ -848,7 +848,6 @@ static void __remove_hrtimer(struct hrti
+ if (!timerqueue_del(&base->active, &timer->node))
+ cpu_base->active_bases &= ~(1 << base->index);
+
+-#ifdef CONFIG_HIGH_RES_TIMERS
+ /*
+ * Note: If reprogram is false we do not update
+ * cpu_base->next_timer. This happens when we remove the first
+@@ -859,7 +858,6 @@ static void __remove_hrtimer(struct hrti
+ */
+ if (reprogram && timer == cpu_base->next_timer)
+ hrtimer_force_reprogram(cpu_base, 1);
+-#endif
+ }
+
+ /*
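The scenario in the changelog above comes down to comparing a new timer's expiry against a stale hrtimer_cpu_base.expires_next. A self-contained sketch of that comparison; the helper name is hypothetical and condenses the 'can remote enqueue' decision:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical condensation of the check: a timer may be queued remotely
 * only if it does not expire before the remote CPU's cached expires_next. */
static bool can_enqueue_remote(long expiry, long expires_next)
{
	return expiry >= expires_next;
}

int main(void)
{
	long stale = 40;	/* left over from a removed first timer */
	long fresh = 100;	/* the effective first timer on that CPU */

	/* stale value: enqueue allowed, the timer fires at ~100, i.e. late */
	printf("stale: %d\n", can_enqueue_remote(50, stale));
	/* updated value: enqueue refused, no late expiry */
	printf("fresh: %d\n", can_enqueue_remote(50, fresh));
	return 0;
}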
diff --git a/patches/0020-hrtimer-Unify-handling-of-remote-enqueue.patch b/patches/0020-hrtimer-Unify-handling-of-remote-enqueue.patch
new file mode 100644
index 000000000000..65647643e800
--- /dev/null
+++ b/patches/0020-hrtimer-Unify-handling-of-remote-enqueue.patch
@@ -0,0 +1,157 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:39:58 +0200
+Subject: [PATCH 20/36] hrtimer: Unify handling of remote enqueue
+
+hrtimer_reprogram() is conditionally invoked from hrtimer_start_range_ns()
+when hrtimer_cpu_base.hres_active is true.
+
+In the !hres_active case there is a special condition for the nohz_active
+case:
+
+ If the newly enqueued timer expires before the first expiring timer on a
+ remote CPU then the remote CPU needs to be notified and woken up from a
+ NOHZ idle sleep to take the new first expiring timer into account.
+
+Previous changes have already established the prerequisites to make the
+remote enqueue behaviour the same whether high resolution mode is active or
+not:
+
+ If the to-be-enqueued timer expires before the first expiring timer on a
+ remote CPU, then it cannot be enqueued there.
+
+This was done for the high resolution mode because there is no way to
+access the remote CPU timer hardware. The same is true for NOHZ, but was
+handled differently by unconditionally enqueuing the timer and waking up
+the remote CPU so it can reprogram its timer. Again there is no compelling
+reason for this difference.
+
+hrtimer_check_target(), which makes the 'can remote enqueue' decision, is
+already unconditional, but not yet functional because nothing updates
+hrtimer_cpu_base.expires_next in the !hres_active case.
+
+To unify this the following changes are required:
+
+ 1) Make the store of the new first expiry time unconditional in
+ hrtimer_reprogram() and check __hrtimer_hres_active() before proceeding
+ to the actual hardware access. This check also lets the compiler
+ eliminate the rest of the function in case of CONFIG_HIGH_RES_TIMERS=n.
+
+ 2) Invoke hrtimer_reprogram() unconditionally from
+ hrtimer_start_range_ns()
+
+ 3) Remove the remote wakeup special case for the !high_res && nohz_active
+ case.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 18 ++++++------------
+ kernel/time/tick-internal.h | 11 -----------
+ kernel/time/timer.c | 15 ++++++++++++++-
+ 3 files changed, 20 insertions(+), 24 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -687,21 +687,24 @@ static void hrtimer_reprogram(struct hrt
+
+ /* Update the pointer to the next expiring timer */
+ cpu_base->next_timer = timer;
++ cpu_base->expires_next = expires;
+
+ /*
++ * If hres is not active, hardware does not have to be
++ * programmed yet.
++ *
+ * If a hang was detected in the last timer interrupt then we
+ * do not schedule a timer which is earlier than the expiry
+ * which we enforced in the hang detection. We want the system
+ * to make progress.
+ */
+- if (cpu_base->hang_detected)
++ if (!__hrtimer_hres_active(cpu_base) || cpu_base->hang_detected)
+ return;
+
+ /*
+ * Program the timer hardware. We enforce the expiry for
+ * events which are already in the past.
+ */
+- cpu_base->expires_next = expires;
+ tick_program_event(expires, 1);
+ }
+
+@@ -940,16 +943,7 @@ void hrtimer_start_range_ns(struct hrtim
+ if (!leftmost)
+ goto unlock;
+
+- if (!hrtimer_is_hres_active(timer)) {
+- /*
+- * Kick to reschedule the next tick to handle the new timer
+- * on dynticks target.
+- */
+- if (is_timers_nohz_active())
+- wake_up_nohz_cpu(new_base->cpu_base->cpu);
+- } else {
+- hrtimer_reprogram(timer, new_base);
+- }
++ hrtimer_reprogram(timer, new_base);
+ unlock:
+ unlock_hrtimer_base(timer, &flags);
+ }
+--- a/kernel/time/tick-internal.h
++++ b/kernel/time/tick-internal.h
+@@ -151,12 +151,6 @@ static inline void tick_nohz_init(void)
+ #ifdef CONFIG_NO_HZ_COMMON
+ extern unsigned long tick_nohz_active;
+ extern void timers_update_nohz(void);
+-extern struct static_key_false timers_nohz_active;
+-
+-static inline bool is_timers_nohz_active(void)
+-{
+- return static_branch_unlikely(&timers_nohz_active);
+-}
+
+ #ifdef CONFIG_SMP
+ extern struct static_key_false timers_migration_enabled;
+@@ -164,11 +158,6 @@ extern struct static_key_false timers_mi
+ #else
+ static inline void timers_update_nohz(void) { }
+ #define tick_nohz_active (0)
+-
+-static inline bool is_timers_nohz_active(void)
+-{
+- return false;
+-}
+ #endif
+
+ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -210,7 +210,7 @@ static DEFINE_PER_CPU(struct timer_base,
+
+ #ifdef CONFIG_NO_HZ_COMMON
+
+-DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
++static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+ static DEFINE_MUTEX(timer_keys_mutex);
+
+ static void timer_update_keys(struct work_struct *work);
+@@ -260,6 +260,19 @@ int timer_migration_handler(struct ctl_t
+ mutex_unlock(&timer_keys_mutex);
+ return ret;
+ }
++
++static inline bool is_timers_nohz_active(void)
++{
++ return static_branch_unlikely(&timers_nohz_active);
++}
++
++#else
++
++static inline bool is_timers_nohz_active(void)
++{
++ return false;
++}
++
+ #endif /* NO_HZ_COMMON */
+
+ static unsigned long round_jiffies_common(unsigned long j, int cpu,
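Step 1 of the list above can be shown in isolation: store the software state first, then bail out before the hardware access when high resolution mode is off or a hang was detected. A simplified stand-in (not the kernel structs); in the kernel the guard additionally lets the compiler drop the hardware path for CONFIG_HIGH_RES_TIMERS=n:

#include <stdio.h>
#include <stdbool.h>

struct cpu_base {
	long expires_next;
	bool hres_active;
	bool hang_detected;
};

static void tick_program_event(long expires)
{
	printf("hardware programmed for %ld\n", expires);
}

static void reprogram(struct cpu_base *cb, long expires)
{
	cb->expires_next = expires;	/* unconditional store (step 1) */

	if (!cb->hres_active || cb->hang_detected)
		return;			/* guard before the hardware access */

	tick_program_event(expires);
}

int main(void)
{
	struct cpu_base cb = { .hres_active = false };

	reprogram(&cb, 2000);
	printf("expires_next = %ld, valid even without highres\n",
	       cb.expires_next);
	return 0;
}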
diff --git a/patches/0020-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch b/patches/0020-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
index 08b8db054d7f..f5174ec40e32 100644
--- a/patches/0020-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
+++ b/patches/0020-tracing-Add-usecs-modifier-for-hist-trigger-timestam.patch
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -287,6 +287,8 @@ extern void trace_array_put(struct trace
+@@ -289,6 +289,8 @@ extern void trace_array_put(struct trace
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
diff --git a/patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch b/patches/0021-hrtimer-Make-remote-enqueue-decision-less-restrictiv.patch
index 85c85c5f9bba..8b3fbb3f5318 100644
--- a/patches/0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch
+++ b/patches/0021-hrtimer-Make-remote-enqueue-decision-less-restrictiv.patch
@@ -1,16 +1,16 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:06 +0000
-Subject: [PATCH 11/25] hrtimer: Allow remote hrtimer enqueue with
- "expires_next" as expiry time
+Date: Sun, 22 Oct 2017 23:39:59 +0200
+Subject: [PATCH 21/36] hrtimer: Make remote enqueue decision less restrictive
-When enqueuing a timer with expiry X into a timer queue, where already
-a timer with expriy X is queued, the new timer is queued on the
-right-hand side of the already queued timer.
+The current decision whether a timer can be queued on a remote CPU checks
+for timer->expiry <= remote_cpu_base.expires_next.
-Therefore it is no problem, to enqueue a hrtimer on a remote CPU with the
-same expiry time than the programmed expiry time (expires_next) on this
-CPU, because the reprogramming path is not executed - it is not the
-"leftmost" hrtimer.
+This is too restrictive because a timer with the same expiry time as an
+existing timer will be enqueued on the right-hand side of the existing timer
+inside the rbtree, i.e. behind the first expiring timer.
+
+So it's safe to allow enqueuing timers with the same expiry time as the
+first expiring timer on a remote CPU base.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -28,4 +28,4 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ return expires < new_base->cpu_base->expires_next;
}
- #ifdef CONFIG_NO_HZ_COMMON
+ static inline
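The relaxed check leans on the timerqueue tie-break: equal keys are inserted to the right, so an equal-expiry timer can never become the remote CPU's leftmost timer. The changed comparison in isolation, with an illustrative helper name:

#include <stdio.h>
#include <stdbool.h>

/* Returns true when the timer would have to become the remote CPU's
 * first expiring timer, which forbids the remote enqueue. */
static bool expires_before_remote_first(long expires, long expires_next)
{
	return expires < expires_next;	/* previously: expires <= expires_next */
}

int main(void)
{
	long remote_first = 500;

	/* equal expiry sorts behind the existing timer: remote enqueue ok */
	printf("expiry 500 blocked: %d\n",
	       expires_before_remote_first(500, remote_first));
	/* earlier expiry would become the new leftmost: blocked */
	printf("expiry 400 blocked: %d\n",
	       expires_before_remote_first(400, remote_first));
	return 0;
}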
diff --git a/patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch b/patches/0022-hrtimer-Remove-base-argument-from-hrtimer_reprogram.patch
index 1735b2b9e521..544d7d74b767 100644
--- a/patches/0012-hrtimer-Simplify-hrtimer_reprogram-call.patch
+++ b/patches/0022-hrtimer-Remove-base-argument-from-hrtimer_reprogram.patch
@@ -1,10 +1,21 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:07 +0000
-Subject: [PATCH 12/25] hrtimer: Simplify hrtimer_reprogram() call
+Date: Sun, 22 Oct 2017 23:40:00 +0200
+Subject: [PATCH 22/36] hrtimer: Remove base argument from hrtimer_reprogram()
-The hrtimer_reprogramm() call can be simplified by dereferencing the
-hrtimer clock base inside the function. It is a preparatory change for
-softirq based hrtimers.
+hrtimer_reprogram() must have access to the hrtimer_clock_base of the new
+first expiring timer to access hrtimer_clock_base.offset for adjusting the
+expiry time to CLOCK_MONOTONIC. This is required to evaluate whether the
+new leftmost timer in the hrtimer_clock_base is the first expiring timer
+of all clock bases in a hrtimer_cpu_base.
+
+The only user of hrtimer_reprogram() is hrtimer_start_range_ns(), which has
+a pointer to hrtimer_clock_base already and hands it in as an argument. But
+hrtimer_start_range_ns() will be split for the upcoming support for softirq
+based hrtimers to avoid code duplication and will lose the direct access to
+the clock base pointer.
+
+Instead of handing in timer and timer->base as arguments, remove the base
+argument from hrtimer_reprogram() and retrieve the clock base internally.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -27,12 +38,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
-@@ -944,7 +944,7 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -943,7 +943,7 @@ void hrtimer_start_range_ns(struct hrtim
if (!leftmost)
goto unlock;
- hrtimer_reprogram(timer, new_base);
+ hrtimer_reprogram(timer);
-
unlock:
unlock_hrtimer_base(timer, &flags);
+ }
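The cleanup follows a common API pattern: drop a parameter the callee can derive itself. A tiny sketch with stand-in types (not the kernel definitions):

#include <stdio.h>

struct clock_base { long offset; };
struct timer { long expires; struct clock_base *base; };

static void reprogram(struct timer *t)
{
	/* recover internally what callers used to pass in explicitly */
	struct clock_base *base = t->base;

	printf("expires adjusted to %ld\n", t->expires - base->offset);
}

int main(void)
{
	struct clock_base cb = { .offset = 10 };
	struct timer t = { .expires = 100, .base = &cb };

	reprogram(&t);	/* no base argument needed anymore */
	return 0;
}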
diff --git a/patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch b/patches/0023-hrtimer-Split-hrtimer_start_range_ns.patch
index 6ae61f41b2fa..5c8484a41322 100644
--- a/patches/0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch
+++ b/patches/0023-hrtimer-Split-hrtimer_start_range_ns.patch
@@ -1,9 +1,9 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:07 +0000
-Subject: [PATCH 13/25] hrtimer: Split out code from hrtimer_start_range_ns()
- for reuse
+Date: Sun, 22 Oct 2017 23:40:01 +0200
+Subject: [PATCH 23/36] hrtimer: Split hrtimer_start_range_ns()
-Preparatory patch for softirq based hrtimers. No functional change.
+Preparatory patch for softirq based hrtimers to avoid code duplication. No
+functional change.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -13,17 +13,17 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -910,22 +910,11 @@ static inline ktime_t hrtimer_update_low
+@@ -909,22 +909,11 @@ static inline ktime_t hrtimer_update_low
return tim;
}
-/**
-- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+- * hrtimer_start_range_ns - (re)start an hrtimer
- * @timer: the timer to be added
- * @tim: expiry time
- * @delta_ns: "slack" range for the timer
-- * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
-- * relative (HRTIMER_MODE_REL)
+- * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
- */
-void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
- u64 delta_ns, const enum hrtimer_mode mode)
@@ -40,23 +40,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Remove an active timer from the queue: */
remove_hrtimer(timer, base, true);
-@@ -940,13 +929,28 @@ void hrtimer_start_range_ns(struct hrtim
+@@ -939,12 +928,27 @@ void hrtimer_start_range_ns(struct hrtim
/* Switch the timer base, if necessary: */
new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
-- leftmost = enqueue_hrtimer(timer, new_base);
+- leftmost = enqueue_hrtimer(timer, new_base, mode);
- if (!leftmost)
- goto unlock;
-+ return enqueue_hrtimer(timer, new_base);
++ return enqueue_hrtimer(timer, new_base, mode);
+}
-+
+/**
-+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
++ * hrtimer_start_range_ns - (re)start an hrtimer
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
-+ * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or
-+ * relative (HRTIMER_MODE_REL)
++ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
++ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
+ */
+void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode)
@@ -65,11 +64,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ unsigned long flags;
+
+ base = lock_hrtimer_base(timer, &flags);
-
-- hrtimer_reprogram(timer);
++
+ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+ hrtimer_reprogram(timer);
+- hrtimer_reprogram(timer);
-unlock:
unlock_hrtimer_base(timer, &flags);
}
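The resulting shape is a thin locked wrapper around a reusable core that reports whether the timer became the new first expiring one. A schematic sketch with illustrative names and the locking elided:

#include <stdio.h>
#include <stdbool.h>

/* Core helper: enqueue and report whether the timer is now the first
 * ("leftmost") expiring timer. 100 stands in for the current first expiry. */
static bool __start_timer_locked(long expiry)
{
	return expiry < 100;
}

/* Public wrapper: owns the locking (elided) and the reprogram decision. */
static void start_timer(long expiry)
{
	if (__start_timer_locked(expiry))
		printf("reprogram for %ld\n", expiry);
}

int main(void)
{
	start_timer(50);	/* new leftmost: reprogram */
	start_timer(500);	/* not leftmost: nothing to do */
	return 0;
}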
diff --git a/patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch b/patches/0024-hrtimer-Split-__hrtimer_get_next_event.patch
index 1590a8ddb010..c1d974ef5472 100644
--- a/patches/0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch
+++ b/patches/0024-hrtimer-Split-__hrtimer_get_next_event.patch
@@ -1,9 +1,9 @@
From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:08 +0000
-Subject: [PATCH 14/25] hrtimer: Split out code from __hrtimer_get_next_event()
- for reuse
+Date: Sun, 22 Oct 2017 23:40:02 +0200
+Subject: [PATCH 24/36] hrtimer: Split __hrtimer_get_next_event()
-Preparatory patch for softirq based hrtimers. No functional change.
+Preparatory patch for softirq based hrtimers to avoid code duplication. No
+functional change.
Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
@@ -13,24 +13,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -454,12 +454,12 @@ static inline void hrtimer_update_next_t
- }
+@@ -459,13 +459,13 @@ static struct hrtimer_clock_base *
+ #define for_each_active_base(base, cpu_base, active) \
+ while ((base = __next_base((cpu_base), &(active))))
- #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+static ktime_t __hrtimer_next_event_base(struct hrtimer_cpu_base *cpu_base,
+ unsigned int active,
+ ktime_t expires_next)
{
+ struct hrtimer_clock_base *base;
- unsigned int active = cpu_base->active_bases;
- ktime_t expires, expires_next = KTIME_MAX;
+ ktime_t expires;
-- hrtimer_update_next_timer(cpu_base, NULL);
- while (active) {
- unsigned int id = __ffs(active);
- struct hrtimer_clock_base *base;
-@@ -486,6 +486,18 @@ static ktime_t __hrtimer_get_next_event(
+- cpu_base->next_timer = NULL;
+ for_each_active_base(base, cpu_base, active) {
+ struct timerqueue_node *next;
+ struct hrtimer *timer;
+@@ -487,6 +487,18 @@ static ktime_t __hrtimer_get_next_event(
expires_next = 0;
return expires_next;
}
@@ -40,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ unsigned int active = cpu_base->active_bases;
+ ktime_t expires_next = KTIME_MAX;
+
-+ hrtimer_update_next_timer(cpu_base, NULL);
++ cpu_base->next_timer = NULL;
+
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+
diff --git a/patches/0025-hrtimer-Use-irqsave-irqrestore-around-__run_hrtimer.patch b/patches/0025-hrtimer-Use-irqsave-irqrestore-around-__run_hrtimer.patch
new file mode 100644
index 000000000000..cfa43b1557db
--- /dev/null
+++ b/patches/0025-hrtimer-Use-irqsave-irqrestore-around-__run_hrtimer.patch
@@ -0,0 +1,144 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:03 +0200
+Subject: [PATCH 25/36] hrtimer: Use irqsave/irqrestore around __run_hrtimer()
+
+__run_hrtimer() is called with the hrtimer_cpu_base.lock held and
+interrupts disabled. Before invoking the timer callback the base lock is
+dropped, but interrupts stay disabled.
+
+The upcoming support for softirq based hrtimers requires that interrupts
+are enabled before the timer callback is invoked.
+
+To avoid code duplication, take hrtimer_cpu_base.lock with
+raw_spin_lock_irqsave(flags) at the call site and hand in the flags as an
+argument. That way, raw_spin_unlock_irqrestore() before the callback
+invocation will either keep interrupts disabled in hard interrupt context
+or restore the interrupt enabled state when called from softirq context.
+
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 31 ++++++++++++++++++-------------
+ 1 file changed, 18 insertions(+), 13 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1163,7 +1163,8 @@ EXPORT_SYMBOL_GPL(hrtimer_active);
+
+ static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+ struct hrtimer_clock_base *base,
+- struct hrtimer *timer, ktime_t *now)
++ struct hrtimer *timer, ktime_t *now,
++ unsigned long flags)
+ {
+ enum hrtimer_restart (*fn)(struct hrtimer *);
+ int restart;
+@@ -1198,11 +1199,11 @@ static void __run_hrtimer(struct hrtimer
+ * protected against migration to a different CPU even if the lock
+ * is dropped.
+ */
+- raw_spin_unlock(&cpu_base->lock);
++ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ trace_hrtimer_expire_entry(timer, now);
+ restart = fn(timer);
+ trace_hrtimer_expire_exit(timer);
+- raw_spin_lock(&cpu_base->lock);
++ raw_spin_lock_irq(&cpu_base->lock);
+
+ /*
+ * Note: We clear the running state after enqueue_hrtimer and
+@@ -1230,7 +1231,8 @@ static void __run_hrtimer(struct hrtimer
+ base->running = NULL;
+ }
+
+-static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
++static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
++ unsigned long flags)
+ {
+ struct hrtimer_clock_base *base;
+ unsigned int active = cpu_base->active_bases;
+@@ -1261,7 +1263,7 @@ static void __hrtimer_run_queues(struct
+ if (basenow < hrtimer_get_softexpires_tv64(timer))
+ break;
+
+- __run_hrtimer(cpu_base, base, timer, &basenow);
++ __run_hrtimer(cpu_base, base, timer, &basenow, flags);
+ }
+ }
+ }
+@@ -1276,13 +1278,14 @@ void hrtimer_interrupt(struct clock_even
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ ktime_t expires_next, now, entry_time, delta;
++ unsigned long flags;
+ int retries = 0;
+
+ BUG_ON(!cpu_base->hres_active);
+ cpu_base->nr_events++;
+ dev->next_event = KTIME_MAX;
+
+- raw_spin_lock(&cpu_base->lock);
++ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ entry_time = now = hrtimer_update_base(cpu_base);
+ retry:
+ cpu_base->in_hrtirq = 1;
+@@ -1295,7 +1298,7 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = KTIME_MAX;
+
+- __hrtimer_run_queues(cpu_base, now);
++ __hrtimer_run_queues(cpu_base, now, flags);
+
+ /* Reevaluate the clock bases for the next expiry */
+ expires_next = __hrtimer_get_next_event(cpu_base);
+@@ -1305,7 +1308,7 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = expires_next;
+ cpu_base->in_hrtirq = 0;
+- raw_spin_unlock(&cpu_base->lock);
++ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+ /* Reprogramming necessary ? */
+ if (!tick_program_event(expires_next, 0)) {
+@@ -1326,7 +1329,7 @@ void hrtimer_interrupt(struct clock_even
+ * Acquire base lock for updating the offsets and retrieving
+ * the current time.
+ */
+- raw_spin_lock(&cpu_base->lock);
++ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ now = hrtimer_update_base(cpu_base);
+ cpu_base->nr_retries++;
+ if (++retries < 3)
+@@ -1339,7 +1342,8 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->nr_hangs++;
+ cpu_base->hang_detected = 1;
+- raw_spin_unlock(&cpu_base->lock);
++ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
++
+ delta = ktime_sub(now, entry_time);
+ if ((unsigned int)delta > cpu_base->max_hang_time)
+ cpu_base->max_hang_time = (unsigned int) delta;
+@@ -1381,6 +1385,7 @@ static inline void __hrtimer_peek_ahead_
+ void hrtimer_run_queues(void)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++ unsigned long flags;
+ ktime_t now;
+
+ if (__hrtimer_hres_active(cpu_base))
+@@ -1398,10 +1403,10 @@ void hrtimer_run_queues(void)
+ return;
+ }
+
+- raw_spin_lock(&cpu_base->lock);
++ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ now = hrtimer_update_base(cpu_base);
+- __hrtimer_run_queues(cpu_base, now);
+- raw_spin_unlock(&cpu_base->lock);
++ __hrtimer_run_queues(cpu_base, now, flags);
++ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ }
+
+ /*
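The effect of handing the saved flags down can be simulated in userspace. In this sketch the interrupt state is modeled as a plain bool and the locking is omitted; it only demonstrates why the callback inherits the outermost caller's interrupt state:

#include <stdio.h>
#include <stdbool.h>

static bool irqs_enabled = true;

static unsigned long lock_irqsave(void)
{
	unsigned long flags = irqs_enabled;	/* save the current state */
	irqs_enabled = false;			/* "disable interrupts" */
	return flags;
}

static void unlock_irqrestore(unsigned long flags)
{
	irqs_enabled = flags;			/* restore the saved state */
}

/* The callback runs with the outermost caller's interrupt state:
 * disabled when entered from hard irq, enabled when from softirq. */
static void run_timer(unsigned long flags)
{
	unlock_irqrestore(flags);
	printf("callback runs with irqs %s\n",
	       irqs_enabled ? "enabled" : "disabled");
	irqs_enabled = false;			/* re-acquire, plain irq-disable */
}

int main(void)
{
	irqs_enabled = false;			/* entry from hard irq context */
	run_timer(lock_irqsave());

	irqs_enabled = true;			/* entry from softirq context */
	run_timer(lock_irqsave());
	return 0;
}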
diff --git a/patches/0026-hrtimer-Add-clock-bases-and-hrtimer-mode-for-soft-ir.patch b/patches/0026-hrtimer-Add-clock-bases-and-hrtimer-mode-for-soft-ir.patch
new file mode 100644
index 000000000000..c0815eb28f1b
--- /dev/null
+++ b/patches/0026-hrtimer-Add-clock-bases-and-hrtimer-mode-for-soft-ir.patch
@@ -0,0 +1,108 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:04 +0200
+Subject: [PATCH 26/36] hrtimer: Add clock bases and hrtimer mode for soft irq
+ context
+
+hrtimer callback functions are always executed in hard interrupt
+context. Users of hrtimer which need their timer function to be executed
+in soft interrupt context make use of tasklets to get the proper context.
+
+Add additional hrtimer clock bases for timers which must expire in softirq
+context, so the detour via the tasklet can be avoided. This is also
+required for RT, where the majority of hrtimers is moved into softirq
+context.
+
+The selection of the expiry mode happens via a mode bit. Introduce
+HRTIMER_MODE_SOFT and the matching combinations with the ABS/REL/PINNED
+bits and update the decoding of hrtimer_mode in tracepoints.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 14 ++++++++++++++
+ include/trace/events/timer.h | 6 +++++-
+ kernel/time/hrtimer.c | 20 ++++++++++++++++++++
+ 3 files changed, 39 insertions(+), 1 deletion(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -33,14 +33,24 @@ struct hrtimer_cpu_base;
+ * HRTIMER_MODE_REL - Time value is relative to now
+ * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered
+ * when starting the timer)
++ * HRTIMER_MODE_SOFT - Timer callback function will be executed in
++ * soft irq context
+ */
+ enum hrtimer_mode {
+ HRTIMER_MODE_ABS = 0x00,
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
++ HRTIMER_MODE_SOFT = 0x04,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
++
++ HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT,
++ HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT,
++
++ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
++ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
++
+ };
+
+ /*
+@@ -151,6 +161,10 @@ enum hrtimer_base_type {
+ HRTIMER_BASE_REALTIME,
+ HRTIMER_BASE_BOOTTIME,
+ HRTIMER_BASE_TAI,
++ HRTIMER_BASE_MONOTONIC_SOFT,
++ HRTIMER_BASE_REALTIME_SOFT,
++ HRTIMER_BASE_BOOTTIME_SOFT,
++ HRTIMER_BASE_TAI_SOFT,
+ HRTIMER_MAX_CLOCK_BASES,
+ };
+
+--- a/include/trace/events/timer.h
++++ b/include/trace/events/timer.h
+@@ -148,7 +148,11 @@ DEFINE_EVENT(timer_class, timer_cancel,
+ { HRTIMER_MODE_ABS, "ABS" }, \
+ { HRTIMER_MODE_REL, "REL" }, \
+ { HRTIMER_MODE_ABS_PINNED, "ABS|PINNED" }, \
+- { HRTIMER_MODE_REL_PINNED, "REL|PINNED" })
++ { HRTIMER_MODE_REL_PINNED, "REL|PINNED" }, \
++ { HRTIMER_MODE_ABS_SOFT, "ABS|SOFT" }, \
++ { HRTIMER_MODE_REL_SOFT, "REL|SOFT" }, \
++ { HRTIMER_MODE_ABS_PINNED_SOFT, "ABS|PINNED|SOFT" }, \
++ { HRTIMER_MODE_REL_PINNED_SOFT, "REL|PINNED|SOFT" })
+
+ /**
+ * hrtimer_init - called when the hrtimer is initialized
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -92,6 +92,26 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
+ .clockid = CLOCK_TAI,
+ .get_time = &ktime_get_clocktai,
+ },
++ {
++ .index = HRTIMER_BASE_MONOTONIC_SOFT,
++ .clockid = CLOCK_MONOTONIC,
++ .get_time = &ktime_get,
++ },
++ {
++ .index = HRTIMER_BASE_REALTIME_SOFT,
++ .clockid = CLOCK_REALTIME,
++ .get_time = &ktime_get_real,
++ },
++ {
++ .index = HRTIMER_BASE_BOOTTIME_SOFT,
++ .clockid = CLOCK_BOOTTIME,
++ .get_time = &ktime_get_boottime,
++ },
++ {
++ .index = HRTIMER_BASE_TAI_SOFT,
++ .clockid = CLOCK_TAI,
++ .get_time = &ktime_get_clocktai,
++ },
+ }
+ };
+
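Because HRTIMER_MODE_SOFT is a free bit rather than a new enumeration range, every existing ABS/REL/PINNED combination gains its _SOFT variant by OR-ing a single flag. Decoding such a value is plain bit testing, sketched here with abbreviated names:

#include <stdio.h>

enum mode {
	MODE_ABS    = 0x00,
	MODE_REL    = 0x01,
	MODE_PINNED = 0x02,
	MODE_SOFT   = 0x04,
};

int main(void)
{
	enum mode m = MODE_REL | MODE_PINNED | MODE_SOFT;

	printf("relative: %d\n", !!(m & MODE_REL));
	printf("pinned:   %d\n", !!(m & MODE_PINNED));
	printf("softirq:  %d\n", !!(m & MODE_SOFT));
	return 0;
}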
diff --git a/patches/0027-hrtimer-Prepare-handling-of-hard-and-softirq-based-h.patch b/patches/0027-hrtimer-Prepare-handling-of-hard-and-softirq-based-h.patch
new file mode 100644
index 000000000000..210bb91fe176
--- /dev/null
+++ b/patches/0027-hrtimer-Prepare-handling-of-hard-and-softirq-based-h.patch
@@ -0,0 +1,116 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:05 +0200
+Subject: [PATCH 27/36] hrtimer: Prepare handling of hard and softirq based
+ hrtimers
+
+Softirq based hrtimers can utilize most of the existing hrtimer
+functions, but need to operate on a different data set.
+
+Add an active_mask argument to various functions so the hard and soft bases
+can be selected. Fixup the existing callers and hand in the ACTIVE_HARD
+mask.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 38 +++++++++++++++++++++++++++++---------
+ 1 file changed, 29 insertions(+), 9 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -60,6 +60,15 @@
+ #include "tick-internal.h"
+
+ /*
++ * Masks for selecting the soft and hard context timers from
++ * cpu_base->active
++ */
++#define MASK_SHIFT (HRTIMER_BASE_MONOTONIC_SOFT)
++#define HRTIMER_ACTIVE_HARD ((1U << MASK_SHIFT) - 1)
++#define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
++#define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
++
++/*
+ * The timer bases:
+ *
+ * There are more clockids than hrtimer bases. Thus, we index
+@@ -508,13 +517,24 @@ static ktime_t __hrtimer_next_event_base
+ return expires_next;
+ }
+
+-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
++/*
++ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
++ * does not set cpu_base::*expires_next; that is done by hrtimer_reprogram().
++ *
++ * @active_mask must be one of:
++ * - HRTIMER_ACTIVE_ALL,
++ * - HRTIMER_ACTIVE_SOFT, or
++ * - HRTIMER_ACTIVE_HARD.
++ */
++static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
++ unsigned int active_mask)
+ {
+- unsigned int active = cpu_base->active_bases;
++ unsigned int active;
+ ktime_t expires_next = KTIME_MAX;
+
+ cpu_base->next_timer = NULL;
+
++ active = cpu_base->active_bases & active_mask;
+ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
+
+ return expires_next;
+@@ -555,7 +575,7 @@ hrtimer_force_reprogram(struct hrtimer_c
+ {
+ ktime_t expires_next;
+
+- expires_next = __hrtimer_get_next_event(cpu_base);
++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
+
+ if (skip_equal && expires_next == cpu_base->expires_next)
+ return;
+@@ -1078,7 +1098,7 @@ u64 hrtimer_get_next_event(void)
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ if (!__hrtimer_hres_active(cpu_base))
+- expires = __hrtimer_get_next_event(cpu_base);
++ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+@@ -1252,10 +1272,10 @@ static void __run_hrtimer(struct hrtimer
+ }
+
+ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
+- unsigned long flags)
++ unsigned long flags, unsigned int active_mask)
+ {
+ struct hrtimer_clock_base *base;
+- unsigned int active = cpu_base->active_bases;
++ unsigned int active = cpu_base->active_bases & active_mask;
+
+ for_each_active_base(base, cpu_base, active) {
+ struct timerqueue_node *node;
+@@ -1318,10 +1338,10 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = KTIME_MAX;
+
+- __hrtimer_run_queues(cpu_base, now, flags);
++ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+
+ /* Reevaluate the clock bases for the next expiry */
+- expires_next = __hrtimer_get_next_event(cpu_base);
++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
+ /*
+ * Store the new expiry value so the migration code can verify
+ * against it.
+@@ -1425,7 +1445,7 @@ void hrtimer_run_queues(void)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ now = hrtimer_update_base(cpu_base);
+- __hrtimer_run_queues(cpu_base, now, flags);
++ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ }
+
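The three masks are pure bit arithmetic over the clock base indices: the four hard bases occupy bits 0-3 and the four soft bases bits 4-7, so MASK_SHIFT is 4. A compilable check of that arithmetic:

#include <stdio.h>

#define MASK_SHIFT   4				/* index of the first soft base */
#define ACTIVE_HARD  ((1U << MASK_SHIFT) - 1)	/* 0x0f */
#define ACTIVE_SOFT  (ACTIVE_HARD << MASK_SHIFT)	/* 0xf0 */
#define ACTIVE_ALL   (ACTIVE_SOFT | ACTIVE_HARD)	/* 0xff */

int main(void)
{
	unsigned int active_bases = 0x12;	/* soft base 4 + hard base 1 */

	printf("hard set: 0x%02x\n", active_bases & ACTIVE_HARD);	/* 0x02 */
	printf("soft set: 0x%02x\n", active_bases & ACTIVE_SOFT);	/* 0x10 */
	printf("all:      0x%02x\n", active_bases & ACTIVE_ALL);
	return 0;
}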
diff --git a/patches/0028-hrtimer-Implement-support-for-softirq-based-hrtimers.patch b/patches/0028-hrtimer-Implement-support-for-softirq-based-hrtimers.patch
new file mode 100644
index 000000000000..3af1ceef100c
--- /dev/null
+++ b/patches/0028-hrtimer-Implement-support-for-softirq-based-hrtimers.patch
@@ -0,0 +1,508 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:06 +0200
+Subject: [PATCH 28/36] hrtimer: Implement support for softirq based hrtimers
+
+hrtimer callbacks are always invoked in hard interrupt context. Several
+users in tree require soft interrupt context for their callbacks and
+achieve this by combining a hrtimer with a tasklet. The hrtimer schedules
+the tasklet in hard interrupt context and the tasklet callback gets invoked
+in softirq context later.
+
+That's suboptimal and, aside from that, the real-time patch moves most of the
+hrtimers into softirq context. So adding native support for hrtimers
+expiring in softirq context is a valuable extension for both mainline and
+the RT patch set.
+
+Each valid hrtimer clock id has two associated hrtimer clock bases: one for
+timers expiring in hardirq context and one for timers expiring in softirq
+context.
+
+Implement the functionality to associate a hrtimer with the hard or softirq
+related clock bases and update the relevant functions to take them into
+account when the next expiry time needs to be evaluated.
+
+Add a check to the hard interrupt context handler functions that tests
+whether the first expiring softirq based timer has expired. If it has
+expired, the softirq is raised and the accounting of softirq based timers
+for evaluating the next expiry time to program into the timer hardware is
+skipped until the softirq processing has finished. At the end of the
+softirq processing, the regular processing is resumed.
+
+Suggested-by: Thomas Gleixner <tglx@linutronix.de>
+Suggested-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ include/linux/hrtimer.h | 20 +++-
+ kernel/time/hrtimer.c | 201 ++++++++++++++++++++++++++++++++++++++++--------
+ 2 files changed, 185 insertions(+), 36 deletions(-)
+
+--- a/include/linux/hrtimer.h
++++ b/include/linux/hrtimer.h
+@@ -113,6 +113,7 @@ struct hrtimer {
+ struct hrtimer_clock_base *base;
+ u8 state;
+ u8 is_rel;
++ u8 is_soft;
+ };
+
+ /**
+@@ -178,13 +179,18 @@ enum hrtimer_base_type {
+ * @hres_active: State of high resolution mode
+ * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @hang_detected: The last hrtimer interrupt detected a hang
++ * @softirq_activated: indicates whether the softirq was raised; updating
++ * the softirq related settings is not required then.
+ * @nr_events: Total number of hrtimer interrupt events
+ * @nr_retries: Total number of hrtimer interrupt retries
+ * @nr_hangs: Total number of hrtimer interrupt hangs
+ * @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @expires_next: absolute time of the next event, is required for remote
+- * hrtimer enqueue
++ * hrtimer enqueue; it is the total first expiry time (hard
++ * and soft hrtimers are taken into account)
+ * @next_timer: Pointer to the first expiring timer
++ * @softirq_expires_next: Time to check whether the soft queues also need to be expired
++ * @softirq_next_timer: Pointer to the first expiring softirq based timer
+ * @clock_base: array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+@@ -196,9 +202,10 @@ struct hrtimer_cpu_base {
+ unsigned int cpu;
+ unsigned int active_bases;
+ unsigned int clock_was_set_seq;
+- unsigned int hres_active : 1,
+- in_hrtirq : 1,
+- hang_detected : 1;
++ unsigned int hres_active : 1,
++ in_hrtirq : 1,
++ hang_detected : 1,
++ softirq_activated : 1;
+ #ifdef CONFIG_HIGH_RES_TIMERS
+ unsigned int nr_events;
+ unsigned short nr_retries;
+@@ -207,6 +214,8 @@ struct hrtimer_cpu_base {
+ #endif
+ ktime_t expires_next;
+ struct hrtimer *next_timer;
++ ktime_t softirq_expires_next;
++ struct hrtimer *softirq_next_timer;
+ struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
+ } ____cacheline_aligned;
+
+@@ -379,7 +388,8 @@ extern void hrtimer_start_range_ns(struc
+ * @timer: the timer to be added
+ * @tim: expiry time
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
++ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
++ * softirq based mode is considered for debug purposes only!
+ */
+ static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+ const enum hrtimer_mode mode)
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -68,6 +68,9 @@
+ #define HRTIMER_ACTIVE_SOFT (HRTIMER_ACTIVE_HARD << MASK_SHIFT)
+ #define HRTIMER_ACTIVE_ALL (HRTIMER_ACTIVE_SOFT | HRTIMER_ACTIVE_HARD)
+
++/* Define for debug mode check */
++#define HRTIMER_MODECHECK true
++
+ /*
+ * The timer bases:
+ *
+@@ -411,8 +414,17 @@ static inline void debug_hrtimer_init(st
+ debug_object_init(timer, &hrtimer_debug_descr);
+ }
+
+-static inline void debug_hrtimer_activate(struct hrtimer *timer)
++static inline void debug_hrtimer_activate(struct hrtimer *timer,
++ enum hrtimer_mode mode,
++ bool modecheck)
+ {
++ /*
++ * Check whether the HRTIMER_MODE_SOFT bit and hrtimer.is_soft
++ * match when a timer is started via __hrtimer_start_range_ns().
++ */
++ if (modecheck)
++ WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
++
+ debug_object_activate(timer, &hrtimer_debug_descr);
+ }
+
+@@ -444,8 +456,11 @@ void destroy_hrtimer_on_stack(struct hrt
+ EXPORT_SYMBOL_GPL(destroy_hrtimer_on_stack);
+
+ #else
++
+ static inline void debug_hrtimer_init(struct hrtimer *timer) { }
+-static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
++static inline void debug_hrtimer_activate(struct hrtimer *timer,
++ enum hrtimer_mode mode,
++ bool modecheck) { }
+ static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
+ #endif
+
+@@ -458,9 +473,10 @@ debug_init(struct hrtimer *timer, clocki
+ }
+
+ static inline void debug_activate(struct hrtimer *timer,
+- enum hrtimer_mode mode)
++ enum hrtimer_mode mode,
++ bool modecheck)
+ {
+- debug_hrtimer_activate(timer);
++ debug_hrtimer_activate(timer, mode, modecheck);
+ trace_hrtimer_start(timer, mode);
+ }
+
+@@ -470,7 +486,6 @@ static inline void debug_deactivate(stru
+ trace_hrtimer_cancel(timer);
+ }
+
+-#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+ static struct hrtimer_clock_base *
+ __next_base(struct hrtimer_cpu_base *cpu_base, unsigned int *active)
+ {
+@@ -504,7 +519,10 @@ static ktime_t __hrtimer_next_event_base
+ expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+ if (expires < expires_next) {
+ expires_next = expires;
+- cpu_base->next_timer = timer;
++ if (timer->is_soft)
++ cpu_base->softirq_next_timer = timer;
++ else
++ cpu_base->next_timer = timer;
+ }
+ }
+ /*
+@@ -521,25 +539,42 @@ static ktime_t __hrtimer_next_event_base
+ * Recomputes cpu_base::*next_timer and returns the earliest expires_next but
+ * does not set cpu_base::*expires_next; that is done by hrtimer_reprogram().
+ *
++ * When a softirq is pending, we can ignore the HRTIMER_ACTIVE_SOFT bases;
++ * those timers will get run whenever the softirq gets handled. At the end of
++ * hrtimer_run_softirq(), hrtimer_update_softirq_timer() will re-add these bases.
++ *
++ * Therefore the softirq values are those from the HRTIMER_ACTIVE_SOFT clock
++ * bases. The !softirq values are the minima across HRTIMER_ACTIVE_ALL, unless
++ * an actual softirq is pending, in which case they are the minima of
++ * HRTIMER_ACTIVE_HARD.
++ *
+ * @active_mask must be one of:
+ * - HRTIMER_ACTIVE_ALL,
+ * - HRTIMER_ACTIVE_SOFT, or
+ * - HRTIMER_ACTIVE_HARD.
+ */
+-static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base,
+- unsigned int active_mask)
++static ktime_t
++__hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base, unsigned int active_mask)
+ {
+ unsigned int active;
++ struct hrtimer *next_timer = NULL;
+ ktime_t expires_next = KTIME_MAX;
+
+- cpu_base->next_timer = NULL;
++ if (!cpu_base->softirq_activated && (active_mask & HRTIMER_ACTIVE_SOFT)) {
++ active = cpu_base->active_bases & HRTIMER_ACTIVE_SOFT;
++ cpu_base->softirq_next_timer = NULL;
++ expires_next = __hrtimer_next_event_base(cpu_base, active, KTIME_MAX);
++
++ next_timer = cpu_base->softirq_next_timer;
++ }
+
+- active = cpu_base->active_bases & active_mask;
+- expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
++ if (active_mask & HRTIMER_ACTIVE_HARD) {
++ active = cpu_base->active_bases & HRTIMER_ACTIVE_HARD;
++ cpu_base->next_timer = next_timer;
++ expires_next = __hrtimer_next_event_base(cpu_base, active, expires_next);
++ }
+
+ return expires_next;
+ }
+-#endif
+
+ static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+ {
+@@ -547,8 +582,14 @@ static inline ktime_t hrtimer_update_bas
+ ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+ ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
+
+- return ktime_get_update_offsets_now(&base->clock_was_set_seq,
++ ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
+ offs_real, offs_boot, offs_tai);
++
++ base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
++ base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
++ base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
++
++ return now;
+ }
+
+ /*
+@@ -575,7 +616,23 @@ hrtimer_force_reprogram(struct hrtimer_c
+ {
+ ktime_t expires_next;
+
+- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
++ /*
++ * Find the current next expiration time.
++ */
++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
++
++ if (cpu_base->next_timer && cpu_base->next_timer->is_soft) {
++ /*
++ * When the softirq is activated, the hardware has to be
++ * programmed with the first hard hrtimer, because the soft
++ * timer interrupt could fire too late.
++ */
++ if (cpu_base->softirq_activated)
++ expires_next = __hrtimer_get_next_event(cpu_base,
++ HRTIMER_ACTIVE_HARD);
++ else
++ cpu_base->softirq_expires_next = expires_next;
++ }
+
+ if (skip_equal && expires_next == cpu_base->expires_next)
+ return;
+@@ -702,7 +759,7 @@ static inline void retrigger_next_event(
+ *
+ * Called with interrupts disabled and base->cpu_base.lock held
+ */
+-static void hrtimer_reprogram(struct hrtimer *timer)
++static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
+ {
+ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+ struct hrtimer_clock_base *base = timer->base;
+@@ -711,6 +768,28 @@ static void hrtimer_reprogram(struct hrt
+ WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
+
+ /*
++ * CLOCK_REALTIME timer might be requested with an absolute
++ * expiry time which is less than base->offset. Set it to 0.
++ */
++ if (expires < 0)
++ expires = 0;
++
++ if (timer->is_soft) {
++ if (cpu_base->softirq_activated)
++ return;
++
++ if (!ktime_before(expires, cpu_base->softirq_expires_next))
++ return;
++
++ cpu_base->softirq_next_timer = timer;
++ cpu_base->softirq_expires_next = expires;
++
++ if (!ktime_before(expires, cpu_base->expires_next) ||
++ !reprogram)
++ return;
++ }
++
++ /*
+ * If the timer is not on the current cpu, we cannot reprogram
+ * the other cpus clock event device.
+ */
+@@ -727,13 +806,6 @@ static void hrtimer_reprogram(struct hrt
+ if (cpu_base->in_hrtirq)
+ return;
+
+- /*
+- * CLOCK_REALTIME timer might be requested with an absolute
+- * expiry time which is less than base->offset. Set it to 0.
+- */
+- if (expires < 0)
+- expires = 0;
+-
+ if (expires >= cpu_base->expires_next)
+ return;
+
+@@ -868,9 +940,10 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
+ */
+ static int enqueue_hrtimer(struct hrtimer *timer,
+ struct hrtimer_clock_base *base,
+- enum hrtimer_mode mode)
++ enum hrtimer_mode mode,
++ bool modecheck)
+ {
+- debug_activate(timer, mode);
++ debug_activate(timer, mode, modecheck);
+
+ base->cpu_base->active_bases |= 1 << base->index;
+
+@@ -961,6 +1034,31 @@ static inline ktime_t hrtimer_update_low
+ return tim;
+ }
+
++static void
++hrtimer_update_softirq_timer(struct hrtimer_cpu_base *cpu_base, bool reprogram)
++{
++ ktime_t expires;
++
++ /*
++ * Find the next SOFT expiration.
++ */
++ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_SOFT);
++
++ /*
++ * Reprogramming needs to be triggered, even if the next soft
++ * hrtimer expires at the same time as the next hard
++ * hrtimer. cpu_base->softirq_expires_next needs to be updated!
++ */
++ if (!reprogram || expires == KTIME_MAX)
++ return;
++
++ /*
++ * cpu_base->*next_timer is recomputed by __hrtimer_get_next_event()
++ * cpu_base->*expires_next is only set by hrtimer_reprogram()
++ */
++ hrtimer_reprogram(cpu_base->softirq_next_timer, reprogram);
++}
++
+ static int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode,
+ struct hrtimer_clock_base *base)
+@@ -980,7 +1078,7 @@ static int __hrtimer_start_range_ns(stru
+ /* Switch the timer base, if necessary: */
+ new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);
+
+- return enqueue_hrtimer(timer, new_base, mode);
++ return enqueue_hrtimer(timer, new_base, mode, HRTIMER_MODECHECK);
+ }
+ /**
+ * hrtimer_start_range_ns - (re)start an hrtimer
+@@ -988,7 +1086,8 @@ static int __hrtimer_start_range_ns(stru
+ * @tim: expiry time
+ * @delta_ns: "slack" range for the timer
+ * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED)
++ * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED);
++ * softirq based mode is considered for debug purposes only!
+ */
+ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+ u64 delta_ns, const enum hrtimer_mode mode)
+@@ -999,7 +1098,7 @@ void hrtimer_start_range_ns(struct hrtim
+ base = lock_hrtimer_base(timer, &flags);
+
+ if (__hrtimer_start_range_ns(timer, tim, delta_ns, mode, base))
+- hrtimer_reprogram(timer);
++ hrtimer_reprogram(timer, true);
+
+ unlock_hrtimer_base(timer, &flags);
+ }
+@@ -1098,7 +1197,7 @@ u64 hrtimer_get_next_event(void)
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+
+ if (!__hrtimer_hres_active(cpu_base))
+- expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
++ expires = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+
+@@ -1256,7 +1355,8 @@ static void __run_hrtimer(struct hrtimer
+ */
+ if (restart != HRTIMER_NORESTART &&
+ !(timer->state & HRTIMER_STATE_ENQUEUED))
+- enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS);
++ enqueue_hrtimer(timer, base, HRTIMER_MODE_ABS,
++ !HRTIMER_MODECHECK);
+
+ /*
+ * Separate the ->running assignment from the ->state assignment.
+@@ -1308,6 +1408,23 @@ static void __hrtimer_run_queues(struct
+ }
+ }
+
++static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
++{
++ struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
++ unsigned long flags;
++ ktime_t now;
++
++ raw_spin_lock_irqsave(&cpu_base->lock, flags);
++
++ now = hrtimer_update_base(cpu_base);
++ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_SOFT);
++
++ cpu_base->softirq_activated = 0;
++ hrtimer_update_softirq_timer(cpu_base, true);
++
++ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
++}
++
+ #ifdef CONFIG_HIGH_RES_TIMERS
+
+ /*
+@@ -1338,10 +1455,16 @@ void hrtimer_interrupt(struct clock_even
+ */
+ cpu_base->expires_next = KTIME_MAX;
+
++ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
++ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->softirq_activated = 1;
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+
+ /* Reevaluate the clock bases for the next expiry */
+- expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_HARD);
++ expires_next = __hrtimer_get_next_event(cpu_base, HRTIMER_ACTIVE_ALL);
+ /*
+ * Store the new expiry value so the migration code can verify
+ * against it.
+@@ -1445,6 +1568,13 @@ void hrtimer_run_queues(void)
+
+ raw_spin_lock_irqsave(&cpu_base->lock, flags);
+ now = hrtimer_update_base(cpu_base);
++
++ if (!ktime_before(now, cpu_base->softirq_expires_next)) {
++ cpu_base->softirq_expires_next = KTIME_MAX;
++ cpu_base->softirq_activated = 1;
++ raise_softirq_irqoff(HRTIMER_SOFTIRQ);
++ }
++
+ __hrtimer_run_queues(cpu_base, now, flags, HRTIMER_ACTIVE_HARD);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ }
+@@ -1626,6 +1756,7 @@ int hrtimers_prepare_cpu(unsigned int cp
+ cpu_base->cpu = cpu;
+ cpu_base->hres_active = 0;
+ cpu_base->expires_next = KTIME_MAX;
++ cpu_base->softirq_expires_next = KTIME_MAX;
+ return 0;
+ }
+
+@@ -1657,7 +1788,8 @@ static void migrate_hrtimer_list(struct
+ * sort out already expired timers and reprogram the
+ * event device.
+ */
+- enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS);
++ enqueue_hrtimer(timer, new_base, HRTIMER_MODE_ABS,
++ !HRTIMER_MODECHECK);
+ }
+ }
+
+@@ -1684,6 +1816,12 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ &new_base->clock_base[i]);
+ }
+
++ /*
++ * The migration might have changed the first expiring softirq
++ * timer on this CPU. Update it.
++ */
++ hrtimer_update_softirq_timer(new_base, false);
++
+ raw_spin_unlock(&old_base->lock);
+ raw_spin_unlock(&new_base->lock);
+
+@@ -1698,6 +1836,7 @@ int hrtimers_dead_cpu(unsigned int scpu)
+ void __init hrtimers_init(void)
+ {
+ hrtimers_prepare_cpu(smp_processor_id());
++ open_softirq(HRTIMER_SOFTIRQ, hrtimer_run_softirq);
+ }
+
+ /**
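
A minimal usage sketch of what the machinery above enables (illustrative
only: the soft_demo_* names are made up, the hrtimer calls are the stock
API, and HRTIMER_MODE_REL_SOFT assumes this series is applied). The
callback then runs from HRTIMER_SOFTIRQ via hrtimer_run_softirq() instead
of hard interrupt context:

    #include <linux/module.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer soft_demo_timer;

    /* Invoked from HRTIMER_SOFTIRQ, not from the hrtimer interrupt. */
    static enum hrtimer_restart soft_demo_fn(struct hrtimer *t)
    {
            pr_info("soft hrtimer expired in softirq context\n");
            hrtimer_forward_now(t, ms_to_ktime(100));
            return HRTIMER_RESTART;
    }

    static int __init soft_demo_init(void)
    {
            hrtimer_init(&soft_demo_timer, CLOCK_MONOTONIC,
                         HRTIMER_MODE_REL_SOFT);
            soft_demo_timer.function = soft_demo_fn;
            hrtimer_start(&soft_demo_timer, ms_to_ktime(100),
                          HRTIMER_MODE_REL_SOFT);
            return 0;
    }

    static void __exit soft_demo_exit(void)
    {
            /* also waits for a softirq callback in flight */
            hrtimer_cancel(&soft_demo_timer);
    }

    module_init(soft_demo_init);
    module_exit(soft_demo_exit);
    MODULE_LICENSE("GPL");
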
diff --git a/patches/0028-tracing-Add-variable-reference-handling-to-hist-trig.patch b/patches/0028-tracing-Add-variable-reference-handling-to-hist-trig.patch
index c3862a8cbf1d..54fabc25bc31 100644
--- a/patches/0028-tracing-Add-variable-reference-handling-to-hist-trig.patch
+++ b/patches/0028-tracing-Add-variable-reference-handling-to-hist-trig.patch
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -7756,6 +7756,7 @@ static int instance_mkdir(const char *na
+@@ -7766,6 +7766,7 @@ static int instance_mkdir(const char *na
INIT_LIST_HEAD(&tr->systems);
INIT_LIST_HEAD(&tr->events);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (allocate_trace_buffers(tr, trace_buf_size) < 0)
goto out_free_tr;
-@@ -8503,6 +8504,7 @@ ssize_t trace_parse_run_command(struct f
+@@ -8513,6 +8514,7 @@ ssize_t trace_parse_run_command(struct f
INIT_LIST_HEAD(&global_trace.systems);
INIT_LIST_HEAD(&global_trace.events);
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
apply_trace_boot_options();
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -272,6 +272,7 @@ struct trace_array {
+@@ -274,6 +274,7 @@ struct trace_array {
int function_enabled;
#endif
int time_stamp_abs_ref;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
enum {
-@@ -1546,6 +1547,8 @@ extern void pause_named_trigger(struct e
+@@ -1550,6 +1551,8 @@ extern void pause_named_trigger(struct e
extern void unpause_named_trigger(struct event_trigger_data *data);
extern void set_named_trigger_data(struct event_trigger_data *data,
struct event_trigger_data *named_data);
diff --git a/patches/0029-hrtimer-Implement-SOFT-HARD-clock-base-selection.patch b/patches/0029-hrtimer-Implement-SOFT-HARD-clock-base-selection.patch
new file mode 100644
index 000000000000..b7324d5e3029
--- /dev/null
+++ b/patches/0029-hrtimer-Implement-SOFT-HARD-clock-base-selection.patch
@@ -0,0 +1,55 @@
+From: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:07 +0200
+Subject: [PATCH 29/36] hrtimer: Implement SOFT/HARD clock base selection
+
+All prerequisites to handle hrtimers for expiry in either hard or soft
+interrupt context are in place.
+
+Add the missing bit in hrtimer_init() which associates the timer with the
+hard or the soft irq clock base.
+
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/hrtimer.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1220,8 +1220,9 @@ static inline int hrtimer_clockid_to_bas
+ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+ {
++ bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
++ int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
+ struct hrtimer_cpu_base *cpu_base;
+- int base;
+
+ memset(timer, 0, sizeof(struct hrtimer));
+
+@@ -1235,7 +1236,8 @@ static void __hrtimer_init(struct hrtime
+ if (clock_id == CLOCK_REALTIME && mode & HRTIMER_MODE_REL)
+ clock_id = CLOCK_MONOTONIC;
+
+- base = hrtimer_clockid_to_base(clock_id);
++ base += hrtimer_clockid_to_base(clock_id);
++ timer->is_soft = softtimer;
+ timer->base = &cpu_base->clock_base[base];
+ timerqueue_init(&timer->node);
+ }
+@@ -1244,8 +1246,13 @@ static void __hrtimer_init(struct hrtime
+ * hrtimer_init - initialize a timer to the given clock
+ * @timer: the timer to be initialized
+ * @clock_id: the clock to be used
+- * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or
+- * relative (HRTIMER_MODE_REL); pinned is not considered here!
++ * @mode: The modes which are relevant for initialization:
++ * HRTIMER_MODE_ABS, HRTIMER_MODE_REL, HRTIMER_MODE_ABS_SOFT,
++ * HRTIMER_MODE_REL_SOFT
++ *
++ * The PINNED variants of the above can be handed in,
++ * but the PINNED bit is ignored as pinning happens
++ * when the hrtimer is started
+ */
+ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
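
The index arithmetic is the whole trick: with this series the per-CPU
clock_base[] array holds the hard bases in its first half and their soft
twins in the second, so HRTIMER_MAX_CLOCK_BASES / 2 is the soft offset.
A standalone illustration (the constants mirror the patched kernel,
where MONOTONIC is base 0 of 8):

    #include <assert.h>

    #define HRTIMER_MAX_CLOCK_BASES 8 /* 4 hard bases + 4 soft bases */
    #define HRTIMER_BASE_MONOTONIC  0 /* hrtimer_clockid_to_base(CLOCK_MONOTONIC) */

    static int select_base(int clockid_base, int softtimer)
    {
            int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;

            return base + clockid_base;
    }

    int main(void)
    {
            assert(select_base(HRTIMER_BASE_MONOTONIC, 0) == 0); /* hard base */
            assert(select_base(HRTIMER_BASE_MONOTONIC, 1) == 4); /* soft twin */
            return 0;
    }
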
diff --git a/patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch b/patches/0030-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
index 9f2dcbc000bd..02570fc75b40 100644
--- a/patches/0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
+++ b/patches/0030-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
@@ -1,9 +1,9 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:10 +0000
-Subject: [PATCH 19/25] can/bcm: Replace hrtimer_tasklet with softirq based
+Date: Sun, 22 Oct 2017 23:40:08 +0200
+Subject: [PATCH 30/36] can/bcm: Replace hrtimer_tasklet with softirq based
hrtimer
-Switch the timer to CLOCK_MONOTONIC_SOFT, which executed the timer
+Switch the timer to HRTIMER_MODE_SOFT, which executes the timer
callback in softirq context and remove the hrtimer_tasklet.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -13,8 +13,8 @@ Cc: Marc Kleine-Budde <mkl@pengutronix.de>
Cc: linux-can@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- net/can/bcm.c | 150 ++++++++++++++++++----------------------------------------
- 1 file changed, 49 insertions(+), 101 deletions(-)
+ net/can/bcm.c | 156 +++++++++++++++++++---------------------------------------
+ 1 file changed, 52 insertions(+), 104 deletions(-)
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
- struct bcm_op *op = (struct bcm_op *)data;
+ if (bcm_tx_set_expiry(op, &op->timer))
-+ hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS);
++ hrtimer_start_expires(&op->timer, HRTIMER_MODE_ABS_SOFT);
+}
+
+/* bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */
@@ -98,8 +98,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -542,11 +540,18 @@ static void bcm_rx_starttimer(struct bcm
- hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
+@@ -480,7 +478,7 @@ static void bcm_rx_update_and_send(struc
+ /* do not send the saved data - only start throttle timer */
+ hrtimer_start(&op->thrtimer,
+ ktime_add(op->kt_lastmsg, op->kt_ival2),
+- HRTIMER_MODE_ABS);
++ HRTIMER_MODE_ABS_SOFT);
+ return;
+ }
+
+@@ -539,14 +537,21 @@ static void bcm_rx_starttimer(struct bcm
+ return;
+
+ if (op->kt_ival1)
+- hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL);
++ hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL_SOFT);
}
-static void bcm_rx_timeout_tsklet(unsigned long data)
@@ -244,8 +257,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* initialize uninitialized (kzalloc) structure */
- hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
-+ HRTIMER_MODE_REL);
++ hrtimer_init(&op->timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL_SOFT);
op->timer.function = bcm_tx_timeout_handler;
- /* initialize tasklet for tx countevent notification */
@@ -254,8 +267,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
/* currently unused in tx_ops */
- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
-+ HRTIMER_MODE_REL);
++ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL_SOFT);
/* add this bcm_op to the list of the tx_ops */
list_add(&op->list, &bo->tx_ops);
@@ -264,8 +277,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* initialize uninitialized (kzalloc) structure */
- hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&op->timer, CLOCK_MONOTONIC_SOFT,
-+ HRTIMER_MODE_REL);
++ hrtimer_init(&op->timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL_SOFT);
op->timer.function = bcm_rx_timeout_handler;
- /* initialize tasklet for rx timeout notification */
@@ -273,8 +286,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- (unsigned long) op);
-
- hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC_SOFT,
-+ HRTIMER_MODE_REL);
++ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL_SOFT);
op->thrtimer.function = bcm_rx_thr_handler;
- /* initialize tasklet for rx throttle handling */
@@ -284,7 +297,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* add this bcm_op to the list of the rx_ops */
list_add(&op->list, &bo->rx_ops);
-@@ -1209,7 +1157,7 @@ static int bcm_rx_setup(struct bcm_msg_h
+@@ -1209,12 +1157,12 @@ static int bcm_rx_setup(struct bcm_msg_h
*/
op->kt_lastmsg = 0;
hrtimer_cancel(&op->thrtimer);
@@ -293,3 +306,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if ((op->flags & STARTTIMER) && op->kt_ival1)
+ hrtimer_start(&op->timer, op->kt_ival1,
+- HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
+ }
+
+ /* now we can register for can_ids, if we added a new bcm_op */
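
The conversion pattern here is shared with the two driver patches that
follow: the former tasklet body moves into the hrtimer callback, which
the _SOFT modes already run in softirq context, so the tasklet and its
init/kill calls disappear. A condensed sketch (the foo_* names are
hypothetical; only the hrtimer calls mirror the hunks):

    #include <linux/kernel.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    struct foo {
            struct hrtimer timer;
            ktime_t interval;
    };

    static void foo_do_work(struct foo *f)
    {
            /* device specific; formerly the tasklet body */
    }

    static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
    {
            struct foo *f = container_of(t, struct foo, timer);

            foo_do_work(f);                 /* no tasklet_schedule() */
            hrtimer_forward_now(t, f->interval);
            return HRTIMER_RESTART;
    }

    static void foo_setup(struct foo *f)
    {
            hrtimer_init(&f->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
            f->timer.function = foo_timer_fn;
            hrtimer_start(&f->timer, f->interval, HRTIMER_MODE_REL_SOFT);
    }
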
diff --git a/patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch b/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
index f7a1f680d75d..9fd7bb146df9 100644
--- a/patches/0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
+++ b/patches/0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
@@ -1,9 +1,9 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:10 +0000
-Subject: [PATCH 20/25] mac80211_hwsim: Replace hrtimer tasklet with softirq
+Date: Sun, 22 Oct 2017 23:40:09 +0200
+Subject: [PATCH 31/36] mac80211_hwsim: Replace hrtimer tasklet with softirq
hrtimer
-Switch the timer to CLOCK_MONOTONIC_SOFT, which executed the timer
+Switch the timer to HRTIMER_MODE_SOFT, which executes the timer
callback in softirq context and remove the hrtimer_tasklet.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- HRTIMER_MODE_REL);
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * 1000),
-+ HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
}
return 0;
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- HRTIMER_MODE_REL);
+ hrtimer_start(&data->beacon_timer,
+ ns_to_ktime(until_tbtt * 1000),
-+ HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
} else if (!info->enable_beacon) {
unsigned int count = 0;
ieee80211_iterate_active_interfaces_atomic(
@@ -127,8 +127,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- tasklet_hrtimer_init(&data->beacon_timer,
- mac80211_hwsim_beacon,
- CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-+ hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC_SOFT,
-+ HRTIMER_MODE_ABS);
++ hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_ABS_SOFT);
+ data->beacon_timer.function = mac80211_hwsim_beacon;
spin_lock_bh(&hwsim_radio_lock);
diff --git a/patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch b/patches/0032-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
index edc93abcd6ca..fc7ffb4f0bd0 100644
--- a/patches/0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
+++ b/patches/0032-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
@@ -1,8 +1,8 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:10 +0000
-Subject: [PATCH 21/25] xfrm: Replace hrtimer tasklet with softirq hrtimer
+Date: Sun, 22 Oct 2017 23:40:10 +0200
+Subject: [PATCH 32/36] xfrm: Replace hrtimer tasklet with softirq hrtimer
-Switch the timer to CLOCK_MONOTONIC_SOFT, which executed the timer
+Switch the timer to HRTIMER_MODE_SOFT, which executes the timer
callback in softirq context and remove the hrtimer_tasklet.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
@@ -14,12 +14,12 @@ Cc: netdev@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/net/xfrm.h | 2 +-
- net/xfrm/xfrm_state.c | 29 +++++++++++++++++------------
- 2 files changed, 18 insertions(+), 13 deletions(-)
+ net/xfrm/xfrm_state.c | 30 ++++++++++++++++++------------
+ 2 files changed, 19 insertions(+), 13 deletions(-)
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
-@@ -213,7 +213,7 @@ struct xfrm_state {
+@@ -217,7 +217,7 @@ struct xfrm_state {
struct xfrm_stats stats;
struct xfrm_lifetime_cur curlft;
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
-@@ -418,7 +418,7 @@ static void xfrm_put_mode(struct xfrm_mo
+@@ -426,7 +426,7 @@ static void xfrm_put_mode(struct xfrm_mo
static void xfrm_state_gc_destroy(struct xfrm_state *x)
{
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
del_timer_sync(&x->rtimer);
kfree(x->aead);
kfree(x->aalg);
-@@ -463,8 +463,8 @@ static void xfrm_state_gc_task(struct wo
+@@ -471,8 +471,8 @@ static void xfrm_state_gc_task(struct wo
static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
{
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
unsigned long now = get_seconds();
long next = LONG_MAX;
int warn = 0;
-@@ -528,7 +528,8 @@ static enum hrtimer_restart xfrm_timer_h
+@@ -536,7 +536,8 @@ static enum hrtimer_restart xfrm_timer_h
km_state_expired(x, 0, 0);
resched:
if (next != LONG_MAX) {
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
goto out;
-@@ -545,7 +546,7 @@ static enum hrtimer_restart xfrm_timer_h
+@@ -553,7 +554,7 @@ static enum hrtimer_restart xfrm_timer_h
out:
spin_unlock(&x->lock);
@@ -69,63 +69,64 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void xfrm_replay_timer_handler(unsigned long data);
-@@ -564,8 +565,8 @@ struct xfrm_state *xfrm_state_alloc(stru
+@@ -572,8 +573,8 @@ struct xfrm_state *xfrm_state_alloc(stru
INIT_HLIST_NODE(&x->bydst);
INIT_HLIST_NODE(&x->bysrc);
INIT_HLIST_NODE(&x->byspi);
- tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler,
- CLOCK_BOOTTIME, HRTIMER_MODE_ABS);
-+ hrtimer_init(&x->mtimer, CLOCK_BOOTTIME_SOFT, HRTIMER_MODE_ABS);
++ hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT);
+ x->mtimer.function = xfrm_timer_handler;
setup_timer(&x->rtimer, xfrm_replay_timer_handler,
(unsigned long)x);
x->curlft.add_time = get_seconds();
-@@ -1021,7 +1022,9 @@ xfrm_state_find(const xfrm_address_t *da
+@@ -1030,7 +1031,9 @@ xfrm_state_find(const xfrm_address_t *da
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer,
+ ktime_set(net->xfrm.sysctl_acq_expires, 0),
-+ HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
net->xfrm.state_num++;
xfrm_hash_grow_check(net, x->bydst.next != NULL);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
-@@ -1132,7 +1135,7 @@ static void __xfrm_state_insert(struct x
+@@ -1141,7 +1144,7 @@ static void __xfrm_state_insert(struct x
hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h);
}
- tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
-+ hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT);
if (x->replay_maxage)
mod_timer(&x->rtimer, jiffies + x->replay_maxage);
-@@ -1236,7 +1239,9 @@ static struct xfrm_state *__find_acq_cor
+@@ -1245,7 +1248,9 @@ static struct xfrm_state *__find_acq_cor
x->mark.m = m->m;
x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires;
xfrm_state_hold(x);
- tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL);
+ hrtimer_start(&x->mtimer,
+ ktime_set(net->xfrm.sysctl_acq_expires, 0),
-+ HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
list_add(&x->km.all, &net->xfrm.state_all);
hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h);
h = xfrm_src_hash(net, daddr, saddr, family);
-@@ -1535,7 +1540,7 @@ int xfrm_state_update(struct xfrm_state
+@@ -1544,7 +1549,8 @@ int xfrm_state_update(struct xfrm_state
memcpy(&x1->lft, &x->lft, sizeof(x1->lft));
x1->km.dying = 0;
- tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
-+ hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL);
++ hrtimer_start(&x1->mtimer, ktime_set(1, 0),
++ HRTIMER_MODE_REL_SOFT);
if (x1->curlft.use_time)
xfrm_state_check_expire(x1);
-@@ -1559,7 +1564,7 @@ int xfrm_state_check_expire(struct xfrm_
+@@ -1568,7 +1574,7 @@ int xfrm_state_check_expire(struct xfrm_
if (x->curlft.bytes >= x->lft.hard_byte_limit ||
x->curlft.packets >= x->lft.hard_packet_limit) {
x->km.state = XFRM_STATE_EXPIRED;
- tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
-+ hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL);
++ hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT);
return -EINVAL;
}
diff --git a/patches/0022-softirq-Remove-tasklet_hrtimer.patch b/patches/0033-softirq-Remove-tasklet_hrtimer.patch
index 317c650b8a49..a7fce4daeaf2 100644
--- a/patches/0022-softirq-Remove-tasklet_hrtimer.patch
+++ b/patches/0033-softirq-Remove-tasklet_hrtimer.patch
@@ -1,6 +1,6 @@
From: Thomas Gleixner <tglx@linutronix.de>
-Date: Thu, 31 Aug 2017 11:03:11 +0000
-Subject: [PATCH 22/25] softirq: Remove tasklet_hrtimer
+Date: Sun, 22 Oct 2017 23:40:11 +0200
+Subject: [PATCH 33/36] softirq: Remove tasklet_hrtimer
There are no more tasklet_hrtimer users of this interface.
Remove it.
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -631,31 +631,6 @@ extern void tasklet_kill_immediate(struc
+@@ -633,31 +633,6 @@ extern void tasklet_kill_immediate(struc
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
diff --git a/patches/0034-ALSA-dummy-Replace-tasklet-with-softirq-hrtimer.patch b/patches/0034-ALSA-dummy-Replace-tasklet-with-softirq-hrtimer.patch
new file mode 100644
index 000000000000..6566b5782e23
--- /dev/null
+++ b/patches/0034-ALSA-dummy-Replace-tasklet-with-softirq-hrtimer.patch
@@ -0,0 +1,98 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:12 +0200
+Subject: [PATCH 34/36] ALSA/dummy: Replace tasklet with softirq hrtimer
+
+The tasklet is used to defer the execution of snd_pcm_period_elapsed() to
+the softirq context. Using the HRTIMER_MODE_SOFT mode invokes the timer
+callback in softirq context as well, which renders the tasklet useless.
+
+[o-takashi: avoid stall due to a call of hrtimer_cancel() on a callback
+ of hrtimer]
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Jaroslav Kysela <perex@perex.cz>
+Cc: Takashi Iwai <tiwai@suse.com>
+Cc: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Cc: alsa-devel@alsa-project.org
+Link: http://lkml.kernel.org/r/20170905161820.jtysvxtfleunbbmf@breakpoint.cc
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ sound/drivers/dummy.c | 27 ++++++++++++---------------
+ 1 file changed, 12 insertions(+), 15 deletions(-)
+
+--- a/sound/drivers/dummy.c
++++ b/sound/drivers/dummy.c
+@@ -376,17 +376,9 @@ struct dummy_hrtimer_pcm {
+ ktime_t period_time;
+ atomic_t running;
+ struct hrtimer timer;
+- struct tasklet_struct tasklet;
+ struct snd_pcm_substream *substream;
+ };
+
+-static void dummy_hrtimer_pcm_elapsed(unsigned long priv)
+-{
+- struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv;
+- if (atomic_read(&dpcm->running))
+- snd_pcm_period_elapsed(dpcm->substream);
+-}
+-
+ static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer)
+ {
+ struct dummy_hrtimer_pcm *dpcm;
+@@ -394,7 +386,14 @@ static enum hrtimer_restart dummy_hrtime
+ dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer);
+ if (!atomic_read(&dpcm->running))
+ return HRTIMER_NORESTART;
+- tasklet_schedule(&dpcm->tasklet);
++ /*
++ * In cases of XRUN and draining, this calls .trigger to stop the
++ * PCM substream.
++ */
++ snd_pcm_period_elapsed(dpcm->substream);
++ if (!atomic_read(&dpcm->running))
++ return HRTIMER_NORESTART;
++
+ hrtimer_forward_now(timer, dpcm->period_time);
+ return HRTIMER_RESTART;
+ }
+@@ -404,7 +403,7 @@ static int dummy_hrtimer_start(struct sn
+ struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
+
+ dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer);
+- hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL);
++ hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL_SOFT);
+ atomic_set(&dpcm->running, 1);
+ return 0;
+ }
+@@ -414,14 +413,14 @@ static int dummy_hrtimer_stop(struct snd
+ struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data;
+
+ atomic_set(&dpcm->running, 0);
+- hrtimer_cancel(&dpcm->timer);
++ if (!hrtimer_callback_running(&dpcm->timer))
++ hrtimer_cancel(&dpcm->timer);
+ return 0;
+ }
+
+ static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm)
+ {
+ hrtimer_cancel(&dpcm->timer);
+- tasklet_kill(&dpcm->tasklet);
+ }
+
+ static snd_pcm_uframes_t
+@@ -466,12 +465,10 @@ static int dummy_hrtimer_create(struct s
+ if (!dpcm)
+ return -ENOMEM;
+ substream->runtime->private_data = dpcm;
+- hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ dpcm->timer.function = dummy_hrtimer_callback;
+ dpcm->substream = substream;
+ atomic_set(&dpcm->running, 0);
+- tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed,
+- (unsigned long)dpcm);
+ return 0;
+ }
+
diff --git a/patches/0035-usb-gadget-NCM-Replace-tasklet-with-softirq-hrtimer.patch b/patches/0035-usb-gadget-NCM-Replace-tasklet-with-softirq-hrtimer.patch
new file mode 100644
index 000000000000..cb437ab395d4
--- /dev/null
+++ b/patches/0035-usb-gadget-NCM-Replace-tasklet-with-softirq-hrtimer.patch
@@ -0,0 +1,96 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:14 +0200
+Subject: [PATCH 35/36] usb/gadget/NCM: Replace tasklet with softirq hrtimer
+
+The tx_tasklet tasklet is used to invoke the hrtimer (task_timer) in
+softirq context. This can also be achieved without the tasklet, using
+HRTIMER_MODE_SOFT as the hrtimer mode.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: Felipe Balbi <balbi@kernel.org>
+Cc: linux-usb@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/usb/gadget/function/f_ncm.c | 30 +++++++-----------------------
+ 1 file changed, 7 insertions(+), 23 deletions(-)
+
+--- a/drivers/usb/gadget/function/f_ncm.c
++++ b/drivers/usb/gadget/function/f_ncm.c
+@@ -77,9 +77,7 @@ struct f_ncm {
+ struct sk_buff *skb_tx_ndp;
+ u16 ndp_dgram_count;
+ bool timer_force_tx;
+- struct tasklet_struct tx_tasklet;
+ struct hrtimer task_timer;
+-
+ bool timer_stopping;
+ };
+
+@@ -1108,7 +1106,7 @@ static struct sk_buff *ncm_wrap_ntb(stru
+
+ /* Delay the timer. */
+ hrtimer_start(&ncm->task_timer, TX_TIMEOUT_NSECS,
+- HRTIMER_MODE_REL);
++ HRTIMER_MODE_REL_SOFT);
+
+ /* Add the datagram position entries */
+ ntb_ndp = skb_put_zero(ncm->skb_tx_ndp, dgram_idx_len);
+@@ -1152,17 +1150,15 @@ static struct sk_buff *ncm_wrap_ntb(stru
+ }
+
+ /*
+- * This transmits the NTB if there are frames waiting.
++ * The transmit should only be run if no skb data has been sent
++ * for a certain duration.
+ */
+-static void ncm_tx_tasklet(unsigned long data)
++static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
+ {
+- struct f_ncm *ncm = (void *)data;
+-
+- if (ncm->timer_stopping)
+- return;
++ struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
+
+ /* Only send if data is available. */
+- if (ncm->skb_tx_data) {
++ if (!ncm->timer_stopping && ncm->skb_tx_data) {
+ ncm->timer_force_tx = true;
+
+ /* XXX This allowance of a NULL skb argument to ndo_start_xmit
+@@ -1175,16 +1171,6 @@ static void ncm_tx_tasklet(unsigned long
+
+ ncm->timer_force_tx = false;
+ }
+-}
+-
+-/*
+- * The transmit should only be run if no skb data has been sent
+- * for a certain duration.
+- */
+-static enum hrtimer_restart ncm_tx_timeout(struct hrtimer *data)
+-{
+- struct f_ncm *ncm = container_of(data, struct f_ncm, task_timer);
+- tasklet_schedule(&ncm->tx_tasklet);
+ return HRTIMER_NORESTART;
+ }
+
+@@ -1517,8 +1503,7 @@ static int ncm_bind(struct usb_configura
+ ncm->port.open = ncm_open;
+ ncm->port.close = ncm_close;
+
+- tasklet_init(&ncm->tx_tasklet, ncm_tx_tasklet, (unsigned long) ncm);
+- hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hrtimer_init(&ncm->task_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
+ ncm->task_timer.function = ncm_tx_timeout;
+
+ DBG(cdev, "CDC Network: %s speed IN/%s OUT/%s NOTIFY/%s\n",
+@@ -1627,7 +1612,6 @@ static void ncm_unbind(struct usb_config
+ DBG(c->cdev, "ncm unbind\n");
+
+ hrtimer_cancel(&ncm->task_timer);
+- tasklet_kill(&ncm->tx_tasklet);
+
+ ncm_string_defs[0].id = 0;
+ usb_free_all_descriptors(f);
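
Unlike the periodic conversions above, task_timer is a one-shot TX
timeout: the transmit path (re)arms it while data keeps aggregating, and
the callback flushes directly in softirq context and does not restart.
A condensed sketch (the my_* names and the timeout value are
hypothetical; the mode and NORESTART return mirror the hunk):

    #include <linux/kernel.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define MY_TX_TIMEOUT_NSECS (300 * NSEC_PER_MSEC) /* placeholder */

    struct my_gadget {
            struct hrtimer task_timer;
            bool timer_stopping;
    };

    static void my_flush_pending_tx(struct my_gadget *g)
    {
            /* device specific: transmit the aggregated frames */
    }

    /* One-shot: do the work in softirq context, never restart; the
     * data path re-arms the timer each time it queues more data. */
    static enum hrtimer_restart my_tx_timeout(struct hrtimer *t)
    {
            struct my_gadget *g = container_of(t, struct my_gadget,
                                               task_timer);

            if (!g->timer_stopping)
                    my_flush_pending_tx(g);
            return HRTIMER_NORESTART;
    }

    static void my_queue_tx(struct my_gadget *g)
    {
            /* delay the flush while more data keeps arriving */
            hrtimer_start(&g->task_timer, MY_TX_TIMEOUT_NSECS,
                          HRTIMER_MODE_REL_SOFT);
    }
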
diff --git a/patches/0036-net-mvpp2-Replace-tasklet-with-softirq-hrtimer.patch b/patches/0036-net-mvpp2-Replace-tasklet-with-softirq-hrtimer.patch
new file mode 100644
index 000000000000..41b7a3418228
--- /dev/null
+++ b/patches/0036-net-mvpp2-Replace-tasklet-with-softirq-hrtimer.patch
@@ -0,0 +1,132 @@
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Sun, 22 Oct 2017 23:40:15 +0200
+Subject: [PATCH 36/36] net/mvpp2: Replace tasklet with softirq hrtimer
+
+The tx_done_tasklet tasklet is used to invoke the hrtimer
+(mvpp2_hr_timer_cb) in softirq context. This can also be achieved without
+the tasklet, using HRTIMER_MODE_SOFT as the hrtimer mode.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+Cc: netdev@vger.kernel.org
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/net/ethernet/marvell/mvpp2.c | 62 ++++++++++++++---------------------
+ 1 file changed, 25 insertions(+), 37 deletions(-)
+
+--- a/drivers/net/ethernet/marvell/mvpp2.c
++++ b/drivers/net/ethernet/marvell/mvpp2.c
+@@ -830,9 +830,8 @@ struct mvpp2_pcpu_stats {
+ /* Per-CPU port control */
+ struct mvpp2_port_pcpu {
+ struct hrtimer tx_done_timer;
++ struct net_device *dev;
+ bool timer_scheduled;
+- /* Tasklet for egress finalization */
+- struct tasklet_struct tx_done_tasklet;
+ };
+
+ struct mvpp2_queue_vector {
+@@ -5979,46 +5978,34 @@ static void mvpp2_link_event(struct net_
+ }
+ }
+
+-static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
+-{
+- ktime_t interval;
+-
+- if (!port_pcpu->timer_scheduled) {
+- port_pcpu->timer_scheduled = true;
+- interval = MVPP2_TXDONE_HRTIMER_PERIOD_NS;
+- hrtimer_start(&port_pcpu->tx_done_timer, interval,
+- HRTIMER_MODE_REL_PINNED);
+- }
+-}
+-
+-static void mvpp2_tx_proc_cb(unsigned long data)
++static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+ {
+- struct net_device *dev = (struct net_device *)data;
+- struct mvpp2_port *port = netdev_priv(dev);
+- struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
++ struct net_device *dev;
++ struct mvpp2_port *port;
++ struct mvpp2_port_pcpu *port_pcpu;
+ unsigned int tx_todo, cause;
+
++ port_pcpu = container_of(timer, struct mvpp2_port_pcpu, tx_done_timer);
++ dev = port_pcpu->dev;
++
+ if (!netif_running(dev))
+- return;
++ return HRTIMER_NORESTART;
++
+ port_pcpu->timer_scheduled = false;
++ port = netdev_priv(dev);
+
+ /* Process all the Tx queues */
+ cause = (1 << port->ntxqs) - 1;
+ tx_todo = mvpp2_tx_done(port, cause, smp_processor_id());
+
+ /* Set the timer in case not all the packets were processed */
+- if (tx_todo)
+- mvpp2_timer_set(port_pcpu);
+-}
+-
+-static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
+-{
+- struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
+- struct mvpp2_port_pcpu,
+- tx_done_timer);
+-
+- tasklet_schedule(&port_pcpu->tx_done_tasklet);
++ if (tx_todo && !port_pcpu->timer_scheduled) {
++ port_pcpu->timer_scheduled = true;
++ hrtimer_forward_now(&port_pcpu->tx_done_timer,
++ MVPP2_TXDONE_HRTIMER_PERIOD_NS);
+
++ return HRTIMER_RESTART;
++ }
+ return HRTIMER_NORESTART;
+ }
+
+@@ -6507,7 +6494,12 @@ static int mvpp2_tx(struct sk_buff *skb,
+ txq_pcpu->count > 0) {
+ struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
+
+- mvpp2_timer_set(port_pcpu);
++ if (!port_pcpu->timer_scheduled) {
++ port_pcpu->timer_scheduled = true;
++ hrtimer_start(&port_pcpu->tx_done_timer,
++ MVPP2_TXDONE_HRTIMER_PERIOD_NS,
++ HRTIMER_MODE_REL_PINNED_SOFT);
++ }
+ }
+
+ return NETDEV_TX_OK;
+@@ -6896,7 +6888,6 @@ static int mvpp2_stop(struct net_device
+
+ hrtimer_cancel(&port_pcpu->tx_done_timer);
+ port_pcpu->timer_scheduled = false;
+- tasklet_kill(&port_pcpu->tx_done_tasklet);
+ }
+ }
+ mvpp2_cleanup_rxqs(port);
+@@ -7664,13 +7655,10 @@ static int mvpp2_port_probe(struct platf
+ port_pcpu = per_cpu_ptr(port->pcpu, cpu);
+
+ hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
+- HRTIMER_MODE_REL_PINNED);
++ HRTIMER_MODE_REL_PINNED_SOFT);
+ port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
+ port_pcpu->timer_scheduled = false;
+-
+- tasklet_init(&port_pcpu->tx_done_tasklet,
+- mvpp2_tx_proc_cb,
+- (unsigned long)dev);
++ port_pcpu->dev = dev;
+ }
+ }
+
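
Here the callback decides about re-arming itself: if TX work remains it
pushes its own expiry forward with hrtimer_forward_now() and returns
HRTIMER_RESTART, replacing the old tasklet plus mvpp2_timer_set() round
trip. A condensed sketch (the my_* names and the period are
hypothetical; the pinned-soft mode mirrors the hunk):

    #include <linux/kernel.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    #define MY_TX_DONE_PERIOD_NS (1 * NSEC_PER_MSEC) /* placeholder */

    struct my_port_pcpu {
            struct hrtimer tx_done_timer;
            bool timer_scheduled;
    };

    static bool my_tx_work_pending(void)
    {
            return false; /* device specific: packets left to reap? */
    }

    /* Runs in softirq context on the CPU the timer was pinned to. */
    static enum hrtimer_restart my_tx_done_cb(struct hrtimer *timer)
    {
            struct my_port_pcpu *pcpu =
                    container_of(timer, struct my_port_pcpu, tx_done_timer);

            pcpu->timer_scheduled = false;
            if (my_tx_work_pending()) {
                    pcpu->timer_scheduled = true;
                    hrtimer_forward_now(timer, MY_TX_DONE_PERIOD_NS);
                    return HRTIMER_RESTART;
            }
            return HRTIMER_NORESTART;
    }

    static void my_port_init(struct my_port_pcpu *pcpu)
    {
            hrtimer_init(&pcpu->tx_done_timer, CLOCK_MONOTONIC,
                         HRTIMER_MODE_REL_PINNED_SOFT);
            pcpu->tx_done_timer.function = my_tx_done_cb;
            pcpu->timer_scheduled = false;
    }
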
diff --git a/patches/0039-tracing-Make-tracing_set_clock-non-static.patch b/patches/0039-tracing-Make-tracing_set_clock-non-static.patch
index 8c9bce73c8bd..049b2913e1f8 100644
--- a/patches/0039-tracing-Make-tracing_set_clock-non-static.patch
+++ b/patches/0039-tracing-Make-tracing_set_clock-non-static.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -6216,7 +6216,7 @@ static int tracing_clock_show(struct seq
+@@ -6226,7 +6226,7 @@ static int tracing_clock_show(struct seq
return 0;
}
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -287,6 +287,7 @@ extern int trace_array_get(struct trace_
+@@ -289,6 +289,7 @@ extern int trace_array_get(struct trace_
extern void trace_array_put(struct trace_array *tr);
extern int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs);
diff --git a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
index 0282b1dde960..578878355883 100644
--- a/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
+++ b/patches/HACK-printk-drop-the-logbuf_lock-more-often.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1408,6 +1408,8 @@ static int syslog_print_all(char __user
+@@ -1407,6 +1407,8 @@ static int syslog_print_all(char __user
{
char *text;
int len = 0;
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
if (!text)
-@@ -1419,6 +1421,14 @@ static int syslog_print_all(char __user
+@@ -1418,6 +1420,14 @@ static int syslog_print_all(char __user
u64 seq;
u32 idx;
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Find first record that fits, including all following records,
* into the user-provided buffer for this dump.
-@@ -1431,6 +1441,14 @@ static int syslog_print_all(char __user
+@@ -1430,6 +1440,14 @@ static int syslog_print_all(char __user
len += msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* move first record forward until length fits into the buffer */
-@@ -1442,6 +1460,14 @@ static int syslog_print_all(char __user
+@@ -1441,6 +1459,14 @@ static int syslog_print_all(char __user
len -= msg_print_text(msg, true, NULL, 0);
idx = log_next(idx);
seq++;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* last message fitting into this dump */
-@@ -1480,6 +1506,7 @@ static int syslog_print_all(char __user
+@@ -1479,6 +1505,7 @@ static int syslog_print_all(char __user
clear_seq = log_next_seq;
clear_idx = log_next_idx;
}
diff --git a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
index af8cce4068c0..3fa92f157958 100644
--- a/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
+++ b/patches/KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
-@@ -645,7 +645,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -650,7 +650,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
* involves poking the GIC, which must be done in a
* non-preemptible context.
*/
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kvm_pmu_flush_hwstate(vcpu);
-@@ -682,7 +682,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -687,7 +687,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
}
-@@ -737,7 +737,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
+@@ -742,7 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_v
kvm_vgic_sync_hwstate(vcpu);
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 1ba7e78d6df5..eac7860fe659 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
put_nfs_open_context(ctx);
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
-@@ -111,7 +111,7 @@ struct nfs4_state_owner {
+@@ -112,7 +112,7 @@ struct nfs4_state_owner {
unsigned long so_flags;
struct list_head so_states;
struct nfs_seqid_counter so_seqid;
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2615,7 +2615,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2641,7 +2641,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata);
if (ret != 0)
-@@ -2653,7 +2653,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2679,7 +2679,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
diff --git a/patches/RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch b/patches/RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch
index 29c575ef833b..28105f4965f0 100644
--- a/patches/RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch
+++ b/patches/RCU-we-need-to-skip-that-warning-but-only-on-sleepin.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -317,9 +317,13 @@ static void rcu_preempt_note_context_swi
+@@ -323,9 +323,13 @@ static void rcu_preempt_note_context_swi
struct task_struct *t = current;
struct rcu_data *rdp;
struct rcu_node *rnp;
diff --git a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
index 298239b10735..1e8bce7fd5e8 100644
--- a/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
+++ b/patches/acpi-rt-Convert-acpi_gbl_hardware-lock-back-to-a-raw.patch
@@ -142,7 +142,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Delete the reader/writer lock */
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
-@@ -133,6 +133,7 @@
+@@ -134,6 +134,7 @@
#define acpi_cache_t struct kmem_cache
#define acpi_spinlock spinlock_t *
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define acpi_cpu_flags unsigned long
/* Use native linux version of acpi_os_allocate_zeroed */
-@@ -151,6 +152,20 @@
+@@ -152,6 +153,20 @@
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
#define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index bb1cdba90f35..d6b48f1faa2a 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -12,7 +12,7 @@ Subject: kernel/sched/core: add migrate_disable()
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -184,6 +184,22 @@ do { \
+@@ -185,6 +185,22 @@ do { \
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
@@ -35,7 +35,7 @@ Subject: kernel/sched/core: add migrate_disable()
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
-@@ -252,6 +268,13 @@ do { \
+@@ -253,6 +269,13 @@ do { \
#define preempt_enable_notrace() barrier()
#define preemptible() 0
@@ -67,7 +67,7 @@ Subject: kernel/sched/core: add migrate_disable()
int rcu_read_lock_nesting;
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
-@@ -197,6 +197,9 @@ static inline int get_boot_cpu_id(void)
+@@ -202,6 +202,9 @@ static inline int get_boot_cpu_id(void)
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
@@ -79,7 +79,7 @@ Subject: kernel/sched/core: add migrate_disable()
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1018,7 +1018,15 @@ void set_cpus_allowed_common(struct task
+@@ -1023,7 +1023,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable()
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1047,6 +1055,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1052,6 +1060,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable()
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1105,9 +1127,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1110,9 +1132,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -6716,3 +6745,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -6760,3 +6789,100 @@ const u32 sched_prio_to_wmult[40] = {
/* 10 */ 39045157, 49367440, 61356676, 76695844, 95443717,
/* 15 */ 119304647, 148102320, 186737708, 238609294, 286331153,
};
@@ -238,7 +238,7 @@ Subject: kernel/sched/core: add migrate_disable()
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -971,6 +971,10 @@ void proc_sched_show_task(struct task_st
+@@ -1017,6 +1017,10 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
diff --git a/patches/apparmor-use-a-locallock-instead-preempt_disable.patch b/patches/apparmor-use-a-locallock-instead-preempt_disable.patch
index 13e9a775f5bf..b3d8f2cca0ba 100644
--- a/patches/apparmor-use-a-locallock-instead-preempt_disable.patch
+++ b/patches/apparmor-use-a-locallock-instead-preempt_disable.patch
@@ -72,7 +72,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __AA_PATH_H */
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
-@@ -43,7 +43,7 @@
+@@ -44,7 +44,7 @@
int apparmor_initialized;
DEFINE_PER_CPU(struct aa_buffers, aa_buffers);
diff --git a/patches/arch-arm64-Add-lazy-preempt-support.patch b/patches/arch-arm64-Add-lazy-preempt-support.patch
index 859d2087a81f..8661fc7be51f 100644
--- a/patches/arch-arm64-Add-lazy-preempt-support.patch
+++ b/patches/arch-arm64-Add-lazy-preempt-support.patch
@@ -12,15 +12,15 @@ indicate that support for full RT preemption is now available.
Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
---
arch/arm64/Kconfig | 1 +
- arch/arm64/include/asm/thread_info.h | 7 +++++--
+ arch/arm64/include/asm/thread_info.h | 6 +++++-
arch/arm64/kernel/asm-offsets.c | 1 +
arch/arm64/kernel/entry.S | 12 +++++++++---
arch/arm64/kernel/signal.c | 2 +-
- 5 files changed, 17 insertions(+), 6 deletions(-)
+ 5 files changed, 17 insertions(+), 5 deletions(-)
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -102,6 +102,7 @@ config ARM64
+@@ -103,6 +103,7 @@ config ARM64
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -30,7 +30,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/arm64/include/asm/thread_info.h
+++ b/arch/arm64/include/asm/thread_info.h
-@@ -51,6 +51,7 @@ struct thread_info {
+@@ -43,6 +43,7 @@ struct thread_info {
u64 ttbr0; /* saved TTBR0_EL1 */
#endif
int preempt_count; /* 0 => preemptable, <0 => bug */
@@ -38,15 +38,15 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
};
#define INIT_THREAD_INFO(tsk) \
-@@ -86,6 +87,7 @@ struct thread_info {
- #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
+@@ -82,6 +83,7 @@ void arch_setup_new_exec(void);
#define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */
#define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */
-+#define TIF_NEED_RESCHED_LAZY 5
+ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */
++#define TIF_NEED_RESCHED_LAZY 6
#define TIF_NOHZ 7
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
-@@ -101,6 +103,7 @@ struct thread_info {
+@@ -97,6 +99,7 @@ void arch_setup_new_exec(void);
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE)
@@ -54,13 +54,13 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#define _TIF_NOHZ (1 << TIF_NOHZ)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-@@ -111,8 +114,8 @@ struct thread_info {
+@@ -108,8 +111,9 @@ void arch_setup_new_exec(void);
#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
_TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
-- _TIF_UPROBE)
--
-+ _TIF_UPROBE | _TIF_NEED_RESCHED_LAZY)
+- _TIF_UPROBE | _TIF_FSCHECK)
++ _TIF_UPROBE | _TIF_FSCHECK | _TIF_NEED_RESCHED_LAZY)
+
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
_TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
@@ -77,7 +77,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
DEFINE(TSK_TI_TTBR0, offsetof(struct task_struct, thread_info.ttbr0));
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
-@@ -488,11 +488,16 @@ ENDPROC(el1_sync)
+@@ -570,11 +570,16 @@ ENDPROC(el1_sync)
#ifdef CONFIG_PREEMPT
ldr w24, [tsk, #TSK_TI_PREEMPT] // get preempt count
@@ -97,7 +97,7 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_on
-@@ -506,6 +511,7 @@ ENDPROC(el1_irq)
+@@ -588,6 +593,7 @@ ENDPROC(el1_irq)
1: bl preempt_schedule_irq // irq en/disable is done inside
ldr x0, [tsk, #TSK_TI_FLAGS] // get new tasks TI_FLAGS
tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling?
@@ -107,10 +107,10 @@ Signed-off-by: Anders Roxell <anders.roxell@linaro.org>
--- a/arch/arm64/kernel/signal.c
+++ b/arch/arm64/kernel/signal.c
-@@ -750,7 +750,7 @@ asmlinkage void do_notify_resume(struct
- */
- trace_hardirqs_off();
- do {
+@@ -755,7 +755,7 @@ asmlinkage void do_notify_resume(struct
+ /* Check valid user FS if needed */
+ addr_limit_user_check();
+
- if (thread_flags & _TIF_NEED_RESCHED) {
+ if (thread_flags & _TIF_NEED_RESCHED_MASK) {
schedule();
diff --git a/patches/arm-enable-highmem-for-rt.patch b/patches/arm-enable-highmem-for-rt.patch
index a3535b8f53d7..3891177cb5ec 100644
--- a/patches/arm-enable-highmem-for-rt.patch
+++ b/patches/arm-enable-highmem-for-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/include/asm/switch_to.h
+++ b/arch/arm/include/asm/switch_to.h
-@@ -3,6 +3,13 @@
+@@ -4,6 +4,13 @@
#include <linux/thread_info.h>
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* For v7 SMP cores running a preemptible kernel we may be pre-empted
* during a TLB maintenance operation, so execute an inner-shareable dsb
-@@ -25,6 +32,7 @@ extern struct task_struct *__switch_to(s
+@@ -26,6 +33,7 @@ extern struct task_struct *__switch_to(s
#define switch_to(prev,next,last) \
do { \
__complete_pending_tlbi(); \
@@ -163,7 +163,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
-@@ -7,6 +7,7 @@
+@@ -8,6 +8,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
diff --git a/patches/arm-include-definition-for-cpumask_t.patch b/patches/arm-include-definition-for-cpumask_t.patch
index 3750303cfa6f..4bb2672be979 100644
--- a/patches/arm-include-definition-for-cpumask_t.patch
+++ b/patches/arm-include-definition-for-cpumask_t.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
-@@ -22,6 +22,8 @@
+@@ -23,6 +23,8 @@
#endif
#ifndef __ASSEMBLY__
diff --git a/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch b/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
index 79706c7faa47..9a0fa6413899 100644
--- a/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
+++ b/patches/arm-kprobe-replace-patch_lock-to-raw-lock.patch
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
-@@ -15,7 +15,7 @@ struct patch {
+@@ -16,7 +16,7 @@ struct patch {
unsigned int insn;
};
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
__acquires(&patch_lock)
-@@ -32,7 +32,7 @@ static void __kprobes *patch_map(void *a
+@@ -33,7 +33,7 @@ static void __kprobes *patch_map(void *a
return addr;
if (flags)
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
else
__acquire(&patch_lock);
-@@ -47,7 +47,7 @@ static void __kprobes patch_unmap(int fi
+@@ -48,7 +48,7 @@ static void __kprobes patch_unmap(int fi
clear_fixmap(fixmap);
if (flags)
diff --git a/patches/arm-preempt-lazy-support.patch b/patches/arm-preempt-lazy-support.patch
index b15cdbfc2a2a..52faabe2e4b3 100644
--- a/patches/arm-preempt-lazy-support.patch
+++ b/patches/arm-preempt-lazy-support.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -84,6 +84,7 @@ config ARM
+@@ -85,6 +85,7 @@ config ARM
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -113,9 +113,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__und_fault:
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
-@@ -41,7 +41,9 @@
- UNWIND(.cantunwind )
- disable_irq_notrace @ disable interrupts
+@@ -53,7 +53,9 @@ saved_pc .req lr
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
@@ -123,14 +123,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+ tst r1, #_TIF_SECCOMP
bne fast_work_pending
- /* perform architecture specific actions before user return */
-@@ -67,8 +69,11 @@ ENDPROC(ret_fast_syscall)
- str r0, [sp, #S_R0 + S_OFF]! @ save returned r0
- disable_irq_notrace @ disable interrupts
+
+@@ -83,8 +85,11 @@ ENDPROC(ret_fast_syscall)
+ cmp r2, #TASK_SIZE
+ blne addr_limit_check_failed
ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing
- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP)
-+ bne do_slower_path
++ bne do_slower_path
+ tst r1, #_TIF_SECCOMP
beq no_work_pending
+do_slower_path:
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
-@@ -614,7 +614,8 @@ do_work_pending(struct pt_regs *regs, un
+@@ -615,7 +615,8 @@ do_work_pending(struct pt_regs *regs, un
*/
trace_hardirqs_off();
do {
diff --git a/patches/arm-xen-don-t-inclide-rwlock.h-directly.patch b/patches/arm-xen-don-t-inclide-rwlock.h-directly.patch
deleted file mode 100644
index 36a500ec7815..000000000000
--- a/patches/arm-xen-don-t-inclide-rwlock.h-directly.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 5 Oct 2017 14:38:52 +0200
-Subject: [PATCH] arm/xen: don't inclide rwlock.h directly.
-
-rwlock.h should not be included directly. Instead linux/splinlock.h
-should be included. One thing it does is to break the RT build.
-
-Cc: Stefano Stabellini <sstabellini@kernel.org>
-Cc: xen-devel@lists.xenproject.org
-Cc: linux-arm-kernel@lists.infradead.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/xen/p2m.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/arch/arm/xen/p2m.c
-+++ b/arch/arm/xen/p2m.c
-@@ -1,7 +1,7 @@
- #include <linux/bootmem.h>
- #include <linux/gfp.h>
- #include <linux/export.h>
--#include <linux/rwlock.h>
-+#include <linux/spinlock.h>
- #include <linux/slab.h>
- #include <linux/types.h>
- #include <linux/dma-mapping.h>
diff --git a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
index fffd54beb518..85237d71532a 100644
--- a/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
+++ b/patches/arm64-xen--Make-XEN-depend-on-non-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
-@@ -773,7 +773,7 @@ config XEN_DOM0
+@@ -774,7 +774,7 @@ config XEN_DOM0
config XEN
bool "Xen guest support on ARM64"
diff --git a/patches/at91_dont_enable_disable_clock.patch b/patches/at91_dont_enable_disable_clock.patch
index 1d4b4f9a6ece..417e2e792c14 100644
--- a/patches/at91_dont_enable_disable_clock.patch
+++ b/patches/at91_dont_enable_disable_clock.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -125,6 +125,7 @@ static struct clocksource clksrc = {
+@@ -126,6 +126,7 @@ static struct clocksource clksrc = {
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void __iomem *regs;
};
-@@ -142,6 +143,24 @@ static struct tc_clkevt_device *to_tc_cl
+@@ -143,6 +144,24 @@ static struct tc_clkevt_device *to_tc_cl
*/
static u32 timer_clock;
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-@@ -149,8 +168,14 @@ static int tc_shutdown(struct clock_even
+@@ -150,8 +169,14 @@ static int tc_shutdown(struct clock_even
writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -163,7 +188,7 @@ static int tc_set_oneshot(struct clock_e
+@@ -164,7 +189,7 @@ static int tc_set_oneshot(struct clock_e
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* slow clock, count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
-@@ -185,7 +210,7 @@ static int tc_set_periodic(struct clock_
+@@ -186,7 +211,7 @@ static int tc_set_periodic(struct clock_
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* slow clock, count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-@@ -219,7 +244,7 @@ static struct tc_clkevt_device clkevt =
+@@ -220,7 +245,7 @@ static struct tc_clkevt_device clkevt =
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
diff --git a/patches/block-blk-mq-use-swait.patch b/patches/block-blk-mq-use-swait.patch
index 4f5321d5c662..d21a048632b6 100644
--- a/patches/block-blk-mq-use-swait.patch
+++ b/patches/block-blk-mq-use-swait.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void blk_rq_timed_out_timer(unsigned long data)
-@@ -875,7 +875,7 @@ struct request_queue *blk_alloc_queue_no
+@@ -878,7 +878,7 @@ struct request_queue *blk_alloc_queue_no
q->bypass_depth = 1;
__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Init percpu_ref in atomic mode so that it's faster to shutdown.
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -97,14 +97,14 @@ EXPORT_SYMBOL_GPL(blk_freeze_queue_start
+@@ -132,14 +132,14 @@ EXPORT_SYMBOL_GPL(blk_freeze_queue_start
void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
percpu_ref_is_zero(&q->q_usage_counter),
timeout);
}
-@@ -145,7 +145,7 @@ void blk_mq_unfreeze_queue(struct reques
+@@ -180,7 +180,7 @@ void blk_mq_unfreeze_queue(struct reques
WARN_ON_ONCE(freeze_depth < 0);
if (!freeze_depth) {
percpu_ref_reinit(&q->q_usage_counter);
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
-@@ -226,7 +226,7 @@ void blk_mq_wake_waiters(struct request_
+@@ -261,7 +261,7 @@ void blk_mq_wake_waiters(struct request_
* dying, we need to ensure that processes currently waiting on
* the queue are notified as well.
*/
@@ -111,7 +111,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -579,7 +579,7 @@ struct request_queue {
+@@ -581,7 +581,7 @@ struct request_queue {
struct throtl_data *td;
#endif
struct rcu_head rcu_head;
diff --git a/patches/block-mq-don-t-complete-requests-via-IPI.patch b/patches/block-mq-don-t-complete-requests-via-IPI.patch
index 84c461da0c20..851038f5dc61 100644
--- a/patches/block-mq-don-t-complete-requests-via-IPI.patch
+++ b/patches/block-mq-don-t-complete-requests-via-IPI.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->__sector = (sector_t) -1;
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -283,6 +283,9 @@ static struct request *blk_mq_rq_ctx_ini
+@@ -318,6 +318,9 @@ static struct request *blk_mq_rq_ctx_ini
/* tag was already set */
rq->extra_len = 0;
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&rq->timeout_list);
rq->timeout = 0;
-@@ -477,12 +480,24 @@ void blk_mq_end_request(struct request *
+@@ -512,12 +515,24 @@ void blk_mq_end_request(struct request *
}
EXPORT_SYMBOL(blk_mq_end_request);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void __blk_mq_complete_request(struct request *rq)
{
-@@ -507,10 +522,18 @@ static void __blk_mq_complete_request(st
+@@ -542,10 +557,18 @@ static void __blk_mq_complete_request(st
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
-@@ -227,7 +227,7 @@ static inline u16 blk_mq_unique_tag_to_t
+@@ -226,7 +226,7 @@ static inline u16 blk_mq_unique_tag_to_t
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
@@ -94,7 +94,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void blk_mq_end_request(struct request *rq, blk_status_t error);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
-@@ -133,6 +133,9 @@ typedef __u32 __bitwise req_flags_t;
+@@ -134,6 +134,9 @@ typedef __u32 __bitwise req_flags_t;
*/
struct request {
struct list_head queuelist;
@@ -102,5 +102,5 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ struct work_struct work;
+#endif
union {
- struct call_single_data csd;
+ call_single_data_t csd;
u64 fifo_time;
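
Besides the context refresh (upstream turned struct call_single_data into
the typedef call_single_data_t), the mechanism of this patch deserves a
note: on RT the completion runs from a work item in process context
instead of a remote softirq raised via IPI. A rough sketch of that shape;
the helper name is invented for illustration, and rq->work is the field
the patch adds to struct request:

#include <linux/workqueue.h>
#include <linux/blkdev.h>

/* Sketch: invoke the driver's completion handler from process context. */
static void blk_mq_complete_work(struct work_struct *work)
{
	struct request *rq = container_of(work, struct request, work);

	rq->q->softirq_done_fn(rq);
}

/* Hypothetical helper: queue the completion on the submitting CPU
 * instead of sending an IPI there. */
static void blk_mq_complete_on_cpu(struct request *rq, int cpu)
{
	INIT_WORK(&rq->work, blk_mq_complete_work);
	queue_work_on(cpu, system_wq, &rq->work);
}
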
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index ff9ccee07aed..9510f6363d1a 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -502,7 +502,7 @@ static void __blk_mq_complete_request(st
+@@ -537,7 +537,7 @@ static void __blk_mq_complete_request(st
return;
}
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
shared = cpus_share_cache(cpu, ctx->cpu);
-@@ -514,7 +514,7 @@ static void __blk_mq_complete_request(st
+@@ -549,7 +549,7 @@ static void __blk_mq_complete_request(st
} else {
rq->q->softirq_done_fn(rq);
}
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1153,14 +1153,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1197,14 +1197,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/block-mq-use-cpu_light.patch b/patches/block-mq-use-cpu_light.patch
index 49394064d38c..6c14c233f8a6 100644
--- a/patches/block-mq-use-cpu_light.patch
+++ b/patches/block-mq-use-cpu_light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
-@@ -97,12 +97,12 @@ static inline struct blk_mq_ctx *__blk_m
+@@ -98,12 +98,12 @@ static inline struct blk_mq_ctx *__blk_m
*/
static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
{
diff --git a/patches/block-shorten-interrupt-disabled-regions.patch b/patches/block-shorten-interrupt-disabled-regions.patch
index bbab37e5ed33..344ef49e4e06 100644
--- a/patches/block-shorten-interrupt-disabled-regions.patch
+++ b/patches/block-shorten-interrupt-disabled-regions.patch
@@ -47,7 +47,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
--- a/block/blk-core.c
+++ b/block/blk-core.c
-@@ -3276,7 +3276,7 @@ static void queue_unplugged(struct reque
+@@ -3288,7 +3288,7 @@ static void queue_unplugged(struct reque
blk_run_queue_async(q);
else
__blk_run_queue(q);
@@ -56,7 +56,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
-@@ -3324,7 +3324,6 @@ EXPORT_SYMBOL(blk_check_plugged);
+@@ -3336,7 +3336,6 @@ EXPORT_SYMBOL(blk_check_plugged);
void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
{
struct request_queue *q;
@@ -64,7 +64,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
struct request *rq;
LIST_HEAD(list);
unsigned int depth;
-@@ -3344,11 +3343,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3356,11 +3355,6 @@ void blk_flush_plug_list(struct blk_plug
q = NULL;
depth = 0;
@@ -76,7 +76,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
while (!list_empty(&list)) {
rq = list_entry_rq(list.next);
list_del_init(&rq->queuelist);
-@@ -3361,7 +3355,7 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3373,7 +3367,7 @@ void blk_flush_plug_list(struct blk_plug
queue_unplugged(q, depth, from_schedule);
q = rq->q;
depth = 0;
@@ -85,7 +85,7 @@ Link: http://lkml.kernel.org/r/20110622174919.025446432@linutronix.de
}
/*
-@@ -3388,8 +3382,6 @@ void blk_flush_plug_list(struct blk_plug
+@@ -3400,8 +3394,6 @@ void blk_flush_plug_list(struct blk_plug
*/
if (q)
queue_unplugged(q, depth, from_schedule);
diff --git a/patches/block-use-cpu-chill.patch b/patches/block-use-cpu-chill.patch
index 3b0f81eef6e5..83b2351bddc4 100644
--- a/patches/block-use-cpu-chill.patch
+++ b/patches/block-use-cpu-chill.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
-@@ -8,6 +8,7 @@
+@@ -9,6 +9,7 @@
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/sched/task.h>
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "blk.h"
-@@ -117,7 +118,7 @@ static void ioc_release_fn(struct work_s
+@@ -118,7 +119,7 @@ static void ioc_release_fn(struct work_s
spin_unlock(q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_lock_irqsave_nested(&ioc->lock, flags, 1);
}
}
-@@ -201,7 +202,7 @@ void put_io_context_active(struct io_con
+@@ -202,7 +203,7 @@ void put_io_context_active(struct io_con
spin_unlock(icq->q->queue_lock);
} else {
spin_unlock_irqrestore(&ioc->lock, flags);
diff --git a/patches/bug-rt-dependend-variants.patch b/patches/bug-rt-dependend-variants.patch
index a15ab6c1a81a..a8c5919342da 100644
--- a/patches/bug-rt-dependend-variants.patch
+++ b/patches/bug-rt-dependend-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
-@@ -232,6 +232,20 @@ void __warn(const char *file, int line,
+@@ -233,6 +233,20 @@ void __warn(const char *file, int line,
# define WARN_ON_SMP(x) ({0;})
#endif
diff --git a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
index fdb796a12355..8bf4ffaa1e99 100644
--- a/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
+++ b/patches/cgroups-scheduling-while-atomic-in-cgroup-code.patch
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1709,6 +1709,7 @@ struct memcg_stock_pcp {
+@@ -1723,6 +1723,7 @@ struct memcg_stock_pcp {
#define FLUSHING_CACHED_CHARGE 0
};
static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static DEFINE_MUTEX(percpu_charge_mutex);
/**
-@@ -1731,7 +1732,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1745,7 +1746,7 @@ static bool consume_stock(struct mem_cgr
if (nr_pages > CHARGE_BATCH)
return ret;
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (memcg == stock->cached && stock->nr_pages >= nr_pages) {
-@@ -1739,7 +1740,7 @@ static bool consume_stock(struct mem_cgr
+@@ -1753,7 +1754,7 @@ static bool consume_stock(struct mem_cgr
ret = true;
}
@@ -68,10 +68,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1766,13 +1767,13 @@ static void drain_local_stock(struct wor
- struct memcg_stock_pcp *stock;
- unsigned long flags;
-
+@@ -1784,13 +1785,13 @@ static void drain_local_stock(struct wor
+ * The only protection from memory hotplug vs. drain_stock races is
+ * that we always operate on local CPU stock here with IRQ disabled
+ */
- local_irq_save(flags);
+ local_lock_irqsave(memcg_stock_ll, flags);
@@ -84,7 +84,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1784,7 +1785,7 @@ static void refill_stock(struct mem_cgro
+@@ -1802,7 +1803,7 @@ static void refill_stock(struct mem_cgro
struct memcg_stock_pcp *stock;
unsigned long flags;
@@ -93,9 +93,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
stock = this_cpu_ptr(&memcg_stock);
if (stock->cached != memcg) { /* reset if necessary */
-@@ -1793,7 +1794,7 @@ static void refill_stock(struct mem_cgro
- }
- stock->nr_pages += nr_pages;
+@@ -1814,7 +1815,7 @@ static void refill_stock(struct mem_cgro
+ if (stock->nr_pages > CHARGE_BATCH)
+ drain_stock(stock);
- local_irq_restore(flags);
+ local_unlock_irqrestore(memcg_stock_ll, flags);
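
The pattern behind these hunks is the standard RT local-lock conversion:
local_irq_save() becomes local_lock_irqsave() on a named per-CPU lock
(memcg_stock_ll here), which still means IRQs-off on a mainline build but
becomes a per-CPU sleeping lock on RT. A minimal sketch of the same
pattern with an illustrative per-CPU variable; linux/locallock.h is the
RT-series header that provides these macros:

#include <linux/locallock.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_stock);	/* illustrative */
static DEFINE_LOCAL_IRQ_LOCK(my_stock_ll);

static void my_stock_add(unsigned long nr)
{
	unsigned long flags;

	/* IRQs-off on !RT, a per-CPU sleeping lock on RT. */
	local_lock_irqsave(my_stock_ll, flags);
	this_cpu_add(my_stock, nr);
	local_unlock_irqrestore(my_stock_ll, flags);
}
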
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index 3f26e8f40031..9f2804d86808 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
-@@ -18,6 +18,7 @@
+@@ -19,6 +19,7 @@
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
#include <linux/bpf-cgroup.h>
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_CGROUPS
-@@ -146,6 +147,7 @@ struct cgroup_subsys_state {
+@@ -152,6 +153,7 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* PI: the parent css. Placed here for cache proximity to following
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
-@@ -4001,10 +4001,10 @@ static void css_free_rcu_fn(struct rcu_h
+@@ -4500,10 +4500,10 @@ static void css_free_rcu_fn(struct rcu_h
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4049,8 +4049,8 @@ static void css_release(struct percpu_re
+@@ -4554,8 +4554,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
@@ -76,7 +76,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void init_and_link_css(struct cgroup_subsys_state *css,
-@@ -4712,6 +4712,7 @@ static int __init cgroup_wq_init(void)
+@@ -5261,6 +5261,7 @@ static int __init cgroup_wq_init(void)
*/
cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
BUG_ON(!cgroup_destroy_wq);
diff --git a/patches/clocksource-tclib-allow-higher-clockrates.patch b/patches/clocksource-tclib-allow-higher-clockrates.patch
index 4a1f9831c834..891ab76ba6f1 100644
--- a/patches/clocksource-tclib-allow-higher-clockrates.patch
+++ b/patches/clocksource-tclib-allow-higher-clockrates.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/clocksource/tcb_clksrc.c
+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -24,8 +24,7 @@
+@@ -25,8 +25,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* A boot clocksource and clockevent source are also currently needed,
* unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -126,6 +125,7 @@ struct tc_clkevt_device {
+@@ -127,6 +126,7 @@ struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
bool clk_enabled;
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __iomem *regs;
};
-@@ -134,13 +134,6 @@ static struct tc_clkevt_device *to_tc_cl
+@@ -135,13 +135,6 @@ static struct tc_clkevt_device *to_tc_cl
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static u32 timer_clock;
static void tc_clk_disable(struct clock_event_device *d)
-@@ -190,7 +183,7 @@ static int tc_set_oneshot(struct clock_e
+@@ -191,7 +184,7 @@ static int tc_set_oneshot(struct clock_e
tc_clk_enable(d);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -212,10 +205,10 @@ static int tc_set_periodic(struct clock_
+@@ -213,10 +206,10 @@ static int tc_set_periodic(struct clock_
*/
tc_clk_enable(d);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -242,7 +235,11 @@ static struct tc_clkevt_device clkevt =
+@@ -243,7 +236,11 @@ static struct tc_clkevt_device clkevt =
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.set_next_event = tc_next_event,
.set_state_shutdown = tc_shutdown_clk_off,
.set_state_periodic = tc_set_periodic,
-@@ -264,8 +261,9 @@ static irqreturn_t ch2_irq(int irq, void
+@@ -265,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void
return IRQ_NONE;
}
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-@@ -286,7 +284,11 @@ static int __init setup_clkevents(struct
+@@ -287,7 +285,11 @@ static int __init setup_clkevents(struct
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
@@ -106,7 +106,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clkevt.clkevt.cpumask = cpumask_of(0);
-@@ -297,7 +299,7 @@ static int __init setup_clkevents(struct
+@@ -298,7 +300,7 @@ static int __init setup_clkevents(struct
return ret;
}
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -434,7 +436,11 @@ static int __init tcb_clksrc_init(void)
+@@ -435,7 +437,11 @@ static int __init tcb_clksrc_init(void)
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index c43447449315..2055554a983e 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -11,15 +11,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
drivers/net/wireless/intersil/orinoco/orinoco_usb.c | 2 -
drivers/usb/gadget/function/f_fs.c | 2 -
drivers/usb/gadget/legacy/inode.c | 4 +-
- include/linux/completion.h | 9 ++---
+ include/linux/completion.h | 10 ++---
include/linux/suspend.h | 6 +++
include/linux/swait.h | 1
kernel/power/hibernate.c | 7 ++++
- kernel/power/suspend.c | 5 +++
- kernel/sched/completion.c | 32 ++++++++++----------
- kernel/sched/core.c | 10 +++++-
- kernel/sched/swait.c | 20 ++++++++++++
- 11 files changed, 71 insertions(+), 27 deletions(-)
+ kernel/power/suspend.c | 4 ++
+ kernel/sched/completion.c | 34 ++++++++++----------
+ kernel/sched/core.c | 10 ++++-
+ kernel/sched/swait.c | 20 +++++++++++
+ 11 files changed, 72 insertions(+), 28 deletions(-)
--- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
+++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c
@@ -34,15 +34,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
break;
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1609,7 +1609,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1610,7 +1610,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
- waitqueue_active(&ffs->ep0req_completion.wait) ||
+ swait_active(&ffs->ep0req_completion.wait) ||
waitqueue_active(&ffs->wait));
+ destroy_workqueue(ffs->io_completion_wq);
kfree(ffs->dev_name);
- kfree(ffs);
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -347,7 +347,7 @@ ep_io (struct ep_data *epdata, void *buf
@@ -65,32 +65,40 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
-@@ -7,8 +7,7 @@
- * Atomic wait-for-completion handler data structures.
+@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/
--
+
-#include <linux/wait.h>
+#include <linux/swait.h>
-
- /*
- * struct completion - structure used to maintain state for a "completion"
-@@ -24,11 +23,11 @@
+ #ifdef CONFIG_LOCKDEP_COMPLETIONS
+ #include <linux/lockdep.h>
+ #endif
+@@ -28,7 +28,7 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
- };
+ #ifdef CONFIG_LOCKDEP_COMPLETIONS
+ struct lockdep_map_cross map;
+ #endif
+@@ -67,11 +67,11 @@ static inline void complete_release_comm
+ #ifdef CONFIG_LOCKDEP_COMPLETIONS
+ #define COMPLETION_INITIALIZER(work) \
+- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
++ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait), \
+ STATIC_CROSS_LOCKDEP_MAP_INIT("(complete)" #work, &(work)) }
+ #else
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ #endif
#define COMPLETION_INITIALIZER_ONSTACK(work) \
- ({ init_completion(&work); work; })
-@@ -73,7 +72,7 @@ struct completion {
- static inline void init_completion(struct completion *x)
+@@ -117,7 +117,7 @@ static inline void complete_release_comm
+ static inline void __init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
@@ -100,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
-@@ -195,6 +195,12 @@ struct platform_freeze_ops {
+@@ -196,6 +196,12 @@ struct platform_s2idle_ops {
void (*end)(void);
};
@@ -111,11 +119,11 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+
#ifdef CONFIG_SUSPEND
- /**
- * suspend_set_ops - set platform dependent suspend operations
+ extern suspend_state_t mem_sleep_current;
+ extern suspend_state_t mem_sleep_default;
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
-@@ -87,6 +87,7 @@ static inline int swait_active(struct sw
+@@ -147,6 +147,7 @@ static inline bool swq_has_sleeper(struc
extern void swake_up(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
@@ -145,17 +153,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lock_system_sleep();
/* The snapshot device should not be opened while we're running */
if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
-@@ -769,6 +775,7 @@ int hibernate(void)
+@@ -770,6 +776,7 @@ int hibernate(void)
atomic_inc(&snapshot_device_available);
Unlock:
unlock_system_sleep();
+ pm_in_action = false;
- return error;
- }
+ pr_info("hibernation exit\n");
+ return error;
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -569,6 +569,8 @@ static int enter_state(suspend_state_t s
+@@ -593,6 +593,8 @@ static int enter_state(suspend_state_t s
return error;
}
@@ -164,31 +172,35 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* pm_suspend - Externally visible function for suspending the system.
* @state: System sleep state to enter.
-@@ -583,6 +585,8 @@ int pm_suspend(suspend_state_t state)
+@@ -607,6 +609,7 @@ int pm_suspend(suspend_state_t state)
if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX)
return -EINVAL;
+ pm_in_action = true;
-+
+ pr_info("suspend entry (%s)\n", mem_sleep_labels[state]);
error = enter_state(state);
if (error) {
- suspend_stats.fail++;
-@@ -590,6 +594,7 @@ int pm_suspend(suspend_state_t state)
- } else {
+@@ -616,6 +619,7 @@ int pm_suspend(suspend_state_t state)
suspend_stats.success++;
}
+ pr_info("suspend exit\n");
+ pm_in_action = false;
return error;
}
EXPORT_SYMBOL(pm_suspend);
--- a/kernel/sched/completion.c
+++ b/kernel/sched/completion.c
-@@ -31,11 +31,11 @@ void complete(struct completion *x)
+@@ -32,7 +32,7 @@ void complete(struct completion *x)
{
unsigned long flags;
- spin_lock_irqsave(&x->wait.lock, flags);
+ raw_spin_lock_irqsave(&x->wait.lock, flags);
+
+ /*
+ * Perform commit of crossrelease here.
+@@ -41,8 +41,8 @@ void complete(struct completion *x)
+
if (x->done != UINT_MAX)
x->done++;
- __wake_up_locked(&x->wait, TASK_NORMAL, 1);
@@ -198,7 +210,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete);
-@@ -52,10 +52,10 @@ void complete_all(struct completion *x)
+@@ -66,10 +66,10 @@ void complete_all(struct completion *x)
{
unsigned long flags;
@@ -212,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(complete_all);
-@@ -64,20 +64,20 @@ do_wait_for_common(struct completion *x,
+@@ -78,20 +78,20 @@ do_wait_for_common(struct completion *x,
long (*action)(long), long timeout, int state)
{
if (!x->done) {
@@ -238,19 +250,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!x->done)
return timeout;
}
-@@ -92,9 +92,9 @@ static inline long __sched
- {
- might_sleep();
+@@ -108,9 +108,9 @@ static inline long __sched
+
+ complete_acquire(x);
- spin_lock_irq(&x->wait.lock);
+ raw_spin_lock_irq(&x->wait.lock);
timeout = do_wait_for_common(x, action, timeout, state);
- spin_unlock_irq(&x->wait.lock);
+ raw_spin_unlock_irq(&x->wait.lock);
- return timeout;
- }
-@@ -280,12 +280,12 @@ bool try_wait_for_completion(struct comp
+ complete_release(x);
+
+@@ -299,12 +299,12 @@ bool try_wait_for_completion(struct comp
if (!READ_ONCE(x->done))
return 0;
@@ -265,18 +277,20 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(try_wait_for_completion);
-@@ -314,7 +314,7 @@ bool completion_done(struct completion *
- * after it's acquired the lock.
+@@ -330,8 +330,8 @@ bool completion_done(struct completion *
+ * otherwise we can end up freeing the completion before complete()
+ * is done referencing it.
*/
- smp_rmb();
-- spin_unlock_wait(&x->wait.lock);
-+ raw_spin_unlock_wait(&x->wait.lock);
+- spin_lock_irqsave(&x->wait.lock, flags);
+- spin_unlock_irqrestore(&x->wait.lock, flags);
++ raw_spin_lock_irqsave(&x->wait.lock, flags);
++ raw_spin_unlock_irqrestore(&x->wait.lock, flags);
return true;
}
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6886,7 +6886,10 @@ void migrate_disable(void)
+@@ -6930,7 +6930,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -288,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -6916,7 +6919,10 @@ void migrate_enable(void)
+@@ -6960,7 +6963,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
@@ -302,14 +316,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
WARN_ON_ONCE(p->migrate_disable <= 0);
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
-@@ -1,5 +1,6 @@
+@@ -1,6 +1,7 @@
+ // SPDX-License-Identifier: GPL-2.0
#include <linux/sched/signal.h>
#include <linux/swait.h>
+#include <linux/suspend.h>
void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
struct lock_class_key *key)
-@@ -29,6 +30,25 @@ void swake_up_locked(struct swait_queue_
+@@ -30,6 +31,25 @@ void swake_up_locked(struct swait_queue_
}
EXPORT_SYMBOL(swake_up_locked);
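
The completion.c hunks show the core of this patch: completions wait on a
simple wait queue whose head is protected by a raw spinlock, so
complete() stays usable from contexts that must not take sleeping locks
on RT. A minimal sketch of the swait API the conversion relies on (the
flag and names are illustrative):

#include <linux/swait.h>

static DECLARE_SWAIT_QUEUE_HEAD(my_swq);
static bool my_cond;

/* Waiter side: blocks until my_cond becomes true. */
static void my_wait(void)
{
	swait_event(my_swq, READ_ONCE(my_cond));
}

/* Wakeup side: wakes a single waiter; callable from IRQ context. */
static void my_signal(void)
{
	WRITE_ONCE(my_cond, true);
	swake_up(&my_swq);
}
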
diff --git a/patches/cond-resched-lock-rt-tweak.patch b/patches/cond-resched-lock-rt-tweak.patch
index 5147d94b2175..c3caef32f78c 100644
--- a/patches/cond-resched-lock-rt-tweak.patch
+++ b/patches/cond-resched-lock-rt-tweak.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -117,7 +117,11 @@
+@@ -118,7 +118,11 @@
/*
* The preempt_count offset after spin_lock()
*/
diff --git a/patches/cond-resched-softirq-rt.patch b/patches/cond-resched-softirq-rt.patch
index 7fb731f5cd00..4c6960402fbc 100644
--- a/patches/cond-resched-softirq-rt.patch
+++ b/patches/cond-resched-softirq-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1565,12 +1565,16 @@ extern int __cond_resched_lock(spinlock_
+@@ -1606,12 +1606,16 @@ extern int __cond_resched_lock(spinlock_
__cond_resched_lock(lock); \
})
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -4912,6 +4912,7 @@ int __cond_resched_lock(spinlock_t *lock
+@@ -4946,6 +4946,7 @@ int __cond_resched_lock(spinlock_t *lock
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4925,6 +4926,7 @@ int __sched __cond_resched_softirq(void)
+@@ -4959,6 +4960,7 @@ int __sched __cond_resched_softirq(void)
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch
index 69e01f8334c1..54ca45d0e358 100644
--- a/patches/cpu-hotplug--Implement-CPU-pinning.patch
+++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch
@@ -20,9 +20,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# endif
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -66,6 +66,11 @@ struct cpuhp_cpu_state {
-
- static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+@@ -73,6 +73,11 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+ .fail = CPUHP_INVALID,
+ };
+#ifdef CONFIG_HOTPLUG_CPU
+static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
@@ -30,9 +30,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+
#if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
- static struct lock_class_key cpuhp_state_key;
- static struct lockdep_map cpuhp_state_lock_map =
-@@ -216,7 +221,30 @@ static int cpu_hotplug_disabled;
+ static struct lockdep_map cpuhp_state_up_map =
+ STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
+@@ -291,7 +296,30 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -224,6 +252,13 @@ void pin_current_cpu(void)
+@@ -299,6 +327,13 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -621,6 +656,7 @@ static int take_cpu_down(void *_param)
+@@ -768,6 +803,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -634,11 +670,14 @@ static int takedown_cpu(unsigned int cpu
+@@ -781,11 +817,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -100,8 +100,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -657,6 +696,7 @@ static int takedown_cpu(unsigned int cpu
- wait_for_completion(&st->done);
+@@ -804,6 +843,7 @@ static int takedown_cpu(unsigned int cpu
+ wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+ __write_rt_unlock(cpuhp_pin);
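
The mechanism these hunks refresh: a per-CPU rt_rw_lock which
pin_current_cpu() takes on the read side and takedown_cpu() takes on the
write side, so a task pinned to a CPU blocks that CPU's unplug instead of
racing it. On the caller side this is just a pin/unpin bracket; a sketch
with a hypothetical per-CPU counter (pin_current_cpu() and
unpin_current_cpu() are RT-series helpers, not mainline API):

#include <linux/cpu.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);	/* illustrative */

static void touch_this_cpu_state(void)
{
	pin_current_cpu();		/* read-lock this CPU's hotplug pin */
	this_cpu_inc(my_counter);	/* safe: the CPU cannot go away here */
	unpin_current_cpu();
}
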
diff --git a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
index 3c85f50f0de8..d6fe2370e89a 100644
--- a/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
+++ b/patches/cpu_chill-Add-a-UNINTERRUPTIBLE-hrtimer_nanosleep.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1709,12 +1709,13 @@ int nanosleep_copyout(struct restart_blo
+@@ -1739,12 +1739,13 @@ int nanosleep_copyout(struct restart_blo
return -ERESTART_RESTARTBLOCK;
}
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_start_expires(&t->timer, mode);
if (likely(t->task))
-@@ -1752,13 +1753,15 @@ static long __sched hrtimer_nanosleep_re
+@@ -1782,13 +1783,15 @@ static long __sched hrtimer_nanosleep_re
hrtimer_init_sleeper_on_stack(&t, restart->nanosleep.clockid,
HRTIMER_MODE_ABS, current);
hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct restart_block *restart;
struct hrtimer_sleeper t;
-@@ -1771,7 +1774,7 @@ long hrtimer_nanosleep(const struct time
+@@ -1801,7 +1804,7 @@ long hrtimer_nanosleep(const struct time
hrtimer_init_sleeper_on_stack(&t, clockid, mode, current);
hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret != -ERESTART_RESTARTBLOCK)
goto out;
-@@ -1790,6 +1793,12 @@ long hrtimer_nanosleep(const struct time
+@@ -1820,6 +1823,12 @@ long hrtimer_nanosleep(const struct time
return ret;
}
@@ -90,12 +90,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
struct timespec __user *, rmtp)
{
-@@ -1837,7 +1846,8 @@ void cpu_chill(void)
+@@ -1867,7 +1876,8 @@ void cpu_chill(void)
unsigned int freeze_flag = current->flags & PF_NOFREEZE;
current->flags |= PF_NOFREEZE;
-- hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD);
-+ __hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD,
+- hrtimer_nanosleep(&tu, HRTIMER_MODE_REL_HARD, CLOCK_MONOTONIC);
++ __hrtimer_nanosleep(&tu, HRTIMER_MODE_REL_HARD, CLOCK_MONOTONIC,
+ TASK_UNINTERRUPTIBLE);
if (!freeze_flag)
current->flags &= ~PF_NOFREEZE;
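
After the refresh, cpu_chill() sleeps in HRTIMER_MODE_REL_HARD and, via
the new __hrtimer_nanosleep(), with an explicit TASK_UNINTERRUPTIBLE task
state, so a chilling task cannot be woken early by a signal. The public
entry point then plausibly reduces to a thin wrapper; a sketch consistent
with the hunks above (exact form assumed):

#include <linux/hrtimer.h>
#include <linux/sched.h>

/* Sketch: preserve the historical interruptible behaviour for all
 * existing callers; only cpu_chill() passes TASK_UNINTERRUPTIBLE. */
long hrtimer_nanosleep(const struct timespec64 *rqtp,
		       const enum hrtimer_mode mode,
		       const clockid_t clockid)
{
	return __hrtimer_nanosleep(rqtp, mode, clockid, TASK_INTERRUPTIBLE);
}
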
diff --git a/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch b/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
deleted file mode 100644
index 14e5556fc329..000000000000
--- a/patches/cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
+++ /dev/null
@@ -1,176 +0,0 @@
-From: Alex Shi <alex.shi@linaro.org>
-Date: Fri, 28 Jul 2017 15:09:25 +0800
-Subject: PM / CPU: replace raw_notifier with atomic_notifier
-
-This patch replaces an rwlock and a raw notifier with an atomic notifier
-protected by a spin_lock and RCU.
-
-The main reason for this change is a 'scheduling while atomic' bug seen
-with RT kernels on ARM/ARM64: the rwlock cpu_pm_notifier_lock taken in
-cpu_pm_enter/exit() can cause a schedule after IRQs have been disabled
-in the idle call chain:
-
-cpu_startup_entry
- cpu_idle_loop
- local_irq_disable()
- cpuidle_idle_call
- call_cpuidle
- cpuidle_enter
- cpuidle_enter_state
- ->enter :arm_enter_idle_state
- cpu_pm_enter/exit
- CPU_PM_CPU_IDLE_ENTER
- read_lock(&cpu_pm_notifier_lock); <-- sleep in idle
- __rt_spin_lock();
- schedule();
-
-The kernel panic is here:
-[ 4.609601] BUG: scheduling while atomic: swapper/1/0/0x00000002
-[ 4.609608] [<ffff0000086fae70>] arm_enter_idle_state+0x18/0x70
-[ 4.609614] Modules linked in:
-[ 4.609615] [<ffff0000086f9298>] cpuidle_enter_state+0xf0/0x218
-[ 4.609620] [<ffff0000086f93f8>] cpuidle_enter+0x18/0x20
-[ 4.609626] Preemption disabled at:
-[ 4.609627] [<ffff0000080fa234>] call_cpuidle+0x24/0x40
-[ 4.609635] [<ffff000008882fa4>] schedule_preempt_disabled+0x1c/0x28
-[ 4.609639] [<ffff0000080fa49c>] cpu_startup_entry+0x154/0x1f8
-[ 4.609645] [<ffff00000808e004>] secondary_start_kernel+0x15c/0x1a0
-
-Daniel Lezcano said this notification is needed on arm/arm64 platforms.
-Sebastian suggested using an atomic_notifier instead of the rwlock, which
-not only removes the sleeping in idle but also improves latency.
-
-Tony Lindgren found a misuse: rcu_read_lock() was used after rcu_idle_enter().
-Paul McKenney suggested trying RCU_NONIDLE.
-
-Signed-off-by: Alex Shi <alex.shi@linaro.org>
-Tested-by: Tony Lindgren <tony@atomide.com>
-Acked-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-[ rjw: Subject & changelog ]
-Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/cpu_pm.c | 50 +++++++++++++-------------------------------------
- 1 file changed, 13 insertions(+), 37 deletions(-)
-
---- a/kernel/cpu_pm.c
-+++ b/kernel/cpu_pm.c
-@@ -22,15 +22,21 @@
- #include <linux/spinlock.h>
- #include <linux/syscore_ops.h>
-
--static DEFINE_RWLOCK(cpu_pm_notifier_lock);
--static RAW_NOTIFIER_HEAD(cpu_pm_notifier_chain);
-+static ATOMIC_NOTIFIER_HEAD(cpu_pm_notifier_chain);
-
- static int cpu_pm_notify(enum cpu_pm_event event, int nr_to_call, int *nr_calls)
- {
- int ret;
-
-- ret = __raw_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
-+ /*
-+	 * __atomic_notifier_call_chain has an RCU read critical section, which
-+	 * could be dysfunctional in cpu idle. Copy RCU_NONIDLE code to let
-+	 * RCU know this.
-+ */
-+ rcu_irq_enter_irqson();
-+ ret = __atomic_notifier_call_chain(&cpu_pm_notifier_chain, event, NULL,
- nr_to_call, nr_calls);
-+ rcu_irq_exit_irqson();
-
- return notifier_to_errno(ret);
- }
-@@ -47,14 +53,7 @@ static int cpu_pm_notify(enum cpu_pm_eve
- */
- int cpu_pm_register_notifier(struct notifier_block *nb)
- {
-- unsigned long flags;
-- int ret;
--
-- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-- ret = raw_notifier_chain_register(&cpu_pm_notifier_chain, nb);
-- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
--
-- return ret;
-+ return atomic_notifier_chain_register(&cpu_pm_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_register_notifier);
-
-@@ -69,14 +68,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_register_notifi
- */
- int cpu_pm_unregister_notifier(struct notifier_block *nb)
- {
-- unsigned long flags;
-- int ret;
--
-- write_lock_irqsave(&cpu_pm_notifier_lock, flags);
-- ret = raw_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
-- write_unlock_irqrestore(&cpu_pm_notifier_lock, flags);
--
-- return ret;
-+ return atomic_notifier_chain_unregister(&cpu_pm_notifier_chain, nb);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
-
-@@ -100,7 +92,6 @@ int cpu_pm_enter(void)
- int nr_calls;
- int ret = 0;
-
-- read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_PM_ENTER, -1, &nr_calls);
- if (ret)
- /*
-@@ -108,7 +99,6 @@ int cpu_pm_enter(void)
- * PM entry who are notified earlier to prepare for it.
- */
- cpu_pm_notify(CPU_PM_ENTER_FAILED, nr_calls - 1, NULL);
-- read_unlock(&cpu_pm_notifier_lock);
-
- return ret;
- }
-@@ -128,13 +118,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_enter);
- */
- int cpu_pm_exit(void)
- {
-- int ret;
--
-- read_lock(&cpu_pm_notifier_lock);
-- ret = cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
-- read_unlock(&cpu_pm_notifier_lock);
--
-- return ret;
-+ return cpu_pm_notify(CPU_PM_EXIT, -1, NULL);
- }
- EXPORT_SYMBOL_GPL(cpu_pm_exit);
-
-@@ -159,7 +143,6 @@ int cpu_cluster_pm_enter(void)
- int nr_calls;
- int ret = 0;
-
-- read_lock(&cpu_pm_notifier_lock);
- ret = cpu_pm_notify(CPU_CLUSTER_PM_ENTER, -1, &nr_calls);
- if (ret)
- /*
-@@ -167,7 +150,6 @@ int cpu_cluster_pm_enter(void)
- * PM entry who are notified earlier to prepare for it.
- */
- cpu_pm_notify(CPU_CLUSTER_PM_ENTER_FAILED, nr_calls - 1, NULL);
-- read_unlock(&cpu_pm_notifier_lock);
-
- return ret;
- }
-@@ -190,13 +172,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
- */
- int cpu_cluster_pm_exit(void)
- {
-- int ret;
--
-- read_lock(&cpu_pm_notifier_lock);
-- ret = cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
-- read_unlock(&cpu_pm_notifier_lock);
--
-- return ret;
-+ return cpu_pm_notify(CPU_CLUSTER_PM_EXIT, -1, NULL);
- }
- EXPORT_SYMBOL_GPL(cpu_cluster_pm_exit);
-
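
For reference, the atomic notifier API the dropped patch converts to:
registration and traversal need no caller-side lock, since the chain is
protected internally by a spinlock plus RCU. A minimal sketch (mainline
API; the callback and event value are illustrative):

#include <linux/notifier.h>

static ATOMIC_NOTIFIER_HEAD(my_pm_chain);

/* Illustrative callback: runs under rcu_read_lock(), must not sleep. */
static int my_pm_cb(struct notifier_block *nb, unsigned long event, void *p)
{
	return NOTIFY_OK;
}

static struct notifier_block my_pm_nb = {
	.notifier_call = my_pm_cb,
};

static int my_pm_init(void)
{
	return atomic_notifier_chain_register(&my_pm_chain, &my_pm_nb);
}

static int my_pm_event(unsigned long event)
{
	/* No rwlock needed around the call, unlike a raw notifier chain. */
	return notifier_to_errno(
		atomic_notifier_call_chain(&my_pm_chain, event, NULL));
}
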
diff --git a/patches/cpumask-disable-offstack-on-rt.patch b/patches/cpumask-disable-offstack-on-rt.patch
index 220a71e134f0..0dae377817e5 100644
--- a/patches/cpumask-disable-offstack-on-rt.patch
+++ b/patches/cpumask-disable-offstack-on-rt.patch
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -919,7 +919,7 @@ config IOMMU_HELPER
+@@ -921,7 +921,7 @@ config IOMMU_HELPER
config MAXSMP
bool "Enable Maximum number of SMP Processors and NUMA Nodes"
depends on X86_64 && SMP && DEBUG_KERNEL
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
If unsure, say N.
--- a/lib/Kconfig
+++ b/lib/Kconfig
-@@ -417,6 +417,7 @@ config CHECK_SIGNATURE
+@@ -428,6 +428,7 @@ config CHECK_SIGNATURE
config CPUMASK_OFFSTACK
bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS
diff --git a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
index 97c607f2fdc9..821fb08d641b 100644
--- a/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
+++ b/patches/cpuset-Convert-callback_lock-to-raw_spinlock_t.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
-@@ -287,7 +287,7 @@ static struct cpuset top_cpuset = {
+@@ -288,7 +288,7 @@ static struct cpuset top_cpuset = {
*/
static DEFINE_MUTEX(cpuset_mutex);
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static struct workqueue_struct *cpuset_migrate_mm_wq;
-@@ -910,9 +910,9 @@ static void update_cpumasks_hier(struct
+@@ -926,9 +926,9 @@ static void update_cpumasks_hier(struct
continue;
rcu_read_unlock();
@@ -69,9 +69,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ WARN_ON(!is_in_v2_mode() &&
!cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
-@@ -977,9 +977,9 @@ static int update_cpumask(struct cpuset
+@@ -993,9 +993,9 @@ static int update_cpumask(struct cpuset
if (retval < 0)
return retval;
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->cpus_allowed as a temp variable */
update_cpumasks_hier(cs, trialcs->cpus_allowed);
-@@ -1164,9 +1164,9 @@ static void update_nodemasks_hier(struct
+@@ -1179,9 +1179,9 @@ static void update_nodemasks_hier(struct
continue;
rcu_read_unlock();
@@ -93,9 +93,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- spin_unlock_irq(&callback_lock);
+ raw_spin_unlock_irq(&callback_lock);
- WARN_ON(!cgroup_subsys_on_dfl(cpuset_cgrp_subsys) &&
+ WARN_ON(!is_in_v2_mode() &&
!nodes_equal(cp->mems_allowed, cp->effective_mems));
-@@ -1234,9 +1234,9 @@ static int update_nodemask(struct cpuset
+@@ -1249,9 +1249,9 @@ static int update_nodemask(struct cpuset
if (retval < 0)
goto done;
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* use trialcs->mems_allowed as a temp variable */
update_nodemasks_hier(cs, &trialcs->mems_allowed);
-@@ -1327,9 +1327,9 @@ static int update_flag(cpuset_flagbits_t
+@@ -1342,9 +1342,9 @@ static int update_flag(cpuset_flagbits_t
spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
|| (is_spread_page(cs) != is_spread_page(trialcs)));
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
rebuild_sched_domains_locked();
-@@ -1744,7 +1744,7 @@ static int cpuset_common_seq_show(struct
+@@ -1759,7 +1759,7 @@ static int cpuset_common_seq_show(struct
cpuset_filetype_t type = seq_cft(sf)->private;
int ret = 0;
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
switch (type) {
case FILE_CPULIST:
-@@ -1763,7 +1763,7 @@ static int cpuset_common_seq_show(struct
+@@ -1778,7 +1778,7 @@ static int cpuset_common_seq_show(struct
ret = -EINVAL;
}
@@ -137,13 +137,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -1978,12 +1978,12 @@ static int cpuset_css_online(struct cgro
+@@ -1993,12 +1993,12 @@ static int cpuset_css_online(struct cgro
cpuset_inc();
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+ if (is_in_v2_mode()) {
cpumask_copy(cs->effective_cpus, parent->effective_cpus);
cs->effective_mems = parent->effective_mems;
}
@@ -152,7 +152,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
goto out_unlock;
-@@ -2010,12 +2010,12 @@ static int cpuset_css_online(struct cgro
+@@ -2025,12 +2025,12 @@ static int cpuset_css_online(struct cgro
}
rcu_read_unlock();
@@ -167,16 +167,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
mutex_unlock(&cpuset_mutex);
return 0;
-@@ -2054,7 +2054,7 @@ static void cpuset_css_free(struct cgrou
+@@ -2069,7 +2069,7 @@ static void cpuset_css_free(struct cgrou
static void cpuset_bind(struct cgroup_subsys_state *root_css)
{
mutex_lock(&cpuset_mutex);
- spin_lock_irq(&callback_lock);
+ raw_spin_lock_irq(&callback_lock);
- if (cgroup_subsys_on_dfl(cpuset_cgrp_subsys)) {
+ if (is_in_v2_mode()) {
cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
-@@ -2065,7 +2065,7 @@ static void cpuset_bind(struct cgroup_su
+@@ -2080,7 +2080,7 @@ static void cpuset_bind(struct cgroup_su
top_cpuset.mems_allowed = top_cpuset.effective_mems;
}
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&cpuset_mutex);
}
-@@ -2163,12 +2163,12 @@ hotplug_update_tasks_legacy(struct cpuse
+@@ -2178,12 +2178,12 @@ hotplug_update_tasks_legacy(struct cpuse
{
bool is_empty;
@@ -200,7 +200,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Don't call update_tasks_cpumask() if the cpuset becomes empty,
-@@ -2205,10 +2205,10 @@ hotplug_update_tasks(struct cpuset *cs,
+@@ -2220,10 +2220,10 @@ hotplug_update_tasks(struct cpuset *cs,
if (nodes_empty(*new_mems))
*new_mems = parent_cs(cs)->effective_mems;
@@ -213,7 +213,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpus_updated)
update_tasks_cpumask(cs);
-@@ -2301,21 +2301,21 @@ static void cpuset_hotplug_workfn(struct
+@@ -2316,21 +2316,21 @@ static void cpuset_hotplug_workfn(struct
/* synchronize cpus_allowed to cpu_active_mask */
if (cpus_updated) {
@@ -239,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_tasks_nodemask(&top_cpuset);
}
-@@ -2420,11 +2420,11 @@ void cpuset_cpus_allowed(struct task_str
+@@ -2429,11 +2429,11 @@ void cpuset_cpus_allowed(struct task_str
{
unsigned long flags;
@@ -253,7 +253,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
-@@ -2472,11 +2472,11 @@ nodemask_t cpuset_mems_allowed(struct ta
+@@ -2481,11 +2481,11 @@ nodemask_t cpuset_mems_allowed(struct ta
nodemask_t mask;
unsigned long flags;
@@ -267,7 +267,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return mask;
}
-@@ -2568,14 +2568,14 @@ bool __cpuset_node_allowed(int node, gfp
+@@ -2577,14 +2577,14 @@ bool __cpuset_node_allowed(int node, gfp
return true;
/* Not hardwall and node outside mems_allowed: scan up cpusets */
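
The conversion above is mechanical, but the motivation is RT-specific:
spinlock_t turns into a sleeping lock on PREEMPT_RT_FULL, so a lock taken
in atomic context must become a raw_spinlock_t, which keeps spinning
semantics on every configuration. A minimal sketch of the resulting
pattern (names illustrative):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(my_callback_lock);

static void my_update(unsigned long *dst, unsigned long val)
{
	unsigned long flags;

	/* Stays a spinning, IRQ-disabling lock even on RT. */
	raw_spin_lock_irqsave(&my_callback_lock, flags);
	*dst = val;
	raw_spin_unlock_irqrestore(&my_callback_lock, flags);
}
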
diff --git a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
index e36b96ec1640..db318a6576ba 100644
--- a/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
+++ b/patches/crypto-Reduce-preempt-disabled-regions-more-algos.patch
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return err;
}
-@@ -311,7 +309,7 @@ static unsigned int __ctr_crypt(struct b
+@@ -310,7 +308,7 @@ static unsigned int __ctr_crypt(struct b
static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct blkcipher_walk walk;
int err;
-@@ -320,13 +318,12 @@ static int ctr_crypt(struct blkcipher_de
+@@ -319,13 +317,12 @@ static int ctr_crypt(struct blkcipher_de
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
diff --git a/patches/debugobjects-rt.patch b/patches/debugobjects-rt.patch
index 2ab2aad54bde..dd32133de09b 100644
--- a/patches/debugobjects-rt.patch
+++ b/patches/debugobjects-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
-@@ -334,7 +334,10 @@ static void
+@@ -336,7 +336,10 @@ static void
struct debug_obj *obj;
unsigned long flags;
diff --git a/patches/dm-make-rt-aware.patch b/patches/dm-make-rt-aware.patch
index 268b1d0a3f34..6f8e3e635e81 100644
--- a/patches/dm-make-rt-aware.patch
+++ b/patches/dm-make-rt-aware.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
-@@ -674,7 +674,7 @@ static void dm_old_request_fn(struct req
+@@ -671,7 +671,7 @@ static void dm_old_request_fn(struct req
/* Establish tio->ti before queuing work (map_tio_request) */
tio->ti = ti;
kthread_queue_work(&md->kworker, &tio->work);
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index e376e6d7c1c2..3f0fb4e9b950 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -10,12 +10,12 @@ Signed-off-by: Mike Galbraith <umgwanakikbuti@gmail.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
drivers/block/zram/zram_drv.c | 26 ++++++++++++++++++++++++++
- drivers/block/zram/zram_drv.h | 4 ++++
- 2 files changed, 30 insertions(+)
+ drivers/block/zram/zram_drv.h | 3 +++
+ 2 files changed, 29 insertions(+)
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -422,6 +422,30 @@ static DEVICE_ATTR_RO(io_stat);
+@@ -756,6 +756,30 @@ static DEVICE_ATTR_RO(io_stat);
static DEVICE_ATTR_RO(mm_stat);
static DEVICE_ATTR_RO(debug_stat);
@@ -46,15 +46,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void zram_slot_lock(struct zram *zram, u32 index)
{
bit_spin_lock(ZRAM_ACCESS, &zram->table[index].value);
-@@ -431,6 +455,7 @@ static void zram_slot_unlock(struct zram
+@@ -765,6 +789,7 @@ static void zram_slot_unlock(struct zram
{
bit_spin_unlock(ZRAM_ACCESS, &zram->table[index].value);
}
+#endif
- static bool zram_same_page_read(struct zram *zram, u32 index,
- struct page *page,
-@@ -505,6 +530,7 @@ static bool zram_meta_alloc(struct zram
+ static void zram_meta_free(struct zram *zram, u64 disksize)
+ {
+@@ -794,6 +819,7 @@ static bool zram_meta_alloc(struct zram
return false;
}
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
-@@ -76,6 +76,9 @@ struct zram_table_entry {
+@@ -77,6 +77,9 @@ struct zram_table_entry {
unsigned long element;
};
unsigned long value;
@@ -74,9 +74,3 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct zram_stats {
-@@ -116,4 +119,5 @@ struct zram {
- */
- bool claim; /* Protected by bdev->bd_mutex */
- };
-+
- #endif
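
The shape of this conversion, for reference: on RT each zram table entry
carries a real spinlock and the slot-lock helpers use it, while the !RT
build keeps the bit_spin_lock() on ZRAM_ACCESS shown in the context
lines. A sketch of the RT variant (the 'lock' member name is assumed;
the actual patch may name it differently, and zram_drv.h is the
driver-local header):

#include <linux/spinlock.h>
#include "zram_drv.h"

#ifdef CONFIG_PREEMPT_RT_BASE
static void zram_slot_lock(struct zram *zram, u32 index)
{
	spin_lock(&zram->table[index].lock);	/* RT-only field */
}

static void zram_slot_unlock(struct zram *zram, u32 index)
{
	spin_unlock(&zram->table[index].lock);
}
#endif
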
diff --git a/patches/drivers-tty-fix-omap-lock-crap.patch b/patches/drivers-tty-fix-omap-lock-crap.patch
index 5a66912fe84b..e204ae10a1bc 100644
--- a/patches/drivers-tty-fix-omap-lock-crap.patch
+++ b/patches/drivers-tty-fix-omap-lock-crap.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
-@@ -1312,13 +1312,10 @@ serial_omap_console_write(struct console
+@@ -1311,13 +1311,10 @@ serial_omap_console_write(struct console
pm_runtime_get_sync(up->dev);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* First save the IER then disable the interrupts
-@@ -1347,8 +1344,7 @@ serial_omap_console_write(struct console
+@@ -1346,8 +1343,7 @@ serial_omap_console_write(struct console
pm_runtime_mark_last_busy(up->dev);
pm_runtime_put_autosuspend(up->dev);
if (locked)
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index ceec63cd1644..c39243eb17fd 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -62,23 +62,23 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -574,6 +574,7 @@ static int zram_decompress_page(struct z
+@@ -871,6 +871,7 @@ static int __zram_bvec_read(struct zram
unsigned long handle;
unsigned int size;
void *src, *dst;
+ struct zcomp_strm *zstrm;
- if (zram_same_page_read(zram, index, page, 0, PAGE_SIZE))
- return 0;
-@@ -582,6 +583,7 @@ static int zram_decompress_page(struct z
- handle = zram_get_handle(zram, index);
+ if (zram_wb_enabled(zram)) {
+ zram_slot_lock(zram, index);
+@@ -905,6 +906,7 @@ static int __zram_bvec_read(struct zram
+
size = zram_get_obj_size(zram, index);
+ zstrm = zcomp_stream_get(zram->comp);
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -589,14 +591,13 @@ static int zram_decompress_page(struct z
+@@ -912,14 +914,13 @@ static int __zram_bvec_read(struct zram
kunmap_atomic(dst);
ret = 0;
} else {
diff --git a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
index c12fc3be285e..9b8583a4f173 100644
--- a/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
+++ b/patches/drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
-@@ -35,6 +35,7 @@
+@@ -36,6 +36,7 @@
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
#include <drm/drm_plane_helper.h>
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include <drm/i915_drm.h>
-@@ -66,7 +67,7 @@ int intel_usecs_to_scanlines(const struc
+@@ -67,7 +68,7 @@ int intel_usecs_to_scanlines(const struc
}
#define VBLANK_EVASION_TIME_US 100
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* intel_pipe_update_start() - start update of a set of display registers
* @crtc: the crtc of which the registers are going to be updated
-@@ -101,7 +102,7 @@ void intel_pipe_update_start(struct inte
+@@ -102,7 +103,7 @@ void intel_pipe_update_start(struct inte
VBLANK_EVASION_TIME_US);
max = vblank_start - 1;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (min <= 0 || max <= 0)
return;
-@@ -131,11 +132,11 @@ void intel_pipe_update_start(struct inte
+@@ -132,11 +133,11 @@ void intel_pipe_update_start(struct inte
break;
}
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
finish_wait(wq, &wait);
-@@ -206,7 +207,7 @@ void intel_pipe_update_end(struct intel_
+@@ -201,7 +202,7 @@ void intel_pipe_update_end(struct intel_
crtc->base.state->event = NULL;
}
diff --git a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
index 8aa5a875b887..7d2fd54c34be 100644
--- a/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
+++ b/patches/drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
-@@ -868,6 +868,7 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -867,6 +867,7 @@ static bool i915_get_crtc_scanoutpos(str
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -919,6 +920,7 @@ static bool i915_get_crtc_scanoutpos(str
+@@ -918,6 +919,7 @@ static bool i915_get_crtc_scanoutpos(str
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
-@@ -1854,6 +1854,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1839,6 +1839,7 @@ int radeon_get_crtc_scanoutpos(struct dr
struct radeon_device *rdev = dev->dev_private;
/* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Get optional system timestamp before query. */
if (stime)
-@@ -1946,6 +1947,7 @@ int radeon_get_crtc_scanoutpos(struct dr
+@@ -1931,6 +1932,7 @@ int radeon_get_crtc_scanoutpos(struct dr
*etime = ktime_get();
/* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index 59eaf2091ed7..112840171a19 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
-@@ -581,9 +583,9 @@ static int kiocb_cancel(struct aio_kiocb
+@@ -588,9 +590,9 @@ static int kiocb_cancel(struct aio_kiocb
return cancel(&kiocb->common);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_debug("freeing %p\n", ctx);
-@@ -602,8 +604,8 @@ static void free_ioctx_reqs(struct percp
+@@ -609,8 +611,8 @@ static void free_ioctx_reqs(struct percp
if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count))
complete(&ctx->rq_wait->comp);
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -611,9 +613,9 @@ static void free_ioctx_reqs(struct percp
+@@ -618,9 +620,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -632,6 +634,14 @@ static void free_ioctx_users(struct perc
+@@ -639,6 +641,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-convert-two-mroe-BH_Uptodate_Lock-related-bitspin.patch b/patches/fs-convert-two-mroe-BH_Uptodate_Lock-related-bitspin.patch
deleted file mode 100644
index bc8fad5f6ca0..000000000000
--- a/patches/fs-convert-two-mroe-BH_Uptodate_Lock-related-bitspin.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Mon, 6 Nov 2017 18:45:30 +0100
-Subject: [PATCH] fs: convert two more BH_Uptodate_Lock related bitspinlocks
-
-We convert all BH_Uptodate_Lock based bit-spinlocks to use
-bh_uptodate_lock_irqsave() instead. Those two were introduced after the
-initial change in -RT and were not noticed before.
-
-Cc: stable-rt@vger.kernel.org
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- fs/ext4/page-io.c | 6 ++----
- fs/xfs/xfs_aops.c | 6 ++----
- 2 files changed, 4 insertions(+), 8 deletions(-)
-
-diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
-index c2fce4478cca..a8e94791990d 100644
---- a/fs/ext4/page-io.c
-+++ b/fs/ext4/page-io.c
-@@ -94,8 +94,7 @@ static void ext4_finish_bio(struct bio *bio)
- * We check all buffers in the page under BH_Uptodate_Lock
- * to avoid races with other end io clearing async_write flags
- */
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
-+ flags = bh_uptodate_lock_irqsave(head);
- do {
- if (bh_offset(bh) < bio_start ||
- bh_offset(bh) + bh->b_size > bio_end) {
-@@ -107,8 +106,7 @@ static void ext4_finish_bio(struct bio *bio)
- if (bio->bi_status)
- buffer_io_error(bh);
- } while ((bh = bh->b_this_page) != head);
-- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(head, flags);
- if (!under_io) {
- #ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (data_page)
-diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
-index 41b767ecfe50..e35ad09124ab 100644
---- a/fs/xfs/xfs_aops.c
-+++ b/fs/xfs/xfs_aops.c
-@@ -107,8 +107,7 @@ xfs_finish_page_writeback(
- ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
- ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
-
-- local_irq_save(flags);
-- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
-+ flags = bh_uptodate_lock_irqsave(head);
- do {
- if (off >= bvec->bv_offset &&
- off < bvec->bv_offset + bvec->bv_len) {
-@@ -130,8 +129,7 @@ xfs_finish_page_writeback(
- }
- off += bh->b_size;
- } while ((bh = bh->b_this_page) != head);
-- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
-- local_irq_restore(flags);
-+ bh_uptodate_unlock_irqrestore(head, flags);
-
- if (!busy)
- end_page_writeback(bvec->bv_page);
---
-2.15.0
-
diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index 69f27721fa9c..68808ecef1a1 100644
--- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -651,7 +651,7 @@ struct inode {
+@@ -655,7 +655,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
diff --git a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
index 64fb26097e2d..8f93cfbc89ed 100644
--- a/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
+++ b/patches/fs-dcache-use-cpu-chill-in-trylock-loops.patch
@@ -17,14 +17,14 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/autofs4/autofs_i.h
+++ b/fs/autofs4/autofs_i.h
-@@ -32,6 +32,7 @@
+@@ -20,6 +20,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
#include <linux/namei.h>
+#include <linux/delay.h>
- #include <asm/current.h>
#include <linux/uaccess.h>
-
+ #include <linux/mutex.h>
+ #include <linux/spinlock.h>
--- a/fs/autofs4/expire.c
+++ b/fs/autofs4/expire.c
@@ -148,7 +148,7 @@ static struct dentry *get_next_positive_
diff --git a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
index eeb7b926e1c8..7072eedc1d53 100644
--- a/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
+++ b/patches/fs-dcache-use-swait_queue-instead-of-waitqueue.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_HLIST_NODE(&dentry->d_u.d_alias);
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
-@@ -1191,7 +1191,7 @@ static int fuse_direntplus_link(struct f
+@@ -1187,7 +1187,7 @@ static int fuse_direntplus_link(struct f
struct inode *dir = d_inode(parent);
struct fuse_conn *fc;
struct inode *inode;
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/fs/namei.c
+++ b/fs/namei.c
-@@ -1628,7 +1628,7 @@ static struct dentry *lookup_slow(const
+@@ -1637,7 +1637,7 @@ static struct dentry *lookup_slow(const
{
struct dentry *dentry = ERR_PTR(-ENOENT), *old;
struct inode *inode = dir->d_inode;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
inode_lock_shared(inode);
/* Don't go there if it's already dead */
-@@ -3101,7 +3101,7 @@ static int lookup_open(struct nameidata
+@@ -3110,7 +3110,7 @@ static int lookup_open(struct nameidata
struct dentry *dentry;
int error, create_error = 0;
umode_t mode = op->mode;
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct iattr attr = { .ia_valid = ATTR_OPEN };
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
-@@ -12,7 +12,7 @@
+@@ -13,7 +13,7 @@
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/sched.h>
@@ -140,7 +140,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/namei.h>
#include <linux/fsnotify.h>
-@@ -205,7 +205,7 @@ nfs_async_unlink(struct dentry *dentry,
+@@ -206,7 +206,7 @@ nfs_async_unlink(struct dentry *dentry,
goto out_free_name;
}
data->res.dir_attr = &data->dir_attr;
@@ -151,7 +151,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(&dentry->d_lock);
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
-@@ -1876,7 +1876,7 @@ bool proc_fill_cache(struct file *file,
+@@ -1878,7 +1878,7 @@ bool proc_fill_cache(struct file *file,
child = d_hash_and_lookup(dir, &qname);
if (!child) {
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto end_instantiate;
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
-@@ -678,7 +678,7 @@ static bool proc_sys_fill_cache(struct f
+@@ -679,7 +679,7 @@ static bool proc_sys_fill_cache(struct f
child = d_lookup(dir, &qname);
if (!child) {
@@ -173,7 +173,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
-@@ -106,7 +106,7 @@ struct dentry {
+@@ -107,7 +107,7 @@ struct dentry {
union {
struct list_head d_lru; /* LRU list */
@@ -182,7 +182,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
struct list_head d_child; /* child of parent list */
struct list_head d_subdirs; /* our children */
-@@ -236,7 +236,7 @@ extern void d_set_d_op(struct dentry *de
+@@ -237,7 +237,7 @@ extern void d_set_d_op(struct dentry *de
extern struct dentry * d_alloc(struct dentry *, const struct qstr *);
extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *);
extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *,
@@ -193,7 +193,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern struct dentry * d_exact_alias(struct dentry *, struct inode *);
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
-@@ -1529,7 +1529,7 @@ struct nfs_unlinkdata {
+@@ -1530,7 +1530,7 @@ struct nfs_unlinkdata {
struct nfs_removeargs args;
struct nfs_removeres res;
struct dentry *dentry;
@@ -204,9 +204,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
long timeout;
--- a/kernel/sched/swait.c
+++ b/kernel/sched/swait.c
-@@ -74,6 +74,7 @@ void swake_up_all(struct swait_queue_hea
- if (!swait_active(q))
- return;
+@@ -69,6 +69,7 @@ void swake_up_all(struct swait_queue_hea
+ struct swait_queue *curr;
+ LIST_HEAD(tmp);
+ WARN_ON(irqs_disabled());
raw_spin_lock_irq(&q->lock);
diff --git a/patches/fs-jbd-replace-bh_state-lock.patch b/patches/fs-jbd-replace-bh_state-lock.patch
index 764a557c72bd..a56bc690915f 100644
--- a/patches/fs-jbd-replace-bh_state-lock.patch
+++ b/patches/fs-jbd-replace-bh_state-lock.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
-@@ -77,6 +77,10 @@ struct buffer_head {
+@@ -78,6 +78,10 @@ struct buffer_head {
atomic_t b_count; /* users using this buffer_head */
#ifdef CONFIG_PREEMPT_RT_BASE
spinlock_t b_uptodate_lock;
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
};
-@@ -108,6 +112,10 @@ static inline void buffer_head_init_lock
+@@ -109,6 +113,10 @@ static inline void buffer_head_init_lock
{
#ifdef CONFIG_PREEMPT_RT_BASE
spin_lock_init(&bh->b_uptodate_lock);
diff --git a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
index b7acd7238af8..28bc54ccc9ea 100644
--- a/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
+++ b/patches/fs-nfs-turn-rmdir_sem-into-a-semaphore.patch
@@ -47,21 +47,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
trace_nfs_rmdir_exit(dir, dentry, error);
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
-@@ -2015,7 +2015,11 @@ static void init_once(void *foo)
- nfsi->nrequests = 0;
- nfsi->commit_info.ncommit = 0;
+@@ -2014,7 +2014,11 @@ static void init_once(void *foo)
+ atomic_long_set(&nfsi->nrequests, 0);
+ atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
+#ifdef CONFIG_PREEMPT_RT_BASE
+ sema_init(&nfsi->rmdir_sem, 1);
+#else
init_rwsem(&nfsi->rmdir_sem);
+#endif
+ mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
}
-
--- a/fs/nfs/unlink.c
+++ b/fs/nfs/unlink.c
-@@ -51,6 +51,29 @@ static void nfs_async_unlink_done(struct
+@@ -52,6 +52,29 @@ static void nfs_async_unlink_done(struct
rpc_restart_call_prepare(task);
}
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* nfs_async_unlink_release - Release the sillydelete data.
* @task: rpc_task of the sillydelete
-@@ -64,7 +87,7 @@ static void nfs_async_unlink_release(voi
+@@ -65,7 +88,7 @@ static void nfs_async_unlink_release(voi
struct dentry *dentry = data->dentry;
struct super_block *sb = dentry->d_sb;
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
d_lookup_done(dentry);
nfs_free_unlinkdata(data);
dput(dentry);
-@@ -117,10 +140,10 @@ static int nfs_call_unlink(struct dentry
+@@ -118,10 +141,10 @@ static int nfs_call_unlink(struct dentry
struct inode *dir = d_inode(dentry->d_parent);
struct dentry *alias;
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
if (!d_in_lookup(alias)) {
-@@ -142,7 +165,7 @@ static int nfs_call_unlink(struct dentry
+@@ -143,7 +166,7 @@ static int nfs_call_unlink(struct dentry
ret = 0;
spin_unlock(&alias->d_lock);
dput(alias);
@@ -129,10 +129,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
+#ifdef CONFIG_PREEMPT_RT_BASE
-+ struct semaphore rmdir_sem;
++ struct semaphore rmdir_sem;
+#else
struct rw_semaphore rmdir_sem;
+#endif
+ struct mutex commit_mutex;
#if IS_ENABLED(CONFIG_NFS_V4)
- struct nfs4_cached_acl *nfs4_acl;
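
The 23 lines this hunk adds after nfs_async_unlink_done() are small config-neutral wrappers around rmdir_sem. A minimal sketch, assuming the wrapper names nfs_down_anon()/nfs_up_anon() (RT has no non-owner rwsem read primitives, hence the plain semaphore there):

    /*
     * Sketch of the assumed wrappers: on RT rmdir_sem becomes a plain
     * semaphore, and callers go through helpers that compile to the
     * right primitive for each configuration.
     */
    #ifdef CONFIG_PREEMPT_RT_BASE
    static void nfs_down_anon(struct semaphore *sema)
    {
            down(sema);
    }

    static void nfs_up_anon(struct semaphore *sema)
    {
            up(sema);
    }
    #else
    static void nfs_down_anon(struct rw_semaphore *rwsem)
    {
            down_read_non_owner(rwsem);
    }

    static void nfs_up_anon(struct rw_semaphore *rwsem)
    {
            up_read_non_owner(rwsem);
    }
    #endif
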
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index 0b65a1fbbd8e..8a30673b0269 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -8,9 +8,11 @@ replacements with a real spinlock.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
fs/buffer.c | 21 +++++++--------------
+ fs/ext4/page-io.c | 6 ++----
fs/ntfs/aops.c | 10 +++-------
+ fs/xfs/xfs_aops.c | 6 ++----
include/linux/buffer_head.h | 34 ++++++++++++++++++++++++++++++++++
- 3 files changed, 44 insertions(+), 21 deletions(-)
+ 5 files changed, 48 insertions(+), 29 deletions(-)
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -73,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3410,6 +3402,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3409,6 +3401,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
@@ -81,6 +83,28 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
+--- a/fs/ext4/page-io.c
++++ b/fs/ext4/page-io.c
+@@ -95,8 +95,7 @@ static void ext4_finish_bio(struct bio *
+ * We check all buffers in the page under BH_Uptodate_Lock
+ * to avoid races with other end io clearing async_write flags
+ */
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
++ flags = bh_uptodate_lock_irqsave(head);
+ do {
+ if (bh_offset(bh) < bio_start ||
+ bh_offset(bh) + bh->b_size > bio_end) {
+@@ -108,8 +107,7 @@ static void ext4_finish_bio(struct bio *
+ if (bio->bi_status)
+ buffer_io_error(bh);
+ } while ((bh = bh->b_this_page) != head);
+- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(head, flags);
+ if (!under_io) {
+ #ifdef CONFIG_EXT4_FS_ENCRYPTION
+ if (data_page)
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -108,8 +108,7 @@ static void ntfs_end_buffer_async_read(s
@@ -114,9 +138,31 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
+--- a/fs/xfs/xfs_aops.c
++++ b/fs/xfs/xfs_aops.c
+@@ -120,8 +120,7 @@ xfs_finish_page_writeback(
+ ASSERT(bvec->bv_offset + bvec->bv_len <= PAGE_SIZE);
+ ASSERT((bvec->bv_len & (i_blocksize(inode) - 1)) == 0);
+
+- local_irq_save(flags);
+- bit_spin_lock(BH_Uptodate_Lock, &head->b_state);
++ flags = bh_uptodate_lock_irqsave(head);
+ do {
+ if (off >= bvec->bv_offset &&
+ off < bvec->bv_offset + bvec->bv_len) {
+@@ -143,8 +142,7 @@ xfs_finish_page_writeback(
+ }
+ off += bh->b_size;
+ } while ((bh = bh->b_this_page) != head);
+- bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);
+- local_irq_restore(flags);
++ bh_uptodate_unlock_irqrestore(head, flags);
+
+ if (!busy)
+ end_page_writeback(bvec->bv_page);
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
-@@ -75,8 +75,42 @@ struct buffer_head {
+@@ -76,8 +76,42 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
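
The 34 lines this hunk adds to buffer_head.h are the helper pair used by all the conversions above (including the ext4/xfs hunks folded in here). A sketch of the pattern — on PREEMPT_RT_BASE the real spinlock added to struct buffer_head replaces the irq-off bit-spinlock, so the critical section stays preemptible:

    /*
     * Sketch of the helper pair behind the conversions above: mainline
     * keeps the BH_Uptodate_Lock bit-spinlock with interrupts off, RT
     * uses the spinlock_t b_uptodate_lock from struct buffer_head.
     */
    static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
    {
            unsigned long flags;

    #ifndef CONFIG_PREEMPT_RT_BASE
            local_irq_save(flags);
            bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
    #else
            spin_lock_irqsave(&bh->b_uptodate_lock, flags);
    #endif
            return flags;
    }

    static inline void
    bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
    {
    #ifndef CONFIG_PREEMPT_RT_BASE
            bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
            local_irq_restore(flags);
    #else
            spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
    #endif
    }
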
diff --git a/patches/ftrace-Fix-trace-header-alignment.patch b/patches/ftrace-Fix-trace-header-alignment.patch
index 0b4219fd36f5..7ecf9031c9ed 100644
--- a/patches/ftrace-Fix-trace-header-alignment.patch
+++ b/patches/ftrace-Fix-trace-header-alignment.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -3340,17 +3340,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3343,17 +3343,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
diff --git a/patches/ftrace-migrate-disable-tracing.patch b/patches/ftrace-migrate-disable-tracing.patch
index 440b4d1186d7..28e98df2a88f 100644
--- a/patches/ftrace-migrate-disable-tracing.patch
+++ b/patches/ftrace-migrate-disable-tracing.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -61,6 +61,8 @@ struct trace_entry {
+@@ -62,6 +62,8 @@ struct trace_entry {
unsigned char flags;
unsigned char preempt_count;
int pid;
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TRACE_EVENT_TYPE_MAX \
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2138,6 +2138,8 @@ tracing_generic_entry_update(struct trac
+@@ -2141,6 +2141,8 @@ tracing_generic_entry_update(struct trac
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
-@@ -3341,9 +3343,10 @@ static void print_lat_help_header(struct
+@@ -3344,9 +3346,10 @@ static void print_lat_help_header(struct
"# | / _----=> need-resched \n"
"# || / _---=> hardirq/softirq \n"
"# ||| / _--=> preempt-depth \n"
diff --git a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
index 88b60858cf80..77da6fac4046 100644
--- a/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
+++ b/patches/futex-Ensure-lock-unlock-symetry-versus-pi_lock-and-.patch
@@ -30,14 +30,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -934,8 +934,10 @@ void exit_pi_state_list(struct task_stru
+@@ -936,7 +936,9 @@ void exit_pi_state_list(struct task_stru
if (head->next != next) {
/* retain curr->pi_lock for the loop invariant */
raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
+ raw_spin_unlock_irq(&curr->pi_lock);
spin_unlock(&hb->lock);
- put_pi_state(pi_state);
+ raw_spin_lock_irq(&curr->pi_lock);
+ put_pi_state(pi_state);
continue;
}
-
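
In short: hb->lock is a sleeping spinlock on RT and must not be operated on while the raw, irq-disabling pi_lock is held. A minimal sketch of the rule this hunk restores (lock names taken from the hunk above):

    /*
     * RT lock-ordering rule: drop the raw, irq-disabling lock before
     * touching a lock that sleeps on RT, then reacquire it afterwards.
     */
    raw_spin_unlock_irq(&curr->pi_lock);    /* raw lock: drop it first    */
    spin_unlock(&hb->lock);                 /* sleeping spinlock on RT    */
    raw_spin_lock_irq(&curr->pi_lock);      /* restore the loop invariant */
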
diff --git a/patches/futex-requeue-pi-fix.patch b/patches/futex-requeue-pi-fix.patch
index b1a5cb410d76..bb564ccad17e 100644
--- a/patches/futex-requeue-pi-fix.patch
+++ b/patches/futex-requeue-pi-fix.patch
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1743,6 +1744,35 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1730,6 +1731,35 @@ int __rt_mutex_start_proxy_lock(struct r
if (try_to_take_rt_mutex(lock, task, NULL))
return 1;
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
RT_MUTEX_FULL_CHAINWALK);
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -100,6 +100,7 @@ enum rtmutex_chainwalk {
+@@ -130,6 +130,7 @@ enum rtmutex_chainwalk {
* PI-futex support (proxy locking functions, etc.):
*/
#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1)
diff --git a/patches/futex-workaround-migrate_disable-enable-in-different.patch b/patches/futex-workaround-migrate_disable-enable-in-different.patch
index 91e40a4f0a70..fd989f88a371 100644
--- a/patches/futex-workaround-migrate_disable-enable-in-different.patch
+++ b/patches/futex-workaround-migrate_disable-enable-in-different.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2682,9 +2682,18 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2762,9 +2762,18 @@ static int futex_lock_pi(u32 __user *uad
* lock handoff sequence.
*/
raw_spin_lock_irq(&q.pi_state->pi_mutex.wait_lock);
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret) {
if (ret == 1)
-@@ -2828,11 +2837,21 @@ static int futex_unlock_pi(u32 __user *u
+@@ -2911,11 +2920,21 @@ static int futex_unlock_pi(u32 __user *u
* observed.
*/
raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock);
diff --git a/patches/genirq-disable-irqpoll-on-rt.patch b/patches/genirq-disable-irqpoll-on-rt.patch
index 9aa92515d4d2..e696a2cd84d4 100644
--- a/patches/genirq-disable-irqpoll-on-rt.patch
+++ b/patches/genirq-disable-irqpoll-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
-@@ -444,6 +444,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
+@@ -445,6 +445,10 @@ MODULE_PARM_DESC(noirqdebug, "Disable ir
static int __init irqfixup_setup(char *str)
{
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
irqfixup = 1;
printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
printk(KERN_WARNING "This may impact system performance.\n");
-@@ -456,6 +460,10 @@ module_param(irqfixup, int, 0644);
+@@ -457,6 +461,10 @@ module_param(irqfixup, int, 0644);
static int __init irqpoll_setup(char *str)
{
diff --git a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
index d40310049a41..8e2ef01633c5 100644
--- a/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
+++ b/patches/genirq-do-not-invoke-the-affinity-callback-via-a-wor.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -14,6 +14,7 @@
+@@ -15,6 +15,7 @@
#include <linux/hrtimer.h>
#include <linux/kref.h>
#include <linux/workqueue.h>
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/atomic.h>
#include <asm/ptrace.h>
-@@ -227,6 +228,7 @@ extern void resume_device_irqs(void);
+@@ -229,6 +230,7 @@ extern void resume_device_irqs(void);
* struct irq_affinity_notify - context for notification of IRQ affinity changes
* @irq: Interrupt to which notification applies
* @kref: Reference count, for internal use
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* @work: Work item, for internal use
* @notify: Function to be called on change. This will be
* called in process context.
-@@ -238,7 +240,11 @@ extern void resume_device_irqs(void);
+@@ -240,7 +242,11 @@ extern void resume_device_irqs(void);
struct irq_affinity_notify {
unsigned int irq;
struct kref kref;
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
};
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -212,7 +212,12 @@ int irq_set_affinity_locked(struct irq_d
+@@ -226,7 +226,12 @@ int irq_set_affinity_locked(struct irq_d
if (desc->affinity_notify) {
kref_get(&desc->affinity_notify->kref);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
irqd_set(data, IRQD_AFFINITY_SET);
-@@ -250,10 +255,8 @@ int irq_set_affinity_hint(unsigned int i
+@@ -264,10 +269,8 @@ int irq_set_affinity_hint(unsigned int i
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct irq_desc *desc = irq_to_desc(notify->irq);
cpumask_var_t cpumask;
unsigned long flags;
-@@ -275,6 +278,35 @@ static void irq_affinity_notify(struct w
+@@ -289,6 +292,35 @@ static void irq_affinity_notify(struct w
kref_put(&notify->kref, notify->release);
}
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* irq_set_affinity_notifier - control notification of IRQ affinity changes
* @irq: Interrupt for which to enable/disable notification
-@@ -303,7 +335,12 @@ irq_set_affinity_notifier(unsigned int i
+@@ -317,7 +349,12 @@ irq_set_affinity_notifier(unsigned int i
if (notify) {
notify->irq = irq;
kref_init(&notify->kref);
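
The work_struct-to-swork_event switch above moves the notification off the global workqueue. Roughly, and assuming the swork API this queue introduces elsewhere (swork_queue() on RT), the queueing site ends up shaped like this:

    /*
     * Sketch of the queueing site, API assumed: on RT the notification
     * is handed to the swork kthread so the callback runs in plain
     * task context; mainline keeps the workqueue path.
     */
    if (desc->affinity_notify) {
            kref_get(&desc->affinity_notify->kref);
    #ifdef CONFIG_PREEMPT_RT_BASE
            swork_queue(&desc->affinity_notify->swork);
    #else
            schedule_work(&desc->affinity_notify->work);
    #endif
    }
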
diff --git a/patches/genirq-force-threading.patch b/patches/genirq-force-threading.patch
index 3c2f363dbab8..9fddc86f9fc0 100644
--- a/patches/genirq-force-threading.patch
+++ b/patches/genirq-force-threading.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -427,9 +427,13 @@ extern int irq_set_irqchip_state(unsigne
+@@ -429,9 +429,13 @@ extern int irq_set_irqchip_state(unsigne
bool state);
#ifdef CONFIG_IRQ_FORCED_THREADING
diff --git a/patches/genirq-update-irq_set_irqchip_state-documentation.patch b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
index 2c91e2981913..d67be5fbfb1a 100644
--- a/patches/genirq-update-irq_set_irqchip_state-documentation.patch
+++ b/patches/genirq-update-irq_set_irqchip_state-documentation.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -2163,7 +2163,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
+@@ -2191,7 +2191,7 @@ EXPORT_SYMBOL_GPL(irq_get_irqchip_state)
* This call sets the internal irqchip state of an interrupt,
* depending on the value of @which.
*
diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
index 6a251600ddf0..ad8ba4623d56 100644
--- a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
+++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -13,16 +13,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -66,7 +66,7 @@ struct cpuhp_cpu_state {
-
- static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
+@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(struct cpuhp_cpu_s
+ .fail = CPUHP_INVALID,
+ };
-#ifdef CONFIG_HOTPLUG_CPU
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PREEMPT_RT_FULL)
static DEFINE_PER_CPU(struct rt_rw_lock, cpuhp_pin_lock) = \
__RWLOCK_RT_INITIALIZER(cpuhp_pin_lock);
#endif
-@@ -221,6 +221,7 @@ static int cpu_hotplug_disabled;
+@@ -296,6 +296,7 @@ static int cpu_hotplug_disabled;
*/
void pin_current_cpu(void)
{
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin;
unsigned int cpu;
int ret;
-@@ -245,6 +246,7 @@ void pin_current_cpu(void)
+@@ -320,6 +321,7 @@ void pin_current_cpu(void)
goto again;
}
current->pinned_on_cpu = cpu;
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -252,6 +254,7 @@ void pin_current_cpu(void)
+@@ -327,6 +329,7 @@ void pin_current_cpu(void)
*/
void unpin_current_cpu(void)
{
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_rw_lock *cpuhp_pin = this_cpu_ptr(&cpuhp_pin_lock);
if (WARN_ON(current->pinned_on_cpu != smp_processor_id()))
-@@ -259,6 +262,7 @@ void unpin_current_cpu(void)
+@@ -334,6 +337,7 @@ void unpin_current_cpu(void)
current->pinned_on_cpu = -1;
__read_rt_unlock(cpuhp_pin);
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -656,7 +660,9 @@ static int take_cpu_down(void *_param)
+@@ -803,7 +807,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -670,14 +676,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -817,14 +823,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -83,8 +83,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -696,7 +706,9 @@ static int takedown_cpu(unsigned int cpu
- wait_for_completion(&st->done);
+@@ -843,7 +853,9 @@ static int takedown_cpu(unsigned int cpu
+ wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
+#ifdef CONFIG_PREEMPT_RT_FULL
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index 4bea0e315b14..0007927a92b8 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
-@@ -108,6 +108,8 @@ extern void cpu_hotplug_disable(void);
+@@ -109,6 +109,8 @@ extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* CONFIG_HOTPLUG_CPU */
-@@ -118,6 +120,9 @@ static inline void cpus_read_unlock(void
+@@ -119,6 +121,9 @@ static inline void cpus_read_unlock(void
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Wrappers which go away once all code is converted */
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
-@@ -211,6 +211,21 @@ static int cpu_hotplug_disabled;
+@@ -286,6 +286,21 @@ static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6868,6 +6868,7 @@ void migrate_disable(void)
+@@ -6912,6 +6912,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -6930,12 +6931,15 @@ void migrate_enable(void)
+@@ -6974,12 +6975,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
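
pin_current_cpu() takes the per-CPU hotplug reader lock so the CPU can't be torn down while a migrate-disabled task sits on it. In outline, following the migrate_disable() hunk above:

    /*
     * Sketch of the pinning pattern in migrate_disable(): the per-CPU
     * reader lock keeps takedown_cpu() (the writer) out until the task
     * leaves the migrate-disabled section via unpin_current_cpu().
     */
    preempt_disable();
    pin_current_cpu();                      /* block hotplug of this CPU */
    migrate_disable_update_cpus_allowed(p);
    p->migrate_disable = 1;
    preempt_enable();
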
diff --git a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
index b174e8f5e74a..b49402d12e57 100644
--- a/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
+++ b/patches/hrtimer-Move-schedule_work-call-to-helper-thread.patch
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -692,6 +692,29 @@ static void hrtimer_switch_to_hres(void)
+@@ -728,6 +728,29 @@ static void hrtimer_switch_to_hres(void)
retrigger_next_event(NULL);
}
@@ -81,7 +81,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void clock_was_set_work(struct work_struct *work)
{
clock_was_set();
-@@ -707,6 +730,7 @@ void clock_was_set_delayed(void)
+@@ -743,6 +766,7 @@ void clock_was_set_delayed(void)
{
schedule_work(&hrtimer_work);
}
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index 9675e77b314b..e4aea3733924 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -2,9 +2,8 @@ From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 3 Jul 2009 08:44:31 -0500
Subject: hrtimer: by timers by default into the softirq context
-We can't have hrtimers callbacks running in haradirq context on RT. Therefore
-CLOCK_MONOTONIC (and so on) are mapped by default to CLOCK_MONOTONIC_SOFT
-behaviour (and are invoked in softirq context).
+We can't have hrtimer callbacks running in hardirq context on RT. Therefore
+the timers are deferred to the softirq context by default.
There are few timers which expect to be run in hardirq context even on RT.
Those are:
- very short running where low latency is critical (kvm lapic)
@@ -13,67 +12,68 @@ Those are:
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/kvm/lapic.c | 2 -
- include/linux/hrtimer.h | 6 +++++
- kernel/events/core.c | 4 +--
- kernel/sched/core.c | 2 -
- kernel/sched/deadline.c | 2 -
- kernel/sched/rt.c | 4 +--
- kernel/time/hrtimer.c | 41 ++++++++++++++++++++++++++++++++++-
- kernel/time/tick-broadcast-hrtimer.c | 2 -
- kernel/time/tick-sched.c | 2 -
- kernel/watchdog.c | 2 -
- 10 files changed, 56 insertions(+), 11 deletions(-)
+ arch/x86/kvm/lapic.c | 2 +-
+ include/linux/hrtimer.h | 6 ++++++
+ kernel/events/core.c | 4 ++--
+ kernel/sched/core.c | 2 +-
+ kernel/sched/deadline.c | 2 +-
+ kernel/sched/rt.c | 4 ++--
+ kernel/time/hrtimer.c | 34 +++++++++++++++++++++++++++++++---
+ kernel/time/tick-broadcast-hrtimer.c | 2 +-
+ kernel/time/tick-sched.c | 2 +-
+ kernel/watchdog.c | 2 +-
+ 10 files changed, 47 insertions(+), 13 deletions(-)
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
-@@ -2085,7 +2085,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
- }
+@@ -2093,7 +2093,7 @@ int kvm_create_lapic(struct kvm_vcpu *vc
apic->vcpu = vcpu;
-- hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
-+ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC_HARD,
- HRTIMER_MODE_ABS_PINNED);
+ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+- HRTIMER_MODE_ABS_PINNED);
++ HRTIMER_MODE_ABS_PINNED_HARD);
apic->lapic_timer.timer.function = apic_timer_fn;
+ /*
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -29,12 +29,18 @@
- * are kernel internal and never exported to user space.
- */
- #define HRTIMER_BASE_SOFT_MASK MAX_CLOCKS
-+#define HRTIMER_BASE_HARD_MASK (MAX_CLOCKS << 1)
-
- #define CLOCK_REALTIME_SOFT (CLOCK_REALTIME | HRTIMER_BASE_SOFT_MASK)
- #define CLOCK_MONOTONIC_SOFT (CLOCK_MONOTONIC | HRTIMER_BASE_SOFT_MASK)
- #define CLOCK_BOOTTIME_SOFT (CLOCK_BOOTTIME | HRTIMER_BASE_SOFT_MASK)
- #define CLOCK_TAI_SOFT (CLOCK_TAI | HRTIMER_BASE_SOFT_MASK)
-
-+#define CLOCK_REALTIME_HARD (CLOCK_REALTIME | HRTIMER_BASE_HARD_MASK)
-+#define CLOCK_MONOTONIC_HARD (CLOCK_MONOTONIC | HRTIMER_BASE_HARD_MASK)
-+#define CLOCK_BOOTTIME_HARD (CLOCK_BOOTTIME | HRTIMER_BASE_HARD_MASK)
-+#define CLOCK_TAI_HARD (CLOCK_TAI | HRTIMER_BASE_HARD_MASK)
+@@ -42,6 +42,7 @@ enum hrtimer_mode {
+ HRTIMER_MODE_REL = 0x01,
+ HRTIMER_MODE_PINNED = 0x02,
+ HRTIMER_MODE_SOFT = 0x04,
++ HRTIMER_MODE_HARD = 0x08,
+
+ HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED,
+ HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED,
+@@ -52,6 +53,11 @@ enum hrtimer_mode {
+ HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT,
+ HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT,
+
++ HRTIMER_MODE_ABS_HARD = HRTIMER_MODE_ABS | HRTIMER_MODE_HARD,
++ HRTIMER_MODE_REL_HARD = HRTIMER_MODE_REL | HRTIMER_MODE_HARD,
+
- struct hrtimer_clock_base;
- struct hrtimer_cpu_base;
++ HRTIMER_MODE_ABS_PINNED_HARD = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_HARD,
++ HRTIMER_MODE_REL_PINNED_HARD = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_HARD,
+ };
+ /*
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
-@@ -1040,7 +1040,7 @@ static void __perf_mux_hrtimer_init(stru
+@@ -1042,7 +1042,7 @@ static void __perf_mux_hrtimer_init(stru
cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
raw_spin_lock_init(&cpuctx->hrtimer_lock);
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
-+ hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS_PINNED);
++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD);
timer->function = perf_mux_hrtimer_handler;
}
-@@ -8653,7 +8653,7 @@ static void perf_swevent_init_hrtimer(st
+@@ -8705,7 +8705,7 @@ static void perf_swevent_init_hrtimer(st
if (!is_sampling_event(event))
return;
- hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
++ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hwc->hrtimer.function = perf_swevent_hrtimer;
/*
@@ -84,92 +84,66 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
- hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
++ hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
rq->hrtick_timer.function = hrtick;
}
#else /* CONFIG_SCHED_HRTICK */
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -1021,7 +1021,7 @@ void init_dl_task_timer(struct sched_dl_
+@@ -1020,7 +1020,7 @@ void init_dl_task_timer(struct sched_dl_
{
struct hrtimer *timer = &dl_se->dl_timer;
- hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
++ hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
timer->function = dl_task_timer;
}
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -46,8 +46,8 @@ void init_rt_bandwidth(struct rt_bandwid
+@@ -47,8 +47,8 @@ void init_rt_bandwidth(struct rt_bandwid
raw_spin_lock_init(&rt_b->rt_runtime_lock);
- hrtimer_init(&rt_b->rt_period_timer,
- CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC_HARD,
-+ HRTIMER_MODE_REL);
++ hrtimer_init(&rt_b->rt_period_timer, CLOCK_MONOTONIC,
++ HRTIMER_MODE_REL_HARD);
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -123,20 +123,32 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base,
- }
- };
-
--#define MAX_CLOCKS_HRT (MAX_CLOCKS * 2)
-+#define MAX_CLOCKS_HRT (MAX_CLOCKS * 3)
+@@ -423,7 +423,7 @@ static inline void debug_hrtimer_activat
+ * match, when a timer is started via__hrtimer_start_range_ns().
+ */
+ if (modecheck)
+- WARN_ON_ONCE(!(mode & HRTIMER_MODE_SOFT) ^ !timer->is_soft);
++	WARN_ON_ONCE((mode & HRTIMER_MODE_SOFT) && !timer->is_soft);
- static const int hrtimer_clock_to_base_table[MAX_CLOCKS_HRT] = {
- /* Make sure we catch unsupported clockids */
- [0 ... MAX_CLOCKS_HRT - 1] = HRTIMER_MAX_CLOCK_BASES,
+ debug_object_activate(timer, &hrtimer_debug_descr);
+ }
+@@ -1247,10 +1247,17 @@ static inline int hrtimer_clockid_to_bas
+ static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
+ enum hrtimer_mode mode)
+ {
+- bool softtimer = !!(mode & HRTIMER_MODE_SOFT);
+- int base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
++ bool softtimer;
++ int base;
+ struct hrtimer_cpu_base *cpu_base;
++ softtimer = !!(mode & HRTIMER_MODE_SOFT);
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME_SOFT,
-+ [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC_SOFT,
-+ [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME_SOFT,
-+ [CLOCK_TAI] = HRTIMER_BASE_TAI_SOFT,
-+#else
- [CLOCK_REALTIME] = HRTIMER_BASE_REALTIME,
- [CLOCK_MONOTONIC] = HRTIMER_BASE_MONOTONIC,
- [CLOCK_BOOTTIME] = HRTIMER_BASE_BOOTTIME,
- [CLOCK_TAI] = HRTIMER_BASE_TAI,
++ if (!softtimer && !(mode & HRTIMER_MODE_HARD))
++ softtimer = true;
+#endif
- [CLOCK_REALTIME_SOFT] = HRTIMER_BASE_REALTIME_SOFT,
- [CLOCK_MONOTONIC_SOFT] = HRTIMER_BASE_MONOTONIC_SOFT,
- [CLOCK_BOOTTIME_SOFT] = HRTIMER_BASE_BOOTTIME_SOFT,
- [CLOCK_TAI_SOFT] = HRTIMER_BASE_TAI_SOFT,
++ base = softtimer ? HRTIMER_MAX_CLOCK_BASES / 2 : 0;
+
-+ [CLOCK_REALTIME_HARD] = HRTIMER_BASE_REALTIME,
-+ [CLOCK_MONOTONIC_HARD] = HRTIMER_BASE_MONOTONIC,
-+ [CLOCK_BOOTTIME_HARD] = HRTIMER_BASE_BOOTTIME,
-+ [CLOCK_TAI_HARD] = HRTIMER_BASE_TAI,
- };
+ memset(timer, 0, sizeof(struct hrtimer));
- /*
-@@ -1201,7 +1213,11 @@ static inline int hrtimer_clockid_to_bas
- return base;
- }
- WARN(1, "Invalid clockid %d. Using MONOTONIC\n", clock_id);
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ return HRTIMER_BASE_MONOTONIC_SOFT;
-+#else
- return HRTIMER_BASE_MONOTONIC;
-+#endif
- }
-
- static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
-@@ -1219,6 +1235,8 @@ static void __hrtimer_init(struct hrtime
- clock_id = CLOCK_MONOTONIC;
- else if (clock_id == CLOCK_REALTIME_SOFT)
- clock_id = CLOCK_MONOTONIC_SOFT;
-+ else if (clock_id == CLOCK_REALTIME_HARD)
-+ clock_id = CLOCK_MONOTONIC_HARD;
- }
-
- base = hrtimer_clockid_to_base(clock_id);
-@@ -1589,11 +1607,32 @@ static enum hrtimer_restart hrtimer_wake
+ cpu_base = raw_cpu_ptr(&hrtimer_bases);
+@@ -1630,11 +1637,32 @@ static enum hrtimer_restart hrtimer_wake
return HRTIMER_NORESTART;
}
@@ -192,11 +166,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct *task)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (!(clock_id & (HRTIMER_BASE_HARD_MASK | HRTIMER_BASE_SOFT_MASK))) {
++ if (!(mode & (HRTIMER_MODE_SOFT | HRTIMER_MODE_HARD))) {
+ if (task_is_realtime(current) || system_state != SYSTEM_RUNNING)
-+ clock_id |= HRTIMER_BASE_HARD_MASK;
++ mode |= HRTIMER_MODE_HARD;
+ else
-+ clock_id |= HRTIMER_BASE_SOFT_MASK;
++ mode |= HRTIMER_MODE_SOFT;
+ }
+#endif
__hrtimer_init(&sl->timer, clock_id, mode);
@@ -204,12 +178,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sl->task = task;
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
-@@ -105,7 +105,7 @@ static enum hrtimer_restart bc_handler(s
+@@ -106,7 +106,7 @@ static enum hrtimer_restart bc_handler(s
void tick_setup_hrtimer_broadcast(void)
{
- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-+ hrtimer_init(&bctimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
++ hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
bctimer.function = bc_handler;
clockevents_register_device(&ce_broadcast_hrtimer);
}
@@ -220,18 +194,18 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Emulate tick processing via per-CPU hrtimers:
*/
- hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
-+ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_ABS);
++ hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per-CPU) */
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
-@@ -450,7 +450,7 @@ static void watchdog_enable(unsigned int
- struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
-
- /* kick off the timer for the hardlockup detector */
+@@ -462,7 +462,7 @@ static void watchdog_enable(unsigned int
+ * Start the timer first to prevent the NMI watchdog triggering
+ * before the timer has a chance to fire.
+ */
- hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-+ hrtimer_init(hrtimer, CLOCK_MONOTONIC_HARD, HRTIMER_MODE_REL);
++ hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
hrtimer->function = watchdog_timer_fn;
-
- /* Enable the perf event */
+ hrtimer_start(hrtimer, ns_to_ktime(sample_period),
+ HRTIMER_MODE_REL_PINNED);
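
Since this patch defers all hrtimers to softirq context by default, the handful of hardirq-critical users now select their expiry context via the mode argument at init time, as the conversions above show. A minimal usage sketch (timer, callback, and setup names are illustrative):

    static struct hrtimer my_timer;                         /* hypothetical */
    static enum hrtimer_restart my_timer_fn(struct hrtimer *t); /* hypothetical */

    static void my_setup(void)
    {
            /* Expire in hardirq context even on RT: pick it via the mode. */
            hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
            my_timer.function = my_timer_fn;
            hrtimer_start(&my_timer, ms_to_ktime(10), HRTIMER_MODE_REL);
    }
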
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index b7d8a519040b..a1acd0853d91 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -2756,10 +2756,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -2800,10 +2800,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
break;
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
-@@ -358,10 +358,17 @@ DECLARE_PER_CPU(struct tick_device, tick
+@@ -363,10 +363,17 @@ DECLARE_PER_CPU(struct tick_device, tick
/* Initialize timers: */
extern void hrtimer_init(struct hrtimer *timer, clockid_t which_clock,
enum hrtimer_mode mode);
@@ -51,7 +51,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void destroy_hrtimer_on_stack(struct hrtimer *timer);
#else
-@@ -371,6 +378,15 @@ static inline void hrtimer_init_on_stack
+@@ -376,6 +383,15 @@ static inline void hrtimer_init_on_stack
{
hrtimer_init(timer, which_clock, mode);
}
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
#endif
-@@ -472,9 +488,6 @@ extern long hrtimer_nanosleep(const stru
+@@ -478,9 +494,6 @@ extern long hrtimer_nanosleep(const stru
const enum hrtimer_mode mode,
const clockid_t clockid);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int schedule_hrtimeout_range_clock(ktime_t *expires,
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
-@@ -482,8 +482,8 @@ do { \
+@@ -486,8 +486,8 @@ do { \
int __ret = 0; \
struct hrtimer_sleeper __t; \
\
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
current->timer_slack_ns, \
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2511,10 +2511,9 @@ static int futex_wait(u32 __user *uaddr,
+@@ -2588,10 +2588,9 @@ static int futex_wait(u32 __user *uaddr,
if (abs_time) {
to = &timeout;
@@ -106,7 +106,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_set_expires_range_ns(&to->timer, *abs_time,
current->timer_slack_ns);
}
-@@ -2610,9 +2609,8 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2690,9 +2689,8 @@ static int futex_lock_pi(u32 __user *uad
if (time) {
to = &timeout;
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_set_expires(&to->timer, *time);
}
-@@ -3022,10 +3020,9 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3108,10 +3106,9 @@ static int futex_wait_requeue_pi(u32 __u
if (abs_time) {
to = &timeout;
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1561,13 +1561,44 @@ static enum hrtimer_restart hrtimer_wake
+@@ -1602,13 +1602,44 @@ static enum hrtimer_restart hrtimer_wake
return HRTIMER_NORESTART;
}
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
-@@ -1591,8 +1622,6 @@ static int __sched do_nanosleep(struct h
+@@ -1632,8 +1663,6 @@ static int __sched do_nanosleep(struct h
{
struct restart_block *restart;
@@ -189,7 +189,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
do {
set_current_state(TASK_INTERRUPTIBLE);
hrtimer_start_expires(&t->timer, mode);
-@@ -1629,10 +1658,9 @@ static long __sched hrtimer_nanosleep_re
+@@ -1670,10 +1699,9 @@ static long __sched hrtimer_nanosleep_re
struct hrtimer_sleeper t;
int ret;
@@ -202,7 +202,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = do_nanosleep(&t, HRTIMER_MODE_ABS);
destroy_hrtimer_on_stack(&t.timer);
return ret;
-@@ -1650,7 +1678,7 @@ long hrtimer_nanosleep(const struct time
+@@ -1691,7 +1719,7 @@ long hrtimer_nanosleep(const struct time
if (dl_task(current) || rt_task(current))
slack = 0;
@@ -211,7 +211,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
hrtimer_set_expires_range_ns(&t.timer, timespec64_to_ktime(*rqtp), slack);
ret = do_nanosleep(&t, mode);
if (ret != -ERESTART_RESTARTBLOCK)
-@@ -1842,11 +1870,9 @@ schedule_hrtimeout_range_clock(ktime_t *
+@@ -1876,11 +1904,9 @@ schedule_hrtimeout_range_clock(ktime_t *
return -EINTR;
}
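
A usage sketch of the consolidated call, assuming the signature hrtimer_init_sleeper_on_stack(sl, clock_id, mode, task): one call replaces the hrtimer_init_on_stack() + hrtimer_init_sleeper() pair removed throughout the hunks above.

    struct hrtimer_sleeper to;

    hrtimer_init_sleeper_on_stack(&to, CLOCK_MONOTONIC, HRTIMER_MODE_ABS,
                                  current);
    hrtimer_set_expires(&to.timer, expires);    /* expires: caller-supplied */
    /* ... start the timer and schedule() ... */
    destroy_hrtimer_on_stack(&to.timer);
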
diff --git a/patches/hrtimer-soft-bases-timekeeping.patch b/patches/hrtimer-soft-bases-timekeeping.patch
deleted file mode 100644
index 57efe6f96c2c..000000000000
--- a/patches/hrtimer-soft-bases-timekeeping.patch
+++ /dev/null
@@ -1,31 +0,0 @@
-Subject: hrtimer: Update offset for soft bases
-From: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Date: Fri, 06 Oct 2017 11:28:38 +0200
-
-The offset of the clock bases is done via timekeeping mechanisms. The
-offsets of the soft bases has to be considered as well.
-
-Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- kernel/time/hrtimer.c | 8 +++++++-
- 1 file changed, 7 insertions(+), 1 deletion(-)
-
---- a/kernel/time/hrtimer.c
-+++ b/kernel/time/hrtimer.c
-@@ -551,8 +551,14 @@ static inline ktime_t hrtimer_update_bas
- ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
- ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
-
-- return ktime_get_update_offsets_now(&base->clock_was_set_seq,
-+ ktime_t now = ktime_get_update_offsets_now(&base->clock_was_set_seq,
- offs_real, offs_boot, offs_tai);
-+
-+ base->clock_base[HRTIMER_BASE_REALTIME_SOFT].offset = *offs_real;
-+ base->clock_base[HRTIMER_BASE_BOOTTIME_SOFT].offset = *offs_boot;
-+ base->clock_base[HRTIMER_BASE_TAI_SOFT].offset = *offs_tai;
-+
-+ return now;
- }
-
- /*
diff --git a/patches/hrtimers-prepare-full-preemption.patch b/patches/hrtimers-prepare-full-preemption.patch
index 42712528a35c..f4deeba67351 100644
--- a/patches/hrtimers-prepare-full-preemption.patch
+++ b/patches/hrtimers-prepare-full-preemption.patch
@@ -23,19 +23,19 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/timerqueue.h>
+#include <linux/wait.h>
- /*
- * Clock ids for hrtimers which expire in softirq context. These clock ids
-@@ -212,6 +213,9 @@ struct hrtimer_cpu_base {
+ struct hrtimer_clock_base;
+ struct hrtimer_cpu_base;
+@@ -215,6 +216,9 @@ struct hrtimer_cpu_base {
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
+#ifdef CONFIG_PREEMPT_RT_BASE
+ wait_queue_head_t wait;
+#endif
+ struct hrtimer *softirq_next_timer;
struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES];
} ____cacheline_aligned;
-
-@@ -426,6 +430,13 @@ static inline void hrtimer_restart(struc
+@@ -432,6 +436,13 @@ static inline void hrtimer_restart(struc
hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
}
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Query timers: */
extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
-@@ -450,7 +461,7 @@ static inline int hrtimer_is_queued(stru
+@@ -456,7 +467,7 @@ static inline int hrtimer_is_queued(stru
* Helper function to check, whether the timer is running the callback
* function
*/
@@ -60,7 +60,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -869,6 +869,33 @@ u64 hrtimer_forward(struct hrtimer *time
+@@ -930,6 +930,33 @@ u64 hrtimer_forward(struct hrtimer *time
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* enqueue_hrtimer - internal function to (re)start a timer
*
-@@ -1116,7 +1143,7 @@ int hrtimer_cancel(struct hrtimer *timer
+@@ -1156,7 +1183,7 @@ int hrtimer_cancel(struct hrtimer *timer
if (ret >= 0)
return ret;
@@ -103,15 +103,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
-@@ -1392,6 +1419,7 @@ static __latent_entropy void hrtimer_run
+@@ -1430,6 +1457,7 @@ static __latent_entropy void hrtimer_run
hrtimer_update_softirq_timer(cpu_base, true);
- raw_spin_unlock_irq(&cpu_base->lock);
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ wake_up_timer_waiters(cpu_base);
}
#ifdef CONFIG_HIGH_RES_TIMERS
-@@ -1751,6 +1779,9 @@ int hrtimers_prepare_cpu(unsigned int cp
+@@ -1792,6 +1820,9 @@ int hrtimers_prepare_cpu(unsigned int cp
cpu_base->hres_active = 0;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
@@ -123,7 +123,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
-@@ -213,6 +213,7 @@ int do_setitimer(int which, struct itime
+@@ -214,6 +214,7 @@ int do_setitimer(int which, struct itime
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
spin_unlock_irq(&tsk->sighand->siglock);
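
The wait queue added to hrtimer_cpu_base above backs the cancel path on RT: instead of busy-spinning while the callback runs (the softirq running it may be preempted), the canceling task sleeps until the callback finishes. In outline, a sketch of the resulting loop:

    /*
     * Sketch of the RT cancel path: hrtimer_wait_for_timer() sleeps on
     * the per-CPU wait queue (woken by wake_up_timer_waiters() above)
     * instead of the mainline cpu_relax() busy-wait.
     */
    int hrtimer_cancel(struct hrtimer *timer)
    {
            for (;;) {
                    int ret = hrtimer_try_to_cancel(timer);

                    if (ret >= 0)
                            return ret;
                    hrtimer_wait_for_timer(timer);  /* was: cpu_relax() */
            }
    }
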
diff --git a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch b/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
deleted file mode 100644
index a1ce806d4d4a..000000000000
--- a/patches/i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Clark Williams <williams@redhat.com>
-Date: Tue, 26 May 2015 10:43:43 -0500
-Subject: i915: bogus warning from i915 when running on PREEMPT_RT
-
-The i915 driver has a 'WARN_ON(!in_interrupt())' in the display
-handler, which whines constanly on the RT kernel (since the interrupt
-is actually handled in a threaded handler and not actual interrupt
-context).
-
-Change the WARN_ON to WARN_ON_NORT
-
-Tested-by: Joakim Hernberg <jhernberg@alchemy.lu>
-Signed-off-by: Clark Williams <williams@redhat.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/gpu/drm/i915/intel_display.c | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
---- a/drivers/gpu/drm/i915/intel_display.c
-+++ b/drivers/gpu/drm/i915/intel_display.c
-@@ -10691,7 +10691,7 @@ void intel_check_page_flip(struct drm_i9
- struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- struct intel_flip_work *work;
-
-- WARN_ON(!in_interrupt());
-+ WARN_ON_NONRT(!in_interrupt());
-
- if (crtc == NULL)
- return;
diff --git a/patches/iommu-amd--Use-WARN_ON_NORT.patch b/patches/iommu-amd--Use-WARN_ON_NORT.patch
index 74bba3545877..77e6c4702ba5 100644
--- a/patches/iommu-amd--Use-WARN_ON_NORT.patch
+++ b/patches/iommu-amd--Use-WARN_ON_NORT.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
-@@ -2170,10 +2170,10 @@ static int __attach_device(struct iommu_
+@@ -1944,10 +1944,10 @@ static int __attach_device(struct iommu_
int ret;
/*
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* lock domain */
spin_lock(&domain->lock);
-@@ -2341,10 +2341,10 @@ static void __detach_device(struct iommu
+@@ -2115,10 +2115,10 @@ static void __detach_device(struct iommu
struct protection_domain *domain;
/*
diff --git a/patches/iommu-amd-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-for.patch b/patches/iommu-amd-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-for.patch
deleted file mode 100644
index cee4e44c11dd..000000000000
--- a/patches/iommu-amd-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-for.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Tue, 5 Sep 2017 14:11:41 +0200
-Subject: iommu/amd: Use raw_cpu_ptr() instead of get_cpu_ptr() for
- ->flush_queue
-
-get_cpu_ptr() disabled preemption and returns the ->flush_queue object
-of the current CPU. raw_cpu_ptr() does the same except that it not
-disable preemption which means the scheduler can move it to another CPU
-after it obtained the per-CPU object.
-In this case this is not bad because the data structure itself is
-protected with a spin_lock. This change shouldn't matter however on RT
-it does because the sleeping lock can't be accessed with disabled
-preemption.
-
-Cc: rt-stable@vger.kernel.org
-Cc: Joerg Roedel <joro@8bytes.org>
-Cc: iommu@lists.linux-foundation.org
-Reported-by: Vinod Adhikary <vinadhy@gmail.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/iommu/amd_iommu.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
---- a/drivers/iommu/amd_iommu.c
-+++ b/drivers/iommu/amd_iommu.c
-@@ -1909,7 +1909,7 @@ static void queue_add(struct dma_ops_dom
- pages = __roundup_pow_of_two(pages);
- address >>= PAGE_SHIFT;
-
-- queue = get_cpu_ptr(dom->flush_queue);
-+ queue = raw_cpu_ptr(dom->flush_queue);
- spin_lock_irqsave(&queue->lock, flags);
-
- /*
-@@ -1938,8 +1938,6 @@ static void queue_add(struct dma_ops_dom
-
- if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0)
- mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10));
--
-- put_cpu_ptr(dom->flush_queue);
- }
-
- static void queue_flush_timeout(unsigned long data)
diff --git a/patches/iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch b/patches/iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch
new file mode 100644
index 000000000000..7c1776429b40
--- /dev/null
+++ b/patches/iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch
@@ -0,0 +1,42 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Sep 2017 17:21:40 +0200
+Subject: [PATCH] iommu/iova: Use raw_cpu_ptr() instead of get_cpu_ptr() for
+ ->fq
+
+get_cpu_ptr() disables preemption and returns the ->fq object of the
+current CPU. raw_cpu_ptr() does the same except that it does not disable
+preemption, which means the scheduler can move the task to another CPU
+after it has obtained the per-CPU object.
+In this case this is not bad because the data structure itself is
+protected with a spin_lock. This change shouldn't matter on mainline but
+on RT it does, because the sleeping lock can't be accessed with
+preemption disabled.
+
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: iommu@lists.linux-foundation.org
+Reported-by: vinadhy@gmail.com
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/iommu/iova.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/drivers/iommu/iova.c
++++ b/drivers/iommu/iova.c
+@@ -570,7 +570,7 @@ void queue_iova(struct iova_domain *iova
+ unsigned long pfn, unsigned long pages,
+ unsigned long data)
+ {
+- struct iova_fq *fq = get_cpu_ptr(iovad->fq);
++ struct iova_fq *fq = raw_cpu_ptr(iovad->fq);
+ unsigned long flags;
+ unsigned idx;
+
+@@ -600,8 +600,6 @@ void queue_iova(struct iova_domain *iova
+ if (atomic_cmpxchg(&iovad->fq_timer_on, 0, 1) == 0)
+ mod_timer(&iovad->fq_timer,
+ jiffies + msecs_to_jiffies(IOVA_FQ_TIMEOUT));
+-
+- put_cpu_ptr(iovad->fq);
+ }
+ EXPORT_SYMBOL_GPL(queue_iova);
+
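Both iommu patches apply the same per-CPU access pattern. Reduced to a
minimal sketch it looks like this (illustrative only; apart from the
percpu and spinlock helpers, the struct and function names are made up):

#include <linux/percpu.h>
#include <linux/spinlock.h>

struct pcpu_queue {
	spinlock_t lock;
	/* ... queued entries ... */
};

static void queue_add_sketch(struct pcpu_queue __percpu *queues)
{
	struct pcpu_queue *q;
	unsigned long flags;

	/*
	 * raw_cpu_ptr() picks the current CPU's object without
	 * disabling preemption. A migration right after this line is
	 * harmless: the object is protected by its lock, not by
	 * staying on the CPU it belongs to.
	 */
	q = raw_cpu_ptr(queues);
	spin_lock_irqsave(&q->lock, flags);
	/* ... enqueue ... */
	spin_unlock_irqrestore(&q->lock, flags);
	/*
	 * No put_cpu_ptr() counterpart: preemption was never
	 * disabled. With get_cpu_ptr() the lock above - a sleeping
	 * lock on RT - would have to be taken with preemption
	 * disabled, which RT forbids.
	 */
}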
diff --git a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
index f12cb2184064..750bcd05e654 100644
--- a/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
+++ b/patches/irq-allow-disabling-of-softirq-processing-in-irq-thread-context.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -61,6 +61,7 @@
+@@ -63,6 +63,7 @@
* interrupt handler after suspending interrupts. For system
* wakeup devices users need to implement wakeup detection in
* their interrupt handlers.
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
#define IRQF_SHARED 0x00000080
#define IRQF_PROBE_SHARED 0x00000100
-@@ -74,6 +75,7 @@
+@@ -76,6 +77,7 @@
#define IRQF_NO_THREAD 0x00010000
#define IRQF_EARLY_RESUME 0x00020000
#define IRQF_COND_SUSPEND 0x00040000
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
-@@ -73,6 +73,7 @@ enum irqchip_irq_state;
+@@ -74,6 +74,7 @@ enum irqchip_irq_state;
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
* mechanism and from core side polling.
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* IRQ_DISABLE_UNLAZY - Disable lazy irq disable
*/
enum {
-@@ -100,13 +101,14 @@ enum {
+@@ -101,13 +102,14 @@ enum {
IRQ_PER_CPU_DEVID = (1 << 17),
IRQ_IS_POLLED = (1 << 18),
IRQ_DISABLE_UNLAZY = (1 << 19),
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -861,7 +861,15 @@ irq_forced_thread_fn(struct irq_desc *de
+@@ -885,7 +885,15 @@ irq_forced_thread_fn(struct irq_desc *de
local_bh_disable();
ret = action->thread_fn(action->irq, action->dev_id);
irq_finalize_oneshot(desc, action);
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -1338,6 +1346,9 @@ static int
+@@ -1362,6 +1370,9 @@ static int
irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
}
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
-@@ -16,6 +16,7 @@ enum {
+@@ -17,6 +17,7 @@ enum {
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
_IRQ_IS_POLLED = IRQ_IS_POLLED,
_IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY,
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
_IRQF_MODIFY_MASK = IRQF_MODIFY_MASK,
};
-@@ -30,6 +31,7 @@ enum {
+@@ -31,6 +32,7 @@ enum {
#define IRQ_PER_CPU_DEVID GOT_YOU_MORON
#define IRQ_IS_POLLED GOT_YOU_MORON
#define IRQ_DISABLE_UNLAZY GOT_YOU_MORON
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#undef IRQF_MODIFY_MASK
#define IRQF_MODIFY_MASK GOT_YOU_MORON
-@@ -40,6 +42,16 @@ irq_settings_clr_and_set(struct irq_desc
+@@ -41,6 +43,16 @@ irq_settings_clr_and_set(struct irq_desc
desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
}
diff --git a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
index 395ca4379d3f..46d867926cd9 100644
--- a/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
+++ b/patches/irqwork-Move-irq-safe-work-to-irq-context.patch
@@ -22,7 +22,7 @@ Cc: stable-rt@vger.kernel.org
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
-@@ -52,4 +52,10 @@ static inline bool irq_work_needs_cpu(vo
+@@ -53,4 +53,10 @@ static inline bool irq_work_needs_cpu(vo
static inline void irq_work_run(void) { }
#endif
@@ -55,7 +55,7 @@ Cc: stable-rt@vger.kernel.org
* Synchronize against the irq_work @entry, ensures the entry is not
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1623,7 +1623,7 @@ void update_process_times(int user_tick)
+@@ -1646,7 +1646,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -64,7 +64,7 @@ Cc: stable-rt@vger.kernel.org
if (in_irq())
irq_work_tick();
#endif
-@@ -1664,9 +1664,7 @@ static __latent_entropy void run_timer_s
+@@ -1687,9 +1687,7 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/irqwork-push_most_work_into_softirq_context.patch b/patches/irqwork-push_most_work_into_softirq_context.patch
index d6526ec63f33..c4db79964cf9 100644
--- a/patches/irqwork-push_most_work_into_softirq_context.patch
+++ b/patches/irqwork-push_most_work_into_softirq_context.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
-@@ -16,6 +16,7 @@
+@@ -17,6 +17,7 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
#define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -102,6 +102,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
+@@ -103,6 +103,7 @@ void init_rt_rq(struct rt_rq *rt_rq)
rt_rq->push_cpu = nr_cpu_ids;
raw_spin_lock_init(&rt_rq->push_lock);
init_irq_work(&rt_rq->push_work, push_irq_work_func);
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1623,7 +1623,7 @@ void update_process_times(int user_tick)
+@@ -1646,7 +1646,7 @@ void update_process_times(int user_tick)
scheduler_tick();
run_local_timers();
rcu_check_callbacks(user_tick);
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (in_irq())
irq_work_tick();
#endif
-@@ -1664,6 +1664,9 @@ static __latent_entropy void run_timer_s
+@@ -1687,6 +1687,9 @@ static __latent_entropy void run_timer_s
{
struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);
diff --git a/patches/jump-label-rt.patch b/patches/jump-label-rt.patch
index 9dca87b1226e..782de0c79b37 100644
--- a/patches/jump-label-rt.patch
+++ b/patches/jump-label-rt.patch
@@ -24,7 +24,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
-@@ -44,7 +44,7 @@ config ARM
+@@ -45,7 +45,7 @@ config ARM
select HARDIRQS_SW_RESEND
select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT)
select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6
diff --git a/patches/kconfig-disable-a-few-options-rt.patch b/patches/kconfig-disable-a-few-options-rt.patch
index 2e4fb718ef49..eecb81a332a5 100644
--- a/patches/kconfig-disable-a-few-options-rt.patch
+++ b/patches/kconfig-disable-a-few-options-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/Kconfig
+++ b/arch/Kconfig
-@@ -16,6 +16,7 @@ config OPROFILE
+@@ -17,6 +17,7 @@ config OPROFILE
tristate "OProfile system profiling"
depends on PROFILING
depends on HAVE_OPROFILE
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
help
--- a/mm/Kconfig
+++ b/mm/Kconfig
-@@ -382,7 +382,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
+@@ -385,7 +385,7 @@ config NOMMU_INITIAL_TRIM_EXCESS
config TRANSPARENT_HUGEPAGE
bool "Transparent Hugepage Support"
diff --git a/patches/kconfig-preempt-rt-full.patch b/patches/kconfig-preempt-rt-full.patch
index 81c67840aa7b..9e5dfe30c5b7 100644
--- a/patches/kconfig-preempt-rt-full.patch
+++ b/patches/kconfig-preempt-rt-full.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Makefile
+++ b/init/Makefile
-@@ -35,4 +35,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts
+@@ -36,4 +36,4 @@ mounts-$(CONFIG_BLK_DEV_MD) += do_mounts
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
config PREEMPT_COUNT
--- a/scripts/mkcompile_h
+++ b/scripts/mkcompile_h
-@@ -4,7 +4,8 @@ TARGET=$1
+@@ -5,7 +5,8 @@ TARGET=$1
ARCH=$2
SMP=$3
PREEMPT=$4
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
-@@ -57,6 +58,7 @@ UTS_VERSION="#$VERSION"
+@@ -58,6 +59,7 @@ UTS_VERSION="#$VERSION"
CONFIG_FLAGS=""
if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
diff --git a/patches/kernel-SRCU-provide-a-static-initializer.patch b/patches/kernel-SRCU-provide-a-static-initializer.patch
index 25eaaebde5ce..5c14f9fc082f 100644
--- a/patches/kernel-SRCU-provide-a-static-initializer.patch
+++ b/patches/kernel-SRCU-provide-a-static-initializer.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
-@@ -6,7 +6,7 @@
+@@ -7,7 +7,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
*/
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef _LINUX_NOTIFIER_H
#define _LINUX_NOTIFIER_H
#include <linux/errno.h>
-@@ -42,9 +42,7 @@
+@@ -43,9 +43,7 @@
* in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
* As compensation, srcu_notifier_chain_unregister() is rather expensive.
* SRCU notifier chains should be used when the chain will be called very
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
struct notifier_block;
-@@ -90,7 +88,7 @@ struct srcu_notifier_head {
+@@ -91,7 +89,7 @@ struct srcu_notifier_head {
(name)->head = NULL; \
} while (0)
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
#define srcu_cleanup_notifier_head(name) \
cleanup_srcu_struct(&(name)->srcu);
-@@ -103,7 +101,13 @@ extern void srcu_init_notifier_head(stru
+@@ -104,7 +102,13 @@ extern void srcu_init_notifier_head(stru
.head = NULL }
#define RAW_NOTIFIER_INIT(name) { \
.head = NULL }
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define ATOMIC_NOTIFIER_HEAD(name) \
struct atomic_notifier_head name = \
-@@ -115,6 +119,26 @@ extern void srcu_init_notifier_head(stru
+@@ -116,6 +120,26 @@ extern void srcu_init_notifier_head(stru
struct raw_notifier_head name = \
RAW_NOTIFIER_INIT(name)
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef __KERNEL__
extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
-@@ -184,12 +208,12 @@ static inline int notifier_to_errno(int
+@@ -185,12 +209,12 @@ static inline int notifier_to_errno(int
/*
* Declared notifiers so far. I can imagine quite a few more chains
@@ -132,9 +132,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
-@@ -106,9 +106,9 @@ struct srcu_struct {
-
- void process_srcu(struct work_struct *work);
+@@ -104,9 +104,9 @@ struct srcu_struct {
+ #define SRCU_STATE_SCAN1 1
+ #define SRCU_STATE_SCAN2 2
-#define __SRCU_STRUCT_INIT(name) \
+#define __SRCU_STRUCT_INIT(name, pcpu_name) \
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \
.srcu_gp_seq_needed = 0 - 1, \
__SRCU_DEP_MAP_INIT(name) \
-@@ -135,7 +135,7 @@ void process_srcu(struct work_struct *wo
+@@ -133,7 +133,7 @@ struct srcu_struct {
*/
#define __DEFINE_SRCU(name, is_static) \
static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
diff --git a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
index e52743002fa9..139b7008627c 100644
--- a/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
+++ b/patches/kernel-printk-Don-t-try-to-print-from-IRQ-NMI-region.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1629,6 +1629,11 @@ static void call_console_drivers(const c
+@@ -1617,6 +1617,11 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
migrate_disable();
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
-@@ -2361,6 +2366,11 @@ void console_unblank(void)
+@@ -2349,6 +2354,11 @@ void console_unblank(void)
{
struct console *c;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index baad1c6e6095..d3890de2015e 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} while(0)
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
-@@ -176,7 +176,7 @@ asmlinkage long mipsmt_sys_sched_getaffi
+@@ -177,7 +177,7 @@ asmlinkage long mipsmt_sys_sched_getaffi
if (retval)
goto out_unlock;
@@ -126,7 +126,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
-@@ -1194,12 +1194,12 @@ static void mt_ase_fp_affinity(void)
+@@ -1192,12 +1192,12 @@ static void mt_ase_fp_affinity(void)
* restricted the allowed set to exclude any CPUs with FPUs,
* we'll skip the procedure.
*/
@@ -274,7 +274,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!find_hca(cpu, &unit) && unit >= 0)
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
-@@ -365,9 +365,9 @@ static inline void task_context_switch_c
+@@ -361,9 +361,9 @@ static inline void task_context_switch_c
static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
{
seq_printf(m, "Cpus_allowed:\t%*pb\n",
@@ -288,7 +288,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -240,7 +240,8 @@ extern struct cred init_cred;
+@@ -234,7 +234,8 @@ extern struct cred init_cred;
.static_prio = MAX_PRIO-20, \
.normal_prio = MAX_PRIO-20, \
.policy = SCHED_NORMAL, \
@@ -310,7 +310,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_PREEMPT_RCU
int rcu_read_lock_nesting;
-@@ -1274,7 +1275,7 @@ extern struct pid *cad_pid;
+@@ -1315,7 +1316,7 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
@@ -321,7 +321,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
-@@ -2079,7 +2079,7 @@ static void cpuset_fork(struct task_stru
+@@ -2094,7 +2094,7 @@ static void cpuset_fork(struct task_stru
if (task_css_is_root(task, cpuset_cgrp_id))
return;
@@ -332,7 +332,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -556,7 +556,8 @@ static struct task_struct *dup_task_stru
+@@ -564,7 +564,8 @@ static struct task_struct *dup_task_stru
#ifdef CONFIG_CC_STACKPROTECTOR
tsk->stack_canary = get_random_canary();
#endif
@@ -344,8 +344,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* parent)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -955,7 +955,7 @@ static struct rq *__migrate_task(struct
- return rq;
+@@ -960,7 +960,7 @@ static struct rq *__migrate_task(struct
+ }
/* Affinity changed (again). */
- if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed))
@@ -353,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return rq;
update_rq_clock(rq);
-@@ -983,7 +983,7 @@ static int migration_cpu_stop(void *data
+@@ -988,7 +988,7 @@ static int migration_cpu_stop(void *data
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
@@ -362,7 +362,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1014,7 +1014,7 @@ static int migration_cpu_stop(void *data
+@@ -1019,7 +1019,7 @@ static int migration_cpu_stop(void *data
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -371,7 +371,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-@@ -1084,7 +1084,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1089,7 +1089,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
@@ -380,7 +380,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1241,10 +1241,10 @@ static int migrate_swap_stop(void *data)
+@@ -1250,10 +1250,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -393,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1285,10 +1285,10 @@ int migrate_swap(struct task_struct *cur
+@@ -1294,10 +1294,10 @@ int migrate_swap(struct task_struct *cur
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -406,7 +406,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1432,7 +1432,7 @@ void kick_process(struct task_struct *p)
+@@ -1441,7 +1441,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1472,14 +1472,14 @@ static int select_fallback_rq(int cpu, s
+@@ -1481,14 +1481,14 @@ static int select_fallback_rq(int cpu, s
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -432,7 +432,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!(p->flags & PF_KTHREAD) && !cpu_active(dest_cpu))
continue;
if (!cpu_online(dest_cpu))
-@@ -1524,7 +1524,7 @@ static int select_fallback_rq(int cpu, s
+@@ -1533,7 +1533,7 @@ static int select_fallback_rq(int cpu, s
}
/*
@@ -441,7 +441,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1534,11 +1534,11 @@ int select_task_rq(struct task_struct *p
+@@ -1543,11 +1543,11 @@ int select_task_rq(struct task_struct *p
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -455,7 +455,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -1546,7 +1546,7 @@ int select_task_rq(struct task_struct *p
+@@ -1555,7 +1555,7 @@ int select_task_rq(struct task_struct *p
* [ this allows ->select_task() to simply return task_cpu(p) and
* not worry about this generic constraint ]
*/
@@ -464,7 +464,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!cpu_online(cpu)))
cpu = select_fallback_rq(task_cpu(p), p);
-@@ -2436,7 +2436,7 @@ void wake_up_new_task(struct task_struct
+@@ -2445,7 +2445,7 @@ void wake_up_new_task(struct task_struct
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -473,7 +473,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4128,7 +4128,7 @@ static int __sched_setscheduler(struct t
+@@ -4162,7 +4162,7 @@ static int __sched_setscheduler(struct t
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -482,7 +482,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4722,7 +4722,7 @@ long sched_getaffinity(pid_t pid, struct
+@@ -4756,7 +4756,7 @@ long sched_getaffinity(pid_t pid, struct
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -491,7 +491,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5277,7 +5277,7 @@ int task_can_attach(struct task_struct *
+@@ -5321,7 +5321,7 @@ int task_can_attach(struct task_struct *
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -500,7 +500,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5304,7 +5304,7 @@ int migrate_task_to(struct task_struct *
+@@ -5348,7 +5348,7 @@ int migrate_task_to(struct task_struct *
if (curr_cpu == target_cpu)
return 0;
@@ -509,8 +509,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5441,7 +5441,7 @@ static void migrate_tasks(struct rq *dea
- next->sched_class->put_prev_task(rq, next);
+@@ -5485,7 +5485,7 @@ static void migrate_tasks(struct rq *dea
+ put_prev_task(rq, next);
/*
- * Rules for changing task_struct::cpus_allowed are holding
@@ -520,19 +520,22 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
-@@ -128,10 +128,10 @@ int cpudl_find(struct cpudl *cp, struct
+@@ -127,13 +127,13 @@ int cpudl_find(struct cpudl *cp, struct
const struct sched_dl_entity *dl_se = &p->dl;
if (later_mask &&
- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) {
+ cpumask_and(later_mask, cp->free_cpus, p->cpus_ptr)) {
- best_cpu = cpumask_any(later_mask);
- goto out;
-- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) &&
-+ } else if (cpumask_test_cpu(cpudl_maximum(cp), p->cpus_ptr) &&
- dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
- best_cpu = cpudl_maximum(cp);
- if (later_mask)
+ return 1;
+ } else {
+ int best_cpu = cpudl_maximum(cp);
+ WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
+
+- if (cpumask_test_cpu(best_cpu, &p->cpus_allowed) &&
++ if (cpumask_test_cpu(best_cpu, p->cpus_ptr) &&
+ dl_time_before(dl_se->deadline, cp->elements[0].dl)) {
+ if (later_mask)
+ cpumask_set_cpu(best_cpu, later_mask);
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -103,11 +103,11 @@ int cpupri_find(struct cpupri *cp, struc
@@ -551,7 +554,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We have to ensure that we have at least one bit
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
-@@ -505,7 +505,7 @@ static struct rq *dl_task_offline_migrat
+@@ -504,7 +504,7 @@ static struct rq *dl_task_offline_migrat
* If we cannot preempt any rq, fall back to pick any
* online cpu.
*/
@@ -560,7 +563,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cpu >= nr_cpu_ids) {
/*
* Fail to find any suitable cpu.
-@@ -1760,7 +1760,7 @@ static void set_curr_task_dl(struct rq *
+@@ -1749,7 +1749,7 @@ static void set_curr_task_dl(struct rq *
static int pick_dl_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -569,7 +572,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
return 0;
}
-@@ -1909,7 +1909,7 @@ static struct rq *find_lock_later_rq(str
+@@ -1899,7 +1899,7 @@ static struct rq *find_lock_later_rq(str
/* Retry if something changed. */
if (double_lock_balance(rq, later_rq)) {
if (unlikely(task_rq(task) != rq ||
@@ -580,7 +583,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
!task_on_rq_queued(task))) {
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -1547,7 +1547,7 @@ static void task_numa_compare(struct tas
+@@ -1596,7 +1596,7 @@ static void task_numa_compare(struct tas
*/
if (cur) {
/* Skip this swap candidate if cannot move to the source cpu */
@@ -589,7 +592,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
/*
-@@ -1657,7 +1657,7 @@ static void task_numa_find_cpu(struct ta
+@@ -1706,7 +1706,7 @@ static void task_numa_find_cpu(struct ta
for_each_cpu(cpu, cpumask_of_node(env->dst_nid)) {
/* Skip this CPU if the source task cannot migrate */
@@ -598,7 +601,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
env->dst_cpu = cpu;
-@@ -5406,7 +5406,7 @@ find_idlest_group(struct sched_domain *s
+@@ -5475,7 +5475,7 @@ find_idlest_group(struct sched_domain *s
/* Skip over this group if it has no CPUs allowed */
if (!cpumask_intersects(sched_group_span(group),
@@ -607,7 +610,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
local_group = cpumask_test_cpu(this_cpu,
-@@ -5526,7 +5526,7 @@ find_idlest_cpu(struct sched_group *grou
+@@ -5595,7 +5595,7 @@ find_idlest_cpu(struct sched_group *grou
return cpumask_first(sched_group_span(group));
/* Traverse only the allowed CPUs */
@@ -616,7 +619,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (idle_cpu(i)) {
struct rq *rq = cpu_rq(i);
struct cpuidle_state *idle = idle_get_state(rq);
-@@ -5628,7 +5628,7 @@ static int select_idle_core(struct task_
+@@ -5697,7 +5697,7 @@ static int select_idle_core(struct task_
if (!test_idle_cores(target, false))
return -1;
@@ -625,7 +628,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_cpu_wrap(core, cpus, target) {
bool idle = true;
-@@ -5662,7 +5662,7 @@ static int select_idle_smt(struct task_s
+@@ -5731,7 +5731,7 @@ static int select_idle_smt(struct task_s
return -1;
for_each_cpu(cpu, cpu_smt_mask(target)) {
@@ -634,7 +637,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (idle_cpu(cpu))
return cpu;
-@@ -5725,7 +5725,7 @@ static int select_idle_cpu(struct task_s
+@@ -5794,7 +5794,7 @@ static int select_idle_cpu(struct task_s
for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
if (!--nr)
return -1;
@@ -643,7 +646,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
continue;
if (idle_cpu(cpu))
break;
-@@ -5880,7 +5880,7 @@ select_task_rq_fair(struct task_struct *
+@@ -5949,7 +5949,7 @@ select_task_rq_fair(struct task_struct *
if (sd_flag & SD_BALANCE_WAKE) {
record_wakee(p);
want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
@@ -652,7 +655,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
rcu_read_lock();
-@@ -6627,14 +6627,14 @@ int can_migrate_task(struct task_struct
+@@ -6698,14 +6698,14 @@ int can_migrate_task(struct task_struct
/*
* We do not migrate tasks that are:
* 1) throttled_lb_pair, or
@@ -669,7 +672,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int cpu;
schedstat_inc(p->se.statistics.nr_failed_migrations_affine);
-@@ -6654,7 +6654,7 @@ int can_migrate_task(struct task_struct
+@@ -6725,7 +6725,7 @@ int can_migrate_task(struct task_struct
/* Prevent to re-select dst_cpu via env's cpus */
for_each_cpu_and(cpu, env->dst_grpmask, env->cpus) {
@@ -678,7 +681,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env->flags |= LBF_DST_PINNED;
env->new_dst_cpu = cpu;
break;
-@@ -7221,7 +7221,7 @@ check_cpu_capacity(struct rq *rq, struct
+@@ -7294,7 +7294,7 @@ check_cpu_capacity(struct rq *rq, struct
/*
* Group imbalance indicates (and tries to solve) the problem where balancing
@@ -687,7 +690,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Imagine a situation of two groups of 4 cpus each and 4 tasks each with a
* cpumask covering 1 cpu of the first group and 3 cpus of the second group.
-@@ -7796,7 +7796,7 @@ static struct sched_group *find_busiest_
+@@ -7870,7 +7870,7 @@ static struct sched_group *find_busiest_
/*
* If the busiest group is imbalanced the below checks don't
* work because they assume all things are equal, which typically
@@ -696,7 +699,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (busiest->group_type == group_imbalanced)
goto force_balance;
-@@ -8181,7 +8181,7 @@ static int load_balance(int this_cpu, st
+@@ -8262,7 +8262,7 @@ static int load_balance(int this_cpu, st
* if the curr task on busiest cpu can't be
* moved to this_cpu
*/
@@ -707,7 +710,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
env.flags |= LBF_ALL_PINNED;
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
-@@ -1602,7 +1602,7 @@ static void put_prev_task_rt(struct rq *
+@@ -1603,7 +1603,7 @@ static void put_prev_task_rt(struct rq *
static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
{
if (!task_running(rq, p) &&
@@ -716,7 +719,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 1;
return 0;
}
-@@ -1737,7 +1737,7 @@ static struct rq *find_lock_lowest_rq(st
+@@ -1738,7 +1738,7 @@ static struct rq *find_lock_lowest_rq(st
* Also make sure that it wasn't scheduled on its rq.
*/
if (unlikely(task_rq(task) != rq ||
@@ -738,7 +741,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
get_online_cpus();
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
-@@ -22,7 +22,7 @@ notrace static unsigned int check_preemp
+@@ -23,7 +23,7 @@ notrace static unsigned int check_preemp
* Kernel threads bound to a single CPU can safely use
* smp_processor_id():
*/
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 06b0120ad5bd..be104163a798 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -16,15 +16,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -39,6 +39,7 @@
- #include <linux/mmu_notifier.h>
+@@ -40,6 +40,7 @@
+ #include <linux/hmm.h>
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/kprobes.h>
#include <linux/vmacache.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
-@@ -411,6 +412,15 @@ void __put_task_struct(struct task_struc
+@@ -417,6 +418,15 @@ void __put_task_struct(struct task_struc
WARN_ON(atomic_read(&tsk->usage));
WARN_ON(tsk == current);
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2687,15 +2687,6 @@ static struct rq *finish_task_switch(str
+@@ -2706,15 +2706,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/kgb-serial-hackaround.patch b/patches/kgb-serial-hackaround.patch
index 05ffbaaee595..c30aa95b25b4 100644
--- a/patches/kgb-serial-hackaround.patch
+++ b/patches/kgb-serial-hackaround.patch
@@ -32,8 +32,8 @@ Jason.
+#include <linux/kdb.h>
#include <linux/uaccess.h>
#include <linux/pm_runtime.h>
- #include <linux/timer.h>
-@@ -3191,6 +3192,8 @@ void serial8250_console_write(struct uar
+ #include <linux/ktime.h>
+@@ -3216,6 +3217,8 @@ void serial8250_console_write(struct uar
if (port->sysrq || oops_in_progress)
locked = 0;
diff --git a/patches/list_bl-fixup-bogus-lockdep-warning.patch b/patches/list_bl-fixup-bogus-lockdep-warning.patch
index 1583b5afad86..43ebad4b95f0 100644
--- a/patches/list_bl-fixup-bogus-lockdep-warning.patch
+++ b/patches/list_bl-fixup-bogus-lockdep-warning.patch
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
-@@ -42,13 +42,15 @@ struct hlist_bl_node {
+@@ -43,13 +43,15 @@ struct hlist_bl_node {
struct hlist_bl_node *next, **pprev;
};
diff --git a/patches/list_bl.h-make-list-head-locking-RT-safe.patch b/patches/list_bl.h-make-list-head-locking-RT-safe.patch
index 52df64023261..9ea1a600d985 100644
--- a/patches/list_bl.h-make-list-head-locking-RT-safe.patch
+++ b/patches/list_bl.h-make-list-head-locking-RT-safe.patch
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/list_bl.h
+++ b/include/linux/list_bl.h
-@@ -2,6 +2,7 @@
+@@ -3,6 +3,7 @@
#define _LINUX_LIST_BL_H
#include <linux/list.h>
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <linux/bit_spinlock.h>
/*
-@@ -32,13 +33,22 @@
+@@ -33,13 +34,22 @@
struct hlist_bl_head {
struct hlist_bl_node *first;
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
{
-@@ -118,12 +128,26 @@ static inline void hlist_bl_del_init(str
+@@ -119,12 +129,26 @@ static inline void hlist_bl_del_init(str
static inline void hlist_bl_lock(struct hlist_bl_head *b)
{
diff --git a/patches/local-irq-rt-depending-variants.patch b/patches/local-irq-rt-depending-variants.patch
index 2e0831532af2..f94a7389214b 100644
--- a/patches/local-irq-rt-depending-variants.patch
+++ b/patches/local-irq-rt-depending-variants.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -205,7 +205,7 @@ extern void devm_free_irq(struct device
+@@ -207,7 +207,7 @@ extern void devm_free_irq(struct device
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq() do { } while (0)
#else
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void disable_irq_nosync(unsigned int irq);
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
-@@ -148,4 +148,23 @@
+@@ -165,4 +165,23 @@ do { \
#define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags)
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 72cdd2b3c760..a02382e6df70 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt5
++-rt1
diff --git a/patches/lockdep-disable-self-test.patch b/patches/lockdep-disable-self-test.patch
index 4b751d021068..813d2f343bdf 100644
--- a/patches/lockdep-disable-self-test.patch
+++ b/patches/lockdep-disable-self-test.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
-@@ -1177,7 +1177,7 @@ config DEBUG_ATOMIC_SLEEP
+@@ -1199,7 +1199,7 @@ config DEBUG_ATOMIC_SLEEP
config DEBUG_LOCKING_API_SELFTESTS
bool "Locking API boot-time self-tests"
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index d8e8b5b00f19..c8e0d964649d 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -6,22 +6,30 @@ teach lockdep that we don't really do softirqs on -RT.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- include/linux/irqflags.h | 10 +++++++---
+ include/linux/irqflags.h | 26 +++++++++++++++-----------
kernel/locking/lockdep.c | 2 ++
- 2 files changed, 9 insertions(+), 3 deletions(-)
+ 2 files changed, 17 insertions(+), 11 deletions(-)
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
-@@ -25,8 +25,6 @@
- # define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
- # define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
- # define trace_hardirq_exit() do { current->hardirq_context--; } while (0)
--# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
--# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
+@@ -34,16 +34,6 @@ do { \
+ current->hardirq_context--; \
+ crossrelease_hist_end(XHLOCK_HARD); \
+ } while (0)
+-# define lockdep_softirq_enter() \
+-do { \
+- current->softirq_context++; \
+- crossrelease_hist_start(XHLOCK_SOFT); \
+-} while (0)
+-# define lockdep_softirq_exit() \
+-do { \
+- current->softirq_context--; \
+- crossrelease_hist_end(XHLOCK_SOFT); \
+-} while (0)
# define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1,
#else
# define trace_hardirqs_on() do { } while (0)
-@@ -39,9 +37,15 @@
+@@ -56,9 +46,23 @@ do { \
# define trace_softirqs_enabled(p) 0
# define trace_hardirq_enter() do { } while (0)
# define trace_hardirq_exit() do { } while (0)
@@ -29,8 +37,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
-+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
-+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0)
++# define lockdep_softirq_enter() \
++do { \
++ current->softirq_context++; \
++ crossrelease_hist_start(XHLOCK_SOFT); \
++} while (0)
++# define lockdep_softirq_exit() \
++do { \
++ current->softirq_context--; \
++ crossrelease_hist_end(XHLOCK_SOFT); \
++} while (0)
+#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
@@ -40,7 +56,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#if defined(CONFIG_IRQSOFF_TRACER) || \
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3778,6 +3778,7 @@ static void check_flags(unsigned long fl
+@@ -3917,6 +3917,7 @@ static void check_flags(unsigned long fl
}
}
@@ -48,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3792,6 +3793,7 @@ static void check_flags(unsigned long fl
+@@ -3931,6 +3932,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
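Read together, the + lines above leave the softirq hooks in irqflags.h
looking like this (a consolidated view; the closing #endif is implied
by the context lines rather than visible in the hunks):

#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
# define lockdep_softirq_enter()		\
do {						\
	current->softirq_context++;		\
	crossrelease_hist_start(XHLOCK_SOFT);	\
} while (0)
# define lockdep_softirq_exit()			\
do {						\
	current->softirq_context--;		\
	crossrelease_hist_end(XHLOCK_SOFT);	\
} while (0)
#else
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
#endif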
diff --git a/patches/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch b/patches/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
index 2ba4e59c7149..01cc757dd76d 100644
--- a/patches/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
+++ b/patches/lockdep-selftest-fix-warnings-due-to-missing-PREEMPT.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
-@@ -644,6 +644,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
+@@ -742,6 +742,8 @@ GENERATE_TESTCASE(init_held_rtmutex);
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
-@@ -659,9 +661,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
+@@ -757,9 +759,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Enabling hardirqs with a softirq-safe lock held:
*/
-@@ -694,6 +699,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
+@@ -792,6 +797,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
#undef E1
#undef E2
@@ -59,7 +59,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Enabling irqs with an irq-safe lock held:
*/
-@@ -717,6 +724,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
+@@ -815,6 +822,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin)
@@ -68,7 +68,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
-@@ -732,6 +741,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
+@@ -830,6 +839,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#undef E1
#undef E2
-@@ -763,6 +774,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
+@@ -861,6 +872,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin)
@@ -86,7 +86,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
-@@ -778,6 +791,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
+@@ -876,6 +889,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
@@ -95,7 +95,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#undef E1
#undef E2
#undef E3
-@@ -811,6 +826,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
+@@ -909,6 +924,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin)
@@ -104,7 +104,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "locking-selftest-rlock-hardirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
-@@ -826,10 +843,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
+@@ -924,10 +941,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_
#include "locking-selftest-wlock-softirq.h"
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
@@ -119,7 +119,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* read-lock / write-lock irq inversion.
*
-@@ -892,6 +913,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
+@@ -990,6 +1011,10 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inver
#undef E2
#undef E3
@@ -130,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* read-lock / write-lock recursion that is actually safe.
*/
-@@ -930,6 +955,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
+@@ -1028,6 +1053,8 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_
#undef E2
#undef E3
diff --git a/patches/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch b/patches/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
index c39a991c2d31..6162cd2dab48 100644
--- a/patches/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
+++ b/patches/lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
-@@ -1935,6 +1935,7 @@ void locking_selftest(void)
+@@ -2057,6 +2057,7 @@ void locking_selftest(void)
printk(" --------------------------------------------------------------------------\n");
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* irq-context testcases:
*/
-@@ -1947,6 +1948,28 @@ void locking_selftest(void)
+@@ -2069,6 +2070,28 @@ void locking_selftest(void)
DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion);
// DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2);
diff --git a/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
index 1200335d1e6e..336a735674b0 100644
--- a/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
+++ b/patches/locking-don-t-check-for-__LINUX_SPINLOCK_TYPES_H-on-.patch
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/alpha/include/asm/spinlock_types.h
+++ b/arch/alpha/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ALPHA_SPINLOCK_TYPES_H
#define _ALPHA_SPINLOCK_TYPES_H
@@ -45,7 +45,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
@@ -97,7 +97,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ASM_IA64_SPINLOCK_TYPES_H
#define _ASM_IA64_SPINLOCK_TYPES_H
@@ -110,7 +110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/m32r/include/asm/spinlock_types.h
+++ b/arch/m32r/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ASM_M32R_SPINLOCK_TYPES_H
#define _ASM_M32R_SPINLOCK_TYPES_H
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/metag/include/asm/spinlock_types.h
+++ b/arch/metag/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ASM_METAG_SPINLOCK_TYPES_H
#define _ASM_METAG_SPINLOCK_TYPES_H
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/mn10300/include/asm/spinlock_types.h
+++ b/arch/mn10300/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ASM_SPINLOCK_TYPES_H
#define _ASM_SPINLOCK_TYPES_H
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/powerpc/include/asm/spinlock_types.h
+++ b/arch/powerpc/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
#define _ASM_POWERPC_SPINLOCK_TYPES_H
@@ -162,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} arch_spinlock_t;
--- a/arch/s390/include/asm/spinlock_types.h
+++ b/arch/s390/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
@@ -175,7 +175,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
} __attribute__ ((aligned (4))) arch_spinlock_t;
--- a/arch/sh/include/asm/spinlock_types.h
+++ b/arch/sh/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef __ASM_SH_SPINLOCK_TYPES_H
#define __ASM_SH_SPINLOCK_TYPES_H
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Low 15 bits are "next"; high 15 bits are "current". */
--- a/arch/xtensa/include/asm/spinlock_types.h
+++ b/arch/xtensa/include/asm/spinlock_types.h
-@@ -1,10 +1,6 @@
+@@ -2,10 +2,6 @@
#ifndef __ASM_SPINLOCK_TYPES_H
#define __ASM_SPINLOCK_TYPES_H
diff --git a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch b/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
index 5976db76eddb..c06ea530a5cf 100644
--- a/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
+++ b/patches/locking-rt-mutex-fix-deadlock-in-device-mapper-block.patch
@@ -32,11 +32,9 @@ CC: stable-rt@vger.kernel.org
Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 13 +++++++++++++
+ kernel/locking/rtmutex.c | 13 +++++++++++++
1 file changed, 13 insertions(+)
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index c72b2acf4db4..1636498cc658 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -24,6 +24,7 @@
@@ -47,7 +45,7 @@ index c72b2acf4db4..1636498cc658 100644
#include "rtmutex_common.h"
-@@ -1939,6 +1940,15 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
+@@ -1926,6 +1927,15 @@ rt_mutex_fastlock(struct rt_mutex *lock,
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
@@ -63,7 +61,7 @@ index c72b2acf4db4..1636498cc658 100644
return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ww_ctx);
}
-@@ -1956,6 +1966,9 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
+@@ -1943,6 +1953,9 @@ rt_mutex_timed_fastlock(struct rt_mutex
likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
@@ -73,6 +71,3 @@ index c72b2acf4db4..1636498cc658 100644
return slowfn(lock, state, timeout, chwalk, ww_ctx);
}
---
-2.15.0
-
diff --git a/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch b/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch
index c4ca232027ee..ec0930544faf 100644
--- a/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch
+++ b/patches/locking-rtmutex-don-t-drop-the-wait_lock-twice.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1766,7 +1766,6 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1753,7 +1753,6 @@ int __rt_mutex_start_proxy_lock(struct r
raw_spin_lock(&task->pi_lock);
if (task->pi_blocked_on) {
raw_spin_unlock(&task->pi_lock);
diff --git a/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch b/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
index 6d229c40db2e..c21192f0556f 100644
--- a/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
+++ b/patches/locking-rtmutex-re-init-the-wait_lock-in-rt_mutex_in.patch
@@ -10,14 +10,12 @@ seeing a double-lock of the wait_lock.
Reported-by: Fernando Lopez-Lezcano <nando@ccrma.Stanford.EDU>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 8 ++++++++
+ kernel/locking/rtmutex.c | 8 ++++++++
1 file changed, 8 insertions(+)
-diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
-index 37433e3b8596..c72b2acf4db4 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -2261,6 +2261,14 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
+@@ -2260,6 +2260,14 @@ void rt_mutex_init_proxy_locked(struct r
struct task_struct *proxy_owner)
{
__rt_mutex_init(lock, NULL, NULL);
@@ -32,6 +30,3 @@ index 37433e3b8596..c72b2acf4db4 100644
debug_rt_mutex_proxy_lock(lock, proxy_owner);
rt_mutex_set_owner(lock, proxy_owner);
}
---
-2.15.0
-
diff --git a/patches/md-raid5-do-not-disable-interrupts.patch b/patches/md-raid5-do-not-disable-interrupts.patch
index 293dfc9b4f96..79464e43cc63 100644
--- a/patches/md-raid5-do-not-disable-interrupts.patch
+++ b/patches/md-raid5-do-not-disable-interrupts.patch
@@ -29,14 +29,12 @@ interrupts disabled.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/md/raid5.c | 4 ++--
+ drivers/md/raid5.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
-diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
-index 4df1cdad153d..2b956738b3e8 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -410,7 +410,7 @@ void raid5_release_stripe(struct stripe_head *sh)
+@@ -410,7 +410,7 @@ void raid5_release_stripe(struct stripe_
md_wakeup_thread(conf->mddev->thread);
return;
slow_path:
@@ -45,7 +43,7 @@ index 4df1cdad153d..2b956738b3e8 100644
/* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */
if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) {
INIT_LIST_HEAD(&list);
-@@ -419,7 +419,7 @@ void raid5_release_stripe(struct stripe_head *sh)
+@@ -419,7 +419,7 @@ void raid5_release_stripe(struct stripe_
spin_unlock(&conf->device_lock);
release_inactive_stripe_list(conf, &list, hash);
}
@@ -54,6 +52,3 @@ index 4df1cdad153d..2b956738b3e8 100644
}
static inline void remove_hash(struct stripe_head *sh)
---
-2.15.0
-
diff --git a/patches/md-raid5-percpu-handling-rt-aware.patch b/patches/md-raid5-percpu-handling-rt-aware.patch
index 9784356b6d6c..64cd6473a7af 100644
--- a/patches/md-raid5-percpu-handling-rt-aware.patch
+++ b/patches/md-raid5-percpu-handling-rt-aware.patch
@@ -20,7 +20,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
-@@ -2065,8 +2065,9 @@ static void raid_run_ops(struct stripe_h
+@@ -2064,8 +2064,9 @@ static void raid_run_ops(struct stripe_h
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -31,7 +31,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
ops_run_biofill(sh);
overlap_clear++;
-@@ -2125,7 +2126,8 @@ static void raid_run_ops(struct stripe_h
+@@ -2124,7 +2125,8 @@ static void raid_run_ops(struct stripe_h
if (test_and_clear_bit(R5_Overlap, &dev->flags))
wake_up(&sh->raid_conf->wait_for_overlap);
}
@@ -41,7 +41,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
}
static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh)
-@@ -6797,6 +6799,7 @@ static int raid456_cpu_up_prepare(unsign
+@@ -6793,6 +6795,7 @@ static int raid456_cpu_up_prepare(unsign
__func__, cpu);
return -ENOMEM;
}
@@ -49,7 +49,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
return 0;
}
-@@ -6807,7 +6810,6 @@ static int raid5_alloc_percpu(struct r5c
+@@ -6803,7 +6806,6 @@ static int raid5_alloc_percpu(struct r5c
conf->percpu = alloc_percpu(struct raid5_percpu);
if (!conf->percpu)
return -ENOMEM;
@@ -59,7 +59,7 @@ Tested-by: Udo van den Heuvel <udovdh@xs4all.nl>
conf->scribble_disks = max(conf->raid_disks,
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
-@@ -623,6 +623,7 @@ struct r5conf {
+@@ -624,6 +624,7 @@ struct r5conf {
int recovery_disabled;
/* per cpu variables */
struct raid5_percpu {
diff --git a/patches/mips-disable-highmem-on-rt.patch b/patches/mips-disable-highmem-on-rt.patch
index 902e2a0ed053..503ff8c2b93b 100644
--- a/patches/mips-disable-highmem-on-rt.patch
+++ b/patches/mips-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
-@@ -2529,7 +2529,7 @@ config MIPS_ASID_BITS_VARIABLE
+@@ -2519,7 +2519,7 @@ config MIPS_ASID_BITS_VARIABLE
#
config HIGHMEM
bool "High Memory Support"
diff --git a/patches/mm--rt--Fix-generic-kmap_atomic-for-RT.patch b/patches/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
index 6814afe977c9..3549e58fd8e6 100644
--- a/patches/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
+++ b/patches/mm--rt--Fix-generic-kmap_atomic-for-RT.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
-@@ -65,7 +65,7 @@ static inline void kunmap(struct page *p
+@@ -66,7 +66,7 @@ static inline void kunmap(struct page *p
static inline void *kmap_atomic(struct page *page)
{
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pagefault_disable();
return page_address(page);
}
-@@ -74,7 +74,7 @@ static inline void *kmap_atomic(struct p
+@@ -75,7 +75,7 @@ static inline void *kmap_atomic(struct p
static inline void __kunmap_atomic(void *addr)
{
pagefault_enable();
diff --git a/patches/mm-bounce-local-irq-save-nort.patch b/patches/mm-bounce-local-irq-save-nort.patch
index 58d12de2f287..e7719b49482b 100644
--- a/patches/mm-bounce-local-irq-save-nort.patch
+++ b/patches/mm-bounce-local-irq-save-nort.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/bounce.c
+++ b/block/bounce.c
-@@ -65,11 +65,11 @@ static void bounce_copy_vec(struct bio_v
+@@ -66,11 +66,11 @@ static void bounce_copy_vec(struct bio_v
unsigned long flags;
unsigned char *vto;
diff --git a/patches/mm-convert-swap-to-percpu-locked.patch b/patches/mm-convert-swap-to-percpu-locked.patch
index 267686e4cee0..b1dea6a68010 100644
--- a/patches/mm-convert-swap-to-percpu-locked.patch
+++ b/patches/mm-convert-swap-to-percpu-locked.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
-@@ -269,6 +269,7 @@ extern unsigned long nr_free_pagecache_p
+@@ -312,6 +312,7 @@ extern unsigned long nr_free_pagecache_p
/* linux/mm/swap.c */
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void lru_cache_add_file(struct page *page);
--- a/mm/compaction.c
+++ b/mm/compaction.c
-@@ -1633,10 +1633,12 @@ static enum compact_result compact_zone(
+@@ -1634,10 +1634,12 @@ static enum compact_result compact_zone(
block_start_pfn(cc->migrate_pfn, cc->order);
if (cc->last_migrated_pfn < current_block_start) {
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -6918,8 +6918,9 @@ void __init free_area_init(unsigned long
+@@ -6857,8 +6857,9 @@ void __init free_area_init(unsigned long
static int page_alloc_cpu_dead(unsigned int cpu)
{
diff --git a/patches/mm-disable-sloub-rt.patch b/patches/mm-disable-sloub-rt.patch
index 523991b3e10b..b302ebbbe400 100644
--- a/patches/mm-disable-sloub-rt.patch
+++ b/patches/mm-disable-sloub-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1521,6 +1521,7 @@ choice
+@@ -1526,6 +1526,7 @@ choice
config SLAB
bool "SLAB"
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_HARDENED_USERCOPY_ALLOCATOR
help
The regular slab allocator that is established and known to work
-@@ -1541,6 +1542,7 @@ config SLUB
+@@ -1546,6 +1547,7 @@ config SLUB
config SLOB
depends on EXPERT
bool "SLOB (Simple Allocator)"
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index c5fd3dd4ef0f..12b601abb10c 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -449,7 +449,11 @@ static inline void slab_post_alloc_hook(
+@@ -454,7 +454,11 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct list_head slabs_partial; /* partial list first, better asm code */
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1146,7 +1146,7 @@ static noinline int free_debug_processin
+@@ -1180,7 +1180,7 @@ static noinline int free_debug_processin
unsigned long uninitialized_var(flags);
int ret = 0;
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
slab_lock(page);
if (s->flags & SLAB_CONSISTENCY_CHECKS) {
-@@ -1181,7 +1181,7 @@ static noinline int free_debug_processin
+@@ -1215,7 +1215,7 @@ static noinline int free_debug_processin
bulk_cnt, cnt);
slab_unlock(page);
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (!ret)
slab_fix(s, "Object at 0x%p not freed", object);
return ret;
-@@ -1309,6 +1309,12 @@ static inline void dec_slabs_node(struct
+@@ -1343,6 +1343,12 @@ static inline void dec_slabs_node(struct
#endif /* CONFIG_SLUB_DEBUG */
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Hooks for other subsystems that check memory allocations. In a typical
* production configuration these hooks all should produce no code at all.
-@@ -1535,7 +1541,11 @@ static struct page *allocate_slab(struct
+@@ -1569,7 +1575,11 @@ static struct page *allocate_slab(struct
flags &= gfp_allowed_mask;
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1610,7 +1620,11 @@ static struct page *allocate_slab(struct
+@@ -1644,7 +1654,11 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_disable();
if (!page)
return NULL;
-@@ -1670,6 +1684,16 @@ static void __free_slab(struct kmem_cach
+@@ -1704,6 +1718,16 @@ static void __free_slab(struct kmem_cach
__free_pages(page, order);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define need_reserve_slab_rcu \
(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
-@@ -1701,6 +1725,12 @@ static void free_slab(struct kmem_cache
+@@ -1735,6 +1759,12 @@ static void free_slab(struct kmem_cache
}
call_rcu(head, rcu_free_slab);
@@ -112,7 +112,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else
__free_slab(s, page);
}
-@@ -1808,7 +1838,7 @@ static void *get_partial_node(struct kme
+@@ -1842,7 +1872,7 @@ static void *get_partial_node(struct kme
if (!n || !n->nr_partial)
return NULL;
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, page2, &n->partial, lru) {
void *t;
-@@ -1833,7 +1863,7 @@ static void *get_partial_node(struct kme
+@@ -1867,7 +1897,7 @@ static void *get_partial_node(struct kme
break;
}
@@ -130,7 +130,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return object;
}
-@@ -2079,7 +2109,7 @@ static void deactivate_slab(struct kmem_
+@@ -2113,7 +2143,7 @@ static void deactivate_slab(struct kmem_
* that acquire_slab() will see a slab page that
* is frozen
*/
@@ -139,7 +139,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
m = M_FULL;
-@@ -2090,7 +2120,7 @@ static void deactivate_slab(struct kmem_
+@@ -2124,7 +2154,7 @@ static void deactivate_slab(struct kmem_
* slabs from diagnostic functions will not see
* any frozen slabs.
*/
@@ -148,7 +148,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2125,7 +2155,7 @@ static void deactivate_slab(struct kmem_
+@@ -2159,7 +2189,7 @@ static void deactivate_slab(struct kmem_
goto redo;
if (lock)
@@ -157,7 +157,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (m == M_FREE) {
stat(s, DEACTIVATE_EMPTY);
-@@ -2160,10 +2190,10 @@ static void unfreeze_partials(struct kme
+@@ -2194,10 +2224,10 @@ static void unfreeze_partials(struct kme
n2 = get_node(s, page_to_nid(page));
if (n != n2) {
if (n)
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
do {
-@@ -2192,7 +2222,7 @@ static void unfreeze_partials(struct kme
+@@ -2226,7 +2256,7 @@ static void unfreeze_partials(struct kme
}
if (n)
@@ -179,7 +179,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (discard_page) {
page = discard_page;
-@@ -2231,14 +2261,21 @@ static void put_cpu_partial(struct kmem_
+@@ -2265,14 +2295,21 @@ static void put_cpu_partial(struct kmem_
pobjects = oldpage->pobjects;
pages = oldpage->pages;
if (drain && pobjects > s->cpu_partial) {
@@ -201,7 +201,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
oldpage = NULL;
pobjects = 0;
pages = 0;
-@@ -2308,7 +2345,22 @@ static bool has_cpu_slab(int cpu, void *
+@@ -2342,7 +2379,22 @@ static bool has_cpu_slab(int cpu, void *
static void flush_all(struct kmem_cache *s)
{
@@ -224,7 +224,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2363,10 +2415,10 @@ static unsigned long count_partial(struc
+@@ -2397,10 +2449,10 @@ static unsigned long count_partial(struc
unsigned long x = 0;
struct page *page;
@@ -237,7 +237,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return x;
}
#endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
-@@ -2504,8 +2556,10 @@ static inline void *get_freelist(struct
+@@ -2538,8 +2590,10 @@ static inline void *get_freelist(struct
* already disabled (which is the case for bulk allocation).
*/
static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
@@ -249,7 +249,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *freelist;
struct page *page;
-@@ -2561,6 +2615,13 @@ static void *___slab_alloc(struct kmem_c
+@@ -2595,6 +2649,13 @@ static void *___slab_alloc(struct kmem_c
VM_BUG_ON(!c->page->frozen);
c->freelist = get_freepointer(s, freelist);
c->tid = next_tid(c->tid);
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return freelist;
new_slab:
-@@ -2576,7 +2637,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2610,7 +2671,7 @@ static void *___slab_alloc(struct kmem_c
if (unlikely(!freelist)) {
slab_out_of_memory(s, gfpflags, node);
@@ -272,7 +272,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
page = c->page;
-@@ -2589,7 +2650,7 @@ static void *___slab_alloc(struct kmem_c
+@@ -2623,7 +2684,7 @@ static void *___slab_alloc(struct kmem_c
goto new_slab; /* Slab failed checks. Next slab needed */
deactivate_slab(s, page, get_freepointer(s, freelist), c);
@@ -281,7 +281,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2601,6 +2662,7 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2635,6 +2696,7 @@ static void *__slab_alloc(struct kmem_ca
{
void *p;
unsigned long flags;
@@ -289,7 +289,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_save(flags);
#ifdef CONFIG_PREEMPT
-@@ -2612,8 +2674,9 @@ static void *__slab_alloc(struct kmem_ca
+@@ -2646,8 +2708,9 @@ static void *__slab_alloc(struct kmem_ca
c = this_cpu_ptr(s->cpu_slab);
#endif
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return p;
}
-@@ -2799,7 +2862,7 @@ static void __slab_free(struct kmem_cach
+@@ -2833,7 +2896,7 @@ static void __slab_free(struct kmem_cach
do {
if (unlikely(n)) {
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
n = NULL;
}
prior = page->freelist;
-@@ -2831,7 +2894,7 @@ static void __slab_free(struct kmem_cach
+@@ -2865,7 +2928,7 @@ static void __slab_free(struct kmem_cach
* Otherwise the list_lock will synchronize with
* other processors updating the list of slabs.
*/
@@ -318,7 +318,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -2873,7 +2936,7 @@ static void __slab_free(struct kmem_cach
+@@ -2907,7 +2970,7 @@ static void __slab_free(struct kmem_cach
add_partial(n, page, DEACTIVATE_TO_TAIL);
stat(s, FREE_ADD_PARTIAL);
}
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
slab_empty:
-@@ -2888,7 +2951,7 @@ static void __slab_free(struct kmem_cach
+@@ -2922,7 +2985,7 @@ static void __slab_free(struct kmem_cach
remove_full(s, n, page);
}
@@ -336,7 +336,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
stat(s, FREE_SLAB);
discard_slab(s, page);
}
-@@ -3093,6 +3156,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3127,6 +3190,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
void **p)
{
struct kmem_cache_cpu *c;
@@ -344,7 +344,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int i;
/* memcg and kmem_cache debug support */
-@@ -3116,7 +3180,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3150,7 +3214,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
* of re-populating per CPU c->freelist
*/
p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
@@ -353,7 +353,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(!p[i]))
goto error;
-@@ -3128,6 +3192,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
+@@ -3162,6 +3226,7 @@ int kmem_cache_alloc_bulk(struct kmem_ca
}
c->tid = next_tid(c->tid);
local_irq_enable();
@@ -361,7 +361,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Clear memory outside IRQ disabled fastpath loop */
if (unlikely(flags & __GFP_ZERO)) {
-@@ -3275,7 +3340,7 @@ static void
+@@ -3309,7 +3374,7 @@ static void
init_kmem_cache_node(struct kmem_cache_node *n)
{
n->nr_partial = 0;
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3626,6 +3691,10 @@ static void list_slab_objects(struct kme
+@@ -3663,6 +3728,10 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -381,7 +381,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
-@@ -3646,6 +3715,7 @@ static void list_slab_objects(struct kme
+@@ -3683,6 +3752,7 @@ static void list_slab_objects(struct kme
slab_unlock(page);
kfree(map);
#endif
@@ -389,7 +389,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -3659,7 +3729,7 @@ static void free_partial(struct kmem_cac
+@@ -3696,7 +3766,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -398,7 +398,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3669,7 +3739,7 @@ static void free_partial(struct kmem_cac
+@@ -3706,7 +3776,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -407,7 +407,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3913,7 +3983,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3950,7 +4020,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -416,7 +416,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -3944,7 +4014,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3981,7 +4051,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -425,7 +425,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4157,6 +4227,12 @@ void __init kmem_cache_init(void)
+@@ -4194,6 +4264,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
@@ -438,7 +438,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (debug_guardpage_minorder())
slub_max_order = 0;
-@@ -4365,7 +4441,7 @@ static int validate_slab_node(struct kme
+@@ -4402,7 +4478,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -447,7 +447,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4387,7 +4463,7 @@ static int validate_slab_node(struct kme
+@@ -4424,7 +4500,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -456,7 +456,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return count;
}
-@@ -4575,12 +4651,12 @@ static int list_locations(struct kmem_ca
+@@ -4612,12 +4688,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/mm-make-vmstat-rt-aware.patch b/patches/mm-make-vmstat-rt-aware.patch
index 042330ae6131..7a45b97c0cd2 100644
--- a/patches/mm-make-vmstat-rt-aware.patch
+++ b/patches/mm-make-vmstat-rt-aware.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
-@@ -32,7 +32,9 @@ DECLARE_PER_CPU(struct vm_event_state, v
+@@ -33,7 +33,9 @@ DECLARE_PER_CPU(struct vm_event_state, v
*/
static inline void __count_vm_event(enum vm_event_item item)
{
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static inline void count_vm_event(enum vm_event_item item)
-@@ -42,7 +44,9 @@ static inline void count_vm_event(enum v
+@@ -43,7 +45,9 @@ static inline void count_vm_event(enum v
static inline void __count_vm_events(enum vm_event_item item, long delta)
{
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void count_vm_events(enum vm_event_item item, long delta)
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
-@@ -245,6 +245,7 @@ void __mod_zone_page_state(struct zone *
+@@ -249,6 +249,7 @@ void __mod_zone_page_state(struct zone *
long x;
long t;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -254,6 +255,7 @@ void __mod_zone_page_state(struct zone *
+@@ -258,6 +259,7 @@ void __mod_zone_page_state(struct zone *
x = 0;
}
__this_cpu_write(*p, x);
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__mod_zone_page_state);
-@@ -265,6 +267,7 @@ void __mod_node_page_state(struct pglist
+@@ -269,6 +271,7 @@ void __mod_node_page_state(struct pglist
long x;
long t;
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
x = delta + __this_cpu_read(*p);
t = __this_cpu_read(pcp->stat_threshold);
-@@ -274,6 +277,7 @@ void __mod_node_page_state(struct pglist
+@@ -278,6 +281,7 @@ void __mod_node_page_state(struct pglist
x = 0;
}
__this_cpu_write(*p, x);
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__mod_node_page_state);
-@@ -306,6 +310,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -310,6 +314,7 @@ void __inc_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -78,7 +78,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -314,6 +319,7 @@ void __inc_zone_state(struct zone *zone,
+@@ -318,6 +323,7 @@ void __inc_zone_state(struct zone *zone,
zone_page_state_add(v + overstep, zone, item);
__this_cpu_write(*p, -overstep);
}
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __inc_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -322,6 +328,7 @@ void __inc_node_state(struct pglist_data
+@@ -326,6 +332,7 @@ void __inc_node_state(struct pglist_data
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_inc_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v > t)) {
-@@ -330,6 +337,7 @@ void __inc_node_state(struct pglist_data
+@@ -334,6 +341,7 @@ void __inc_node_state(struct pglist_data
node_page_state_add(v + overstep, pgdat, item);
__this_cpu_write(*p, -overstep);
}
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
-@@ -350,6 +358,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -354,6 +362,7 @@ void __dec_zone_state(struct zone *zone,
s8 __percpu *p = pcp->vm_stat_diff + item;
s8 v, t;
@@ -110,7 +110,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -358,6 +367,7 @@ void __dec_zone_state(struct zone *zone,
+@@ -362,6 +371,7 @@ void __dec_zone_state(struct zone *zone,
zone_page_state_add(v - overstep, zone, item);
__this_cpu_write(*p, overstep);
}
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __dec_node_state(struct pglist_data *pgdat, enum node_stat_item item)
-@@ -366,6 +376,7 @@ void __dec_node_state(struct pglist_data
+@@ -370,6 +380,7 @@ void __dec_node_state(struct pglist_data
s8 __percpu *p = pcp->vm_node_stat_diff + item;
s8 v, t;
@@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
v = __this_cpu_dec_return(*p);
t = __this_cpu_read(pcp->stat_threshold);
if (unlikely(v < - t)) {
-@@ -374,6 +385,7 @@ void __dec_node_state(struct pglist_data
+@@ -378,6 +389,7 @@ void __dec_node_state(struct pglist_data
node_page_state_add(v - overstep, pgdat, item);
__this_cpu_write(*p, overstep);
}
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 684069dc27df..52ddca411353 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,21 +48,21 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -1806,7 +1806,7 @@ static void drain_all_stock(struct mem_c
- return;
- /* Notify other cpus that system-wide "drain" is running */
- get_online_cpus();
+@@ -1831,7 +1831,7 @@ static void drain_all_stock(struct mem_c
+ * as well as workers from this path always operate on the local
+ * per-cpu data. CPU up doesn't touch memcg_stock at all.
+ */
- curcpu = get_cpu();
+ curcpu = get_cpu_light();
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -1823,7 +1823,7 @@ static void drain_all_stock(struct mem_c
- schedule_work_on(cpu, &stock->work);
+@@ -1851,7 +1851,7 @@ static void drain_all_stock(struct mem_c
}
+ css_put(&memcg->css);
}
- put_cpu();
+ put_cpu_light();
- put_online_cpus();
mutex_unlock(&percpu_charge_mutex);
}
+
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index da5c359b52c7..9c8a7cce86d3 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4574,12 +4577,12 @@ static int mem_cgroup_move_account(struc
+@@ -4621,12 +4624,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5486,10 +5489,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5569,10 +5572,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,25 +57,25 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -5545,7 +5548,7 @@ static void uncharge_batch(struct mem_cg
- memcg_oom_recover(memcg);
+@@ -5641,7 +5644,7 @@ static void uncharge_batch(const struct
+ memcg_oom_recover(ug->memcg);
}
- local_irq_save(flags);
+ local_lock_irqsave(event_lock, flags);
- __this_cpu_sub(memcg->stat->count[MEMCG_RSS], nr_anon);
- __this_cpu_sub(memcg->stat->count[MEMCG_CACHE], nr_file);
- __this_cpu_sub(memcg->stat->count[MEMCG_RSS_HUGE], nr_huge);
-@@ -5553,7 +5556,7 @@ static void uncharge_batch(struct mem_cg
- __this_cpu_add(memcg->stat->events[PGPGOUT], pgpgout);
- __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
- memcg_check_events(memcg, dummy_page);
+ __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS], ug->nr_anon);
+ __this_cpu_sub(ug->memcg->stat->count[MEMCG_CACHE], ug->nr_file);
+ __this_cpu_sub(ug->memcg->stat->count[MEMCG_RSS_HUGE], ug->nr_huge);
+@@ -5649,7 +5652,7 @@ static void uncharge_batch(const struct
+ __this_cpu_add(ug->memcg->stat->events[PGPGOUT], ug->pgpgout);
+ __this_cpu_add(ug->memcg->stat->nr_page_events, nr_pages);
+ memcg_check_events(ug->memcg, ug->dummy_page);
- local_irq_restore(flags);
+ local_unlock_irqrestore(event_lock, flags);
- if (!mem_cgroup_is_root(memcg))
- css_put_many(&memcg->css, nr_pages);
-@@ -5712,10 +5715,10 @@ void mem_cgroup_migrate(struct page *old
+ if (!mem_cgroup_is_root(ug->memcg))
+ css_put_many(&ug->memcg->css, nr_pages);
+@@ -5812,10 +5815,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -88,15 +88,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -5907,6 +5910,7 @@ void mem_cgroup_swapout(struct page *pag
- {
+@@ -5993,6 +5996,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
+ unsigned int nr_entries;
unsigned short oldid;
+ unsigned long flags;
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -5947,12 +5951,16 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6038,13 +6042,17 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
	 * only synchronisation we have for updating the per-CPU variables.
*/
@@ -104,7 +104,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#ifndef CONFIG_PREEMPT_RT_BASE
VM_BUG_ON(!irqs_disabled());
+#endif
- mem_cgroup_charge_statistics(memcg, page, false, -1);
+ mem_cgroup_charge_statistics(memcg, page, PageTransHuge(page),
+ -nr_entries);
memcg_check_events(memcg, page);
if (!mem_cgroup_is_root(memcg))
diff --git a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
index 7e3e0389a653..e5e462b4b723 100644
--- a/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
+++ b/patches/mm-page-alloc-use-local-lock-on-target-cpu.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -291,9 +291,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
+@@ -292,9 +292,9 @@ static DEFINE_LOCAL_IRQ_LOCK(pa_lock);
#ifdef CONFIG_PREEMPT_RT_BASE
# define cpu_lock_irqsave(cpu, flags) \
diff --git a/patches/mm-page_alloc-reduce-lock-sections-further.patch b/patches/mm-page_alloc-reduce-lock-sections-further.patch
index a60330013037..e715ab909289 100644
--- a/patches/mm-page_alloc-reduce-lock-sections-further.patch
+++ b/patches/mm-page_alloc-reduce-lock-sections-further.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
-@@ -1099,7 +1099,7 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1100,7 +1100,7 @@ static bool bulkfree_pcp_prepare(struct
#endif /* CONFIG_DEBUG_VM */
/*
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Assumes all pages on list are in same zone, and of same order.
* count is the number of pages to free.
*
-@@ -1110,15 +1110,53 @@ static bool bulkfree_pcp_prepare(struct
+@@ -1111,15 +1111,53 @@ static bool bulkfree_pcp_prepare(struct
* pinned" detection logic.
*/
static void free_pcppages_bulk(struct zone *zone, int count,
@@ -80,7 +80,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
while (count) {
struct page *page;
struct list_head *list;
-@@ -1134,7 +1172,7 @@ static void free_pcppages_bulk(struct zo
+@@ -1135,7 +1173,7 @@ static void free_pcppages_bulk(struct zo
batch_free++;
if (++migratetype == MIGRATE_PCPTYPES)
migratetype = 0;
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} while (list_empty(list));
/* This is the only non-empty list. Free them all. */
-@@ -1142,27 +1180,12 @@ static void free_pcppages_bulk(struct zo
+@@ -1143,27 +1181,12 @@ static void free_pcppages_bulk(struct zo
batch_free = count;
do {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void free_one_page(struct zone *zone,
-@@ -1170,13 +1193,15 @@ static void free_one_page(struct zone *z
+@@ -1171,13 +1194,15 @@ static void free_one_page(struct zone *z
unsigned int order,
int migratetype)
{
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __meminit __init_single_page(struct page *page, unsigned long pfn,
-@@ -2383,16 +2408,18 @@ static int rmqueue_bulk(struct zone *zon
+@@ -2384,16 +2409,18 @@ static int rmqueue_bulk(struct zone *zon
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
unsigned long flags;
@@ -156,7 +156,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2408,16 +2435,21 @@ static void drain_pages_zone(unsigned in
+@@ -2409,16 +2436,21 @@ static void drain_pages_zone(unsigned in
unsigned long flags;
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -180,7 +180,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2655,8 +2687,13 @@ void free_hot_cold_page(struct page *pag
+@@ -2656,8 +2688,13 @@ void free_hot_cold_page(struct page *pag
pcp->count++;
if (pcp->count >= pcp->high) {
unsigned long batch = READ_ONCE(pcp->batch);
diff --git a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
index db4654d4e0a7..655ea722883b 100644
--- a/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
+++ b/patches/mm-page_alloc-rt-friendly-per-cpu-pages.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
-@@ -286,6 +287,18 @@ EXPORT_SYMBOL(nr_node_ids);
+@@ -287,6 +288,18 @@ EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int page_group_by_mobility_disabled __read_mostly;
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-@@ -1249,10 +1262,10 @@ static void __free_pages_ok(struct page
+@@ -1250,10 +1263,10 @@ static void __free_pages_ok(struct page
return;
migratetype = get_pfnblock_migratetype(page, pfn);
@@ -57,7 +57,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void __init __free_pages_boot_core(struct page *page, unsigned int order)
-@@ -2372,14 +2385,14 @@ void drain_zone_pages(struct zone *zone,
+@@ -2373,14 +2386,14 @@ void drain_zone_pages(struct zone *zone,
unsigned long flags;
int to_drain, batch;
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#endif
-@@ -2396,7 +2409,7 @@ static void drain_pages_zone(unsigned in
+@@ -2397,7 +2410,7 @@ static void drain_pages_zone(unsigned in
struct per_cpu_pageset *pset;
struct per_cpu_pages *pcp;
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pset = per_cpu_ptr(zone->pageset, cpu);
pcp = &pset->pcp;
-@@ -2404,7 +2417,7 @@ static void drain_pages_zone(unsigned in
+@@ -2405,7 +2418,7 @@ static void drain_pages_zone(unsigned in
free_pcppages_bulk(zone, pcp->count, pcp);
pcp->count = 0;
}
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2439,6 +2452,7 @@ void drain_local_pages(struct zone *zone
+@@ -2440,6 +2453,7 @@ void drain_local_pages(struct zone *zone
drain_pages(cpu);
}
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void drain_local_pages_wq(struct work_struct *work)
{
/*
-@@ -2452,6 +2466,7 @@ static void drain_local_pages_wq(struct
+@@ -2453,6 +2467,7 @@ static void drain_local_pages_wq(struct
drain_local_pages(NULL);
preempt_enable();
}
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Spill all the per-cpu pages from all CPUs back into the buddy allocator.
-@@ -2522,7 +2537,14 @@ void drain_all_pages(struct zone *zone)
+@@ -2523,7 +2538,14 @@ void drain_all_pages(struct zone *zone)
else
cpumask_clear_cpu(cpu, &cpus_with_pcps);
}
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_cpu(cpu, &cpus_with_pcps) {
struct work_struct *work = per_cpu_ptr(&pcpu_drain, cpu);
INIT_WORK(work, drain_local_pages_wq);
-@@ -2530,6 +2552,7 @@ void drain_all_pages(struct zone *zone)
+@@ -2531,6 +2553,7 @@ void drain_all_pages(struct zone *zone)
}
for_each_cpu(cpu, &cpus_with_pcps)
flush_work(per_cpu_ptr(&pcpu_drain, cpu));
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mutex_unlock(&pcpu_drain_mutex);
}
-@@ -2606,7 +2629,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2607,7 +2630,7 @@ void free_hot_cold_page(struct page *pag
migratetype = get_pfnblock_migratetype(page, pfn);
set_pcppage_migratetype(page, migratetype);
@@ -141,7 +141,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
__count_vm_event(PGFREE);
/*
-@@ -2637,7 +2660,7 @@ void free_hot_cold_page(struct page *pag
+@@ -2638,7 +2661,7 @@ void free_hot_cold_page(struct page *pag
}
out:
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2794,7 +2817,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -2795,7 +2818,7 @@ static struct page *rmqueue_pcplist(stru
struct page *page;
unsigned long flags;
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pcp = &this_cpu_ptr(zone->pageset)->pcp;
list = &pcp->lists[migratetype];
page = __rmqueue_pcplist(zone, migratetype, cold, pcp, list);
-@@ -2802,7 +2825,7 @@ static struct page *rmqueue_pcplist(stru
+@@ -2803,7 +2826,7 @@ static struct page *rmqueue_pcplist(stru
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
}
@@ -168,7 +168,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return page;
}
-@@ -2829,7 +2852,7 @@ struct page *rmqueue(struct zone *prefer
+@@ -2830,7 +2853,7 @@ struct page *rmqueue(struct zone *prefer
* allocate greater than order-1 page units with __GFP_NOFAIL.
*/
WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
page = NULL;
-@@ -2849,14 +2872,14 @@ struct page *rmqueue(struct zone *prefer
+@@ -2850,14 +2873,14 @@ struct page *rmqueue(struct zone *prefer
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
zone_statistics(preferred_zone, zone);
@@ -194,7 +194,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return NULL;
}
-@@ -7754,7 +7777,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7695,7 +7718,7 @@ void zone_pcp_reset(struct zone *zone)
struct per_cpu_pageset *pset;
/* avoid races with drain_pages() */
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (zone->pageset != &boot_pageset) {
for_each_online_cpu(cpu) {
pset = per_cpu_ptr(zone->pageset, cpu);
-@@ -7763,7 +7786,7 @@ void zone_pcp_reset(struct zone *zone)
+@@ -7704,7 +7727,7 @@ void zone_pcp_reset(struct zone *zone)
free_percpu(zone->pageset);
zone->pageset = &boot_pageset;
}
diff --git a/patches/mm-protect-activate-switch-mm.patch b/patches/mm-protect-activate-switch-mm.patch
index c6eb31661b31..16cc40ca00b0 100644
--- a/patches/mm-protect-activate-switch-mm.patch
+++ b/patches/mm-protect-activate-switch-mm.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/exec.c
+++ b/fs/exec.c
-@@ -1043,12 +1043,14 @@ static int exec_mmap(struct mm_struct *m
+@@ -1024,12 +1024,14 @@ static int exec_mmap(struct mm_struct *m
}
}
task_lock(tsk);
diff --git a/patches/mm-rt-kmap-atomic-scheduling.patch b/patches/mm-rt-kmap-atomic-scheduling.patch
index 6af4e0d8f8fa..064e4f9d444c 100644
--- a/patches/mm-rt-kmap-atomic-scheduling.patch
+++ b/patches/mm-rt-kmap-atomic-scheduling.patch
@@ -163,7 +163,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
}
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
-@@ -86,32 +86,51 @@ static inline void __kunmap_atomic(void
+@@ -87,32 +87,51 @@ static inline void __kunmap_atomic(void
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
@@ -221,7 +221,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -26,6 +26,7 @@
+@@ -27,6 +27,7 @@
#include <linux/signal_types.h>
#include <linux/mm_types_task.h>
#include <linux/task_io_accounting.h>
@@ -229,7 +229,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/* task_struct member predeclarations (sorted alphabetically): */
struct audit_context;
-@@ -1093,6 +1094,12 @@ struct task_struct {
+@@ -1104,6 +1105,12 @@ struct task_struct {
int softirq_nestcnt;
unsigned int softirqs_raised;
#endif
@@ -244,7 +244,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
#endif
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
-@@ -184,6 +184,7 @@ static __always_inline void pagefault_di
+@@ -185,6 +185,7 @@ static __always_inline void pagefault_di
*/
static inline void pagefault_disable(void)
{
@@ -252,7 +252,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
pagefault_disabled_inc();
/*
* make sure to have issued the store before a pagefault
-@@ -200,6 +201,7 @@ static inline void pagefault_enable(void
+@@ -201,6 +202,7 @@ static inline void pagefault_enable(void
*/
barrier();
pagefault_disabled_dec();
@@ -262,7 +262,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/*
--- a/mm/highmem.c
+++ b/mm/highmem.c
-@@ -29,10 +29,11 @@
+@@ -30,10 +30,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
@@ -275,7 +275,7 @@ Link: http://lkml.kernel.org/r/1311842631.5890.208.camel@twins
/*
* Virtual_count is not a pure "count".
-@@ -107,8 +108,9 @@ static inline wait_queue_head_t *get_pkm
+@@ -108,8 +109,9 @@ static inline wait_queue_head_t *get_pkm
unsigned long totalhigh_pages __read_mostly;
EXPORT_SYMBOL(totalhigh_pages);
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index 1280a8f8b851..0690a5e46224 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -867,7 +867,7 @@ static void *new_vmap_block(unsigned int
+@@ -865,7 +865,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -910,11 +910,12 @@ static void *new_vmap_block(unsigned int
+@@ -908,11 +908,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -983,6 +984,7 @@ static void *vb_alloc(unsigned long size
+@@ -981,6 +982,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -997,7 +999,8 @@ static void *vb_alloc(unsigned long size
+@@ -995,7 +997,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1020,7 +1023,7 @@ static void *vb_alloc(unsigned long size
+@@ -1018,7 +1021,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
index 237af2b875c1..800d0d213e45 100644
--- a/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
+++ b/patches/mm-workingset-do-not-protect-workingset_shadow_nodes.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
-@@ -11,6 +11,7 @@
+@@ -12,6 +12,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <asm/page.h>
struct notifier_block;
-@@ -254,7 +255,8 @@ struct swap_info_struct {
+@@ -297,7 +298,8 @@ struct vma_swap_readahead {
void *workingset_eviction(struct address_space *mapping, struct page *page);
bool workingset_refault(void *shadow);
void workingset_activation(struct page *page);
@@ -45,9 +45,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int page_cache_tree_insert(struct address_space *mapping,
struct page *page, void **shadowp)
-@@ -142,8 +143,10 @@ static int page_cache_tree_insert(struct
- true);
- }
+@@ -133,8 +134,10 @@ static int page_cache_tree_insert(struct
+ if (shadowp)
+ *shadowp = p;
}
+ local_lock(shadow_nodes_lock);
__radix_tree_replace(&mapping->page_tree, node, slot, page,
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mapping->nrpages++;
return 0;
}
-@@ -160,6 +163,7 @@ static void page_cache_tree_delete(struc
+@@ -151,6 +154,7 @@ static void page_cache_tree_delete(struc
VM_BUG_ON_PAGE(PageTail(page), page);
VM_BUG_ON_PAGE(nr != 1 && shadow, page);
@@ -65,7 +65,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for (i = 0; i < nr; i++) {
struct radix_tree_node *node;
void **slot;
-@@ -171,8 +175,9 @@ static void page_cache_tree_delete(struc
+@@ -162,8 +166,9 @@ static void page_cache_tree_delete(struc
radix_tree_clear_tags(&mapping->page_tree, node, slot);
__radix_tree_replace(&mapping->page_tree, node, slot, shadow,
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&mapping->tree_lock);
--- a/mm/workingset.c
+++ b/mm/workingset.c
-@@ -337,9 +337,10 @@ void workingset_activation(struct page *
+@@ -338,9 +338,10 @@ void workingset_activation(struct page *
* point where they would still be useful.
*/
@@ -105,7 +105,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct address_space *mapping = private;
-@@ -357,10 +358,10 @@ void workingset_update_node(struct radix
+@@ -358,10 +359,10 @@ void workingset_update_node(struct radix
*/
if (node->count && node->count == node->exceptional) {
if (list_empty(&node->private_list))
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -372,9 +373,9 @@ static unsigned long count_shadow_nodes(
+@@ -373,9 +374,9 @@ static unsigned long count_shadow_nodes(
unsigned long cache;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -131,7 +131,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Approximate a reasonable limit for the radix tree nodes
-@@ -474,15 +475,15 @@ static enum lru_status shadow_lru_isolat
+@@ -475,15 +476,15 @@ static enum lru_status shadow_lru_isolat
goto out_invalid;
inc_lruvec_page_state(virt_to_page(node), WORKINGSET_NODERECLAIM);
__radix_tree_delete_node(&mapping->page_tree, node,
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_lock(lru_lock);
return ret;
}
-@@ -493,9 +494,9 @@ static unsigned long scan_shadow_nodes(s
+@@ -494,9 +495,9 @@ static unsigned long scan_shadow_nodes(s
unsigned long ret;
/* list_lru lock nests inside IRQ-safe mapping->tree_lock */
@@ -163,7 +163,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -533,7 +534,7 @@ static int __init workingset_init(void)
+@@ -534,7 +535,7 @@ static int __init workingset_init(void)
pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
timestamp_bits, max_order, bucket_order);
@@ -172,7 +172,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
goto err;
ret = register_shrinker(&workingset_shadow_shrinker);
-@@ -541,7 +542,7 @@ static int __init workingset_init(void)
+@@ -542,7 +543,7 @@ static int __init workingset_init(void)
goto err_list_lru;
return 0;
err_list_lru:
diff --git a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
index 41f9e21496be..578138506a7f 100644
--- a/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
+++ b/patches/mm_zsmalloc_copy_with_get_cpu_var_and_locking.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool is_zspage_isolated(struct zspage *zspage)
{
-@@ -895,7 +933,13 @@ static unsigned long location_to_obj(str
+@@ -898,7 +936,13 @@ static unsigned long location_to_obj(str
static unsigned long handle_to_obj(unsigned long handle)
{
@@ -129,7 +129,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static unsigned long obj_to_head(struct page *page, void *obj)
-@@ -909,22 +953,46 @@ static unsigned long obj_to_head(struct
+@@ -912,22 +956,46 @@ static unsigned long obj_to_head(struct
static inline int testpin_tag(unsigned long handle)
{
@@ -176,7 +176,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void reset_page(struct page *page)
-@@ -1362,7 +1430,7 @@ void *zs_map_object(struct zs_pool *pool
+@@ -1365,7 +1433,7 @@ void *zs_map_object(struct zs_pool *pool
class = pool->size_class[class_idx];
off = (class->size * obj_idx) & ~PAGE_MASK;
@@ -185,7 +185,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
area->vm_mm = mm;
if (off + class->size <= PAGE_SIZE) {
/* this object is contained entirely within a page */
-@@ -1416,7 +1484,7 @@ void zs_unmap_object(struct zs_pool *poo
+@@ -1419,7 +1487,7 @@ void zs_unmap_object(struct zs_pool *poo
__zs_unmap_object(area, pages, off, class->size);
}
diff --git a/patches/move_sched_delayed_work_to_helper.patch b/patches/move_sched_delayed_work_to_helper.patch
index 4d8783c3884e..29b2b852e10a 100644
--- a/patches/move_sched_delayed_work_to_helper.patch
+++ b/patches/move_sched_delayed_work_to_helper.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
-@@ -17,6 +17,7 @@
+@@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/math64.h>
@@ -34,7 +34,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "ntp_internal.h"
#include "timekeeping_internal.h"
-@@ -568,10 +569,35 @@ static void sync_cmos_clock(struct work_
+@@ -569,10 +570,35 @@ static void sync_cmos_clock(struct work_
&sync_cmos_work, timespec64_to_jiffies(&next));
}
diff --git a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
index 7531bfe218e7..4bb4359086ee 100644
--- a/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
+++ b/patches/net-Have-__napi_schedule_irqoff-disable-interrupts-o.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5196,6 +5196,7 @@ bool napi_schedule_prep(struct napi_stru
+@@ -5237,6 +5237,7 @@ bool napi_schedule_prep(struct napi_stru
}
EXPORT_SYMBOL(napi_schedule_prep);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* __napi_schedule_irqoff - schedule for receive
* @n: entry to schedule
-@@ -5207,6 +5208,7 @@ void __napi_schedule_irqoff(struct napi_
+@@ -5248,6 +5249,7 @@ void __napi_schedule_irqoff(struct napi_
____napi_schedule(this_cpu_ptr(&softnet_data), n);
}
EXPORT_SYMBOL(__napi_schedule_irqoff);
diff --git a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
index f240506e3db4..27645fc30081 100644
--- a/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
+++ b/patches/net-Qdisc-use-a-seqlock-instead-seqcount.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -481,6 +481,15 @@ static inline void write_seqlock(seqlock
+@@ -482,6 +482,15 @@ static inline void write_seqlock(seqlock
__raw_write_seqcount_begin(&sl->seqcount);
}
@@ -40,7 +40,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__raw_write_seqcount_end(&sl->seqcount);
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
-@@ -5,6 +5,7 @@
+@@ -6,6 +6,7 @@
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
-@@ -35,11 +36,11 @@ int gnet_stats_start_copy_compat(struct
+@@ -36,11 +37,11 @@ int gnet_stats_start_copy_compat(struct
spinlock_t *lock, struct gnet_dump *d,
int padattr);
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-@@ -56,13 +57,13 @@ int gen_new_estimator(struct gnet_stats_
+@@ -57,13 +58,13 @@ int gen_new_estimator(struct gnet_stats_
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
spinlock_t *stats_lock,
@@ -98,15 +98,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
-@@ -12,6 +12,7 @@
+@@ -10,6 +10,7 @@
+ #include <linux/percpu.h>
+ #include <linux/dynamic_queue_limits.h>
+ #include <linux/list.h>
++#include <net/net_seq_lock.h>
#include <linux/refcount.h>
+ #include <linux/workqueue.h>
#include <net/gen_stats.h>
- #include <net/rtnetlink.h>
-+#include <net/net_seq_lock.h>
-
- struct Qdisc_ops;
- struct qdisc_walker;
-@@ -89,7 +90,7 @@ struct Qdisc {
+@@ -90,7 +91,7 @@ struct Qdisc {
struct sk_buff *gso_skb ____cacheline_aligned_in_smp;
struct qdisc_skb_head q;
struct gnet_stats_basic_packed bstats;
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_queue qstats;
unsigned long state;
struct Qdisc *next_sched;
-@@ -108,13 +109,22 @@ static inline void qdisc_refcount_inc(st
+@@ -109,13 +110,22 @@ static inline void qdisc_refcount_inc(st
refcount_inc(&qdisc->refcnt);
}
@@ -139,7 +139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (qdisc_is_running(qdisc))
return false;
/* Variant of write_seqcount_begin() telling lockdep a trylock
-@@ -123,11 +133,16 @@ static inline bool qdisc_run_begin(struc
+@@ -124,11 +134,16 @@ static inline bool qdisc_run_begin(struc
raw_write_seqcount_begin(&qdisc->running);
seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
return true;
@@ -156,7 +156,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
-@@ -337,7 +352,7 @@ static inline spinlock_t *qdisc_root_sle
+@@ -338,7 +353,7 @@ static inline spinlock_t *qdisc_root_sle
return qdisc_lock(root);
}
@@ -229,7 +229,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct gnet_stats_basic_packed *b)
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
-@@ -988,7 +988,7 @@ static struct Qdisc *qdisc_create(struct
+@@ -1081,7 +1081,7 @@ static struct Qdisc *qdisc_create(struct
rcu_assign_pointer(sch->stab, stab);
}
if (tca[TCA_RATE]) {
@@ -240,7 +240,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (sch->flags & TCQ_F_MQROOT)
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -425,7 +425,11 @@ struct Qdisc noop_qdisc = {
+@@ -429,7 +429,11 @@ struct Qdisc noop_qdisc = {
.ops = &noop_qdisc_ops,
.q.lock = __SPIN_LOCK_UNLOCKED(noop_qdisc.q.lock),
.dev_queue = &noop_netdev_queue,
@@ -252,7 +252,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
.busylock = __SPIN_LOCK_UNLOCKED(noop_qdisc.busylock),
};
EXPORT_SYMBOL(noop_qdisc);
-@@ -624,9 +628,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
+@@ -628,9 +632,17 @@ struct Qdisc *qdisc_alloc(struct netdev_
lockdep_set_class(&sch->busylock,
dev->qdisc_tx_busylock ?: &qdisc_tx_busylock);
diff --git a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
index 2103a23e8ec3..0dc9f7f2f066 100644
--- a/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
+++ b/patches/net-add-back-the-missing-serialization-in-ip_send_un.patch
@@ -50,7 +50,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include <net/net_namespace.h>
#include <net/icmp.h>
-@@ -582,6 +583,7 @@ void tcp_v4_send_check(struct sock *sk,
+@@ -580,6 +581,7 @@ void tcp_v4_send_check(struct sock *sk,
}
EXPORT_SYMBOL(tcp_v4_send_check);
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This routine will send an RST to the other tcp.
*
-@@ -710,6 +712,7 @@ static void tcp_v4_send_reset(const stru
+@@ -709,6 +711,7 @@ static void tcp_v4_send_reset(const stru
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -719,6 +722,7 @@ static void tcp_v4_send_reset(const stru
+@@ -718,6 +721,7 @@ static void tcp_v4_send_reset(const stru
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
local_bh_enable();
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_TCP_MD5SIG
out:
-@@ -796,6 +800,7 @@ static void tcp_v4_send_ack(const struct
+@@ -795,6 +799,7 @@ static void tcp_v4_send_ack(const struct
arg.bound_dev_if = oif;
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
-@@ -804,6 +809,7 @@ static void tcp_v4_send_ack(const struct
+@@ -803,6 +808,7 @@ static void tcp_v4_send_ack(const struct
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
local_bh_enable();
diff --git a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
index 13693942050a..6f1eaf0b7575 100644
--- a/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
+++ b/patches/net-another-local-irq-disable-alloc-atomic-headache.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <net/protocol.h>
#include <net/dst.h>
-@@ -359,6 +360,7 @@ struct napi_alloc_cache {
+@@ -334,6 +335,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -366,10 +368,10 @@ static void *__netdev_alloc_frag(unsigne
+@@ -341,10 +343,10 @@ static void *__netdev_alloc_frag(unsigne
unsigned long flags;
void *data;
@@ -40,7 +40,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return data;
}
-@@ -437,13 +439,13 @@ struct sk_buff *__netdev_alloc_skb(struc
+@@ -412,13 +414,13 @@ struct sk_buff *__netdev_alloc_skb(struc
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
diff --git a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
index 181706640532..011f93c9ca92 100644
--- a/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
+++ b/patches/net-core-cpuhotplug-drain-input_pkt_queue-lockless.patch
@@ -35,7 +35,7 @@ Cc: stable-rt@vger.kernel.org
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -8370,7 +8370,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8422,7 +8422,7 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
diff --git a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
index 2d6cb2251efb..22997993e483 100644
--- a/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
+++ b/patches/net-core-protect-users-of-napi_alloc_cache-against-r.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
-@@ -361,6 +361,7 @@ struct napi_alloc_cache {
+@@ -336,6 +336,7 @@ struct napi_alloc_cache {
static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache);
static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache);
static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock);
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
-@@ -390,9 +391,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
+@@ -365,9 +366,13 @@ EXPORT_SYMBOL(netdev_alloc_frag);
static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
@@ -41,7 +41,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
void *napi_alloc_frag(unsigned int fragsz)
-@@ -486,9 +491,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
+@@ -461,9 +466,10 @@ EXPORT_SYMBOL(__netdev_alloc_skb);
struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
gfp_t gfp_mask)
{
@@ -53,7 +53,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
len += NET_SKB_PAD + NET_IP_ALIGN;
-@@ -506,7 +512,10 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -481,7 +487,10 @@ struct sk_buff *__napi_alloc_skb(struct
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(!data))
return NULL;
-@@ -517,7 +526,7 @@ struct sk_buff *__napi_alloc_skb(struct
+@@ -492,7 +501,7 @@ struct sk_buff *__napi_alloc_skb(struct
}
/* use OR instead of assignment to avoid clearing of bits in mask */
@@ -73,7 +73,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
skb->pfmemalloc = 1;
skb->head_frag = 1;
-@@ -768,23 +777,26 @@ void __consume_stateless_skb(struct sk_b
+@@ -724,23 +733,26 @@ void __consume_stateless_skb(struct sk_b
void __kfree_skb_flush(void)
{
@@ -102,7 +102,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* record skb to CPU local list */
nc->skb_cache[nc->skb_count++] = skb;
-@@ -799,6 +811,7 @@ static inline void _kfree_skb_defer(stru
+@@ -755,6 +767,7 @@ static inline void _kfree_skb_defer(stru
nc->skb_cache);
nc->skb_count = 0;
}
diff --git a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
index fc03714dfd35..aa2e8c055042 100644
--- a/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
+++ b/patches/net-dev-always-take-qdisc-s-busylock-in-__dev_xmit_s.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3162,7 +3162,11 @@ static inline int __dev_xmit_skb(struct
+@@ -3158,7 +3158,11 @@ static inline int __dev_xmit_skb(struct
* This permits qdisc->running owner to get the lock more
* often and dequeue packets faster.
*/
diff --git a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
index ca650f4b9b03..c15a395b35ab 100644
--- a/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
+++ b/patches/net-fix-iptable-xt-write-recseq-begin-rt-fallout.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
-@@ -5,6 +5,7 @@
+@@ -6,6 +6,7 @@
#include <linux/netdevice.h>
#include <linux/static_key.h>
#include <linux/netfilter.h>
@@ -24,7 +24,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <uapi/linux/netfilter/x_tables.h>
/* Test a struct->invflags and a boolean for inequality */
-@@ -337,6 +338,8 @@ void xt_free_table_info(struct xt_table_
+@@ -338,6 +339,8 @@ void xt_free_table_info(struct xt_table_
*/
DECLARE_PER_CPU(seqcount_t, xt_recseq);
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* xt_tee_enabled - true if x_tables needs to handle reentrancy
*
* Enabled if current ip(6)tables ruleset has at least one -j TEE rule.
-@@ -357,6 +360,9 @@ static inline unsigned int xt_write_recs
+@@ -358,6 +361,9 @@ static inline unsigned int xt_write_recs
{
unsigned int addend;
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Low order bit of sequence is set if we already
* called xt_write_recseq_begin().
-@@ -387,6 +393,7 @@ static inline void xt_write_recseq_end(u
+@@ -388,6 +394,7 @@ static inline void xt_write_recseq_end(u
/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
smp_wmb();
__this_cpu_add(xt_recseq.sequence, addend);
@@ -53,14 +53,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/net/netfilter/core.c
+++ b/net/netfilter/core.c
-@@ -22,12 +22,18 @@
+@@ -21,6 +21,7 @@
+ #include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
- #include <linux/slab.h>
+#include <linux/locallock.h>
+ #include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
- #include <net/sock.h>
+@@ -28,6 +29,11 @@
#include "nf_internals.h"
diff --git a/patches/net-make-devnet_rename_seq-a-mutex.patch b/patches/net-make-devnet_rename_seq-a-mutex.patch
index b254fb6f74b9..122cd3459c99 100644
--- a/patches/net-make-devnet_rename_seq-a-mutex.patch
+++ b/patches/net-make-devnet_rename_seq-a-mutex.patch
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -194,6 +194,7 @@ static unsigned int napi_gen_id = NR_CPU
+@@ -195,6 +195,7 @@ static unsigned int napi_gen_id = NR_CPU
static DEFINE_READ_MOSTLY_HASHTABLE(napi_hash, 8);
static seqcount_t devnet_rename_seq;
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline void dev_base_seq_inc(struct net *net)
{
-@@ -919,7 +920,8 @@ int netdev_get_name(struct net *net, cha
+@@ -920,7 +921,8 @@ int netdev_get_name(struct net *net, cha
strcpy(name, dev->name);
rcu_read_unlock();
if (read_seqcount_retry(&devnet_rename_seq, seq)) {
@@ -39,7 +39,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto retry;
}
-@@ -1188,20 +1190,17 @@ int dev_change_name(struct net_device *d
+@@ -1189,20 +1191,17 @@ int dev_change_name(struct net_device *d
if (dev->flags & IFF_UP)
return -EBUSY;
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (oldname[0] && !strchr(oldname, '%'))
netdev_info(dev, "renamed from %s\n", oldname);
-@@ -1214,11 +1213,12 @@ int dev_change_name(struct net_device *d
+@@ -1215,11 +1214,12 @@ int dev_change_name(struct net_device *d
if (ret) {
memcpy(dev->name, oldname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
@@ -82,7 +82,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
netdev_adjacent_rename_links(dev, oldname);
-@@ -1239,7 +1239,8 @@ int dev_change_name(struct net_device *d
+@@ -1240,7 +1240,8 @@ int dev_change_name(struct net_device *d
/* err >= 0 after dev_alloc_name() or stores the first errno */
if (err >= 0) {
err = ret;
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
memcpy(dev->name, oldname, IFNAMSIZ);
memcpy(oldname, newname, IFNAMSIZ);
dev->name_assign_type = old_assign_type;
-@@ -1252,6 +1253,11 @@ int dev_change_name(struct net_device *d
+@@ -1253,6 +1254,11 @@ int dev_change_name(struct net_device *d
}
return err;
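
The conversion refreshed here swaps the seqcount writer's busy retry for sleeping: a reader that observes a torn rename takes and immediately drops the writer's mutex, i.e. it blocks until the rename completes instead of spinning against a possibly-preempted writer. Reader side, sketched under that assumption:

	static DEFINE_MUTEX(devnet_rename_lock);

	int netdev_get_name(struct net *net, char *name, int ifindex)
	{
		struct net_device *dev;
		unsigned int seq;

	retry:
		seq = raw_seqcount_begin(&devnet_rename_seq);
		rcu_read_lock();
		dev = dev_get_by_index_rcu(net, ifindex);
		if (!dev) {
			rcu_read_unlock();
			return -ENODEV;
		}
		strcpy(name, dev->name);
		rcu_read_unlock();
		if (read_seqcount_retry(&devnet_rename_seq, seq)) {
			mutex_lock(&devnet_rename_lock);   /* wait out the writer */
			mutex_unlock(&devnet_rename_lock);
			goto retry;
		}
		return 0;
	}
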
diff --git a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
index 478d1fe810ec..1f81b3171a86 100644
--- a/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
+++ b/patches/net-move-xmit_recursion-to-per-task-variable-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2443,14 +2443,53 @@ void netdev_freemem(struct net_device *d
+@@ -2433,14 +2433,53 @@ void netdev_freemem(struct net_device *d
void synchronize_net(void);
int init_dummy_netdev(struct net_device *dev);
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1096,6 +1096,9 @@ struct task_struct {
+@@ -1107,6 +1107,9 @@ struct task_struct {
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
#endif
@@ -92,7 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct task_struct *oom_reaper_list;
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3225,8 +3225,10 @@ static void skb_update_prio(struct sk_bu
+@@ -3221,8 +3221,10 @@ static void skb_update_prio(struct sk_bu
#define skb_update_prio(skb)
#endif
@@ -103,7 +103,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* dev_loopback_xmit - loop back @skb
-@@ -3467,8 +3469,7 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3463,8 +3465,7 @@ static int __dev_queue_xmit(struct sk_bu
int cpu = smp_processor_id(); /* ok because BHs are off */
if (txq->xmit_lock_owner != cpu) {
@@ -113,7 +113,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto recursion_alert;
skb = validate_xmit_skb(skb, dev);
-@@ -3478,9 +3479,9 @@ static int __dev_queue_xmit(struct sk_bu
+@@ -3474,9 +3475,9 @@ static int __dev_queue_xmit(struct sk_bu
HARD_TX_LOCK(dev, txq, cpu);
if (!netif_xmit_stopped(txq)) {
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
--- a/net/core/filter.c
+++ b/net/core/filter.c
-@@ -1680,7 +1680,7 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1694,7 +1694,7 @@ static inline int __bpf_tx_skb(struct ne
{
int ret;
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
net_crit_ratelimited("bpf: recursion limit reached on datapath, buggy bpf program?\n");
kfree_skb(skb);
return -ENETDOWN;
-@@ -1688,9 +1688,9 @@ static inline int __bpf_tx_skb(struct ne
+@@ -1702,9 +1702,9 @@ static inline int __bpf_tx_skb(struct ne
skb->dev = dev;
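
Background for the hunks above: upstream guards against recursive transmit with a per-CPU counter, which is only sound while the xmit path can neither be preempted nor migrated; on RT it can be both, so the counter moves into task_struct. The accessors end up in roughly this shape (helper names reconstructed for illustration; the patch's exact ones may differ):

	#ifdef CONFIG_PREEMPT_RT_FULL
	/* the task may migrate mid-xmit on RT: count recursion per task */
	static inline void xmit_rec_inc(void)  { current->xmit_recursion++; }
	static inline void xmit_rec_dec(void)  { current->xmit_recursion--; }
	static inline int  xmit_rec_read(void) { return current->xmit_recursion; }
	#else
	/* !RT: the path runs with BHs off and stays on one CPU */
	static inline void xmit_rec_inc(void)  { __this_cpu_inc(xmit_recursion); }
	static inline void xmit_rec_dec(void)  { __this_cpu_dec(xmit_recursion); }
	static inline int  xmit_rec_read(void) { return __this_cpu_read(xmit_recursion); }
	#endif
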
diff --git a/patches/net-prevent-abba-deadlock.patch b/patches/net-prevent-abba-deadlock.patch
index 992a12e4abe8..ed9035be543b 100644
--- a/patches/net-prevent-abba-deadlock.patch
+++ b/patches/net-prevent-abba-deadlock.patch
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/sock.c
+++ b/net/core/sock.c
-@@ -2704,12 +2704,11 @@ void lock_sock_nested(struct sock *sk, i
+@@ -2762,12 +2762,11 @@ void lock_sock_nested(struct sock *sk, i
if (sk->sk_lock.owned)
__lock_sock(sk);
sk->sk_lock.owned = 1;
diff --git a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
index 6edeb42a034f..986e2823feb6 100644
--- a/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
+++ b/patches/net-provide-a-way-to-delegate-processing-a-softirq-t.patch
@@ -20,7 +20,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -517,6 +517,14 @@ extern void thread_do_softirq(void);
+@@ -519,6 +519,14 @@ extern void thread_do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
extern void __raise_softirq_irqoff(unsigned int nr);
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void raise_softirq_irqoff(unsigned int nr)
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -5601,7 +5601,7 @@ static __latent_entropy void net_rx_acti
+@@ -5642,7 +5642,7 @@ static __latent_entropy void net_rx_acti
list_splice_tail(&repoll, &list);
list_splice(&list, &sd->poll_list);
if (!list_empty(&sd->poll_list))
diff --git a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
index 24b22d11537f..136412182c42 100644
--- a/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
+++ b/patches/net-sched-dev_deactivate_many-use-msleep-1-instead-o.patch
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
-@@ -926,7 +926,7 @@ void dev_deactivate_many(struct list_hea
+@@ -930,7 +930,7 @@ void dev_deactivate_many(struct list_hea
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
while (some_qdisc_is_busy(dev))
diff --git a/patches/net-take-the-tcp_sk_lock-lock-with-BH-disabled.patch b/patches/net-take-the-tcp_sk_lock-lock-with-BH-disabled.patch
index dea3e8fe68bf..47e28476304a 100644
--- a/patches/net-take-the-tcp_sk_lock-lock-with-BH-disabled.patch
+++ b/patches/net-take-the-tcp_sk_lock-lock-with-BH-disabled.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
-@@ -712,8 +712,8 @@ static void tcp_v4_send_reset(const stru
+@@ -711,8 +711,8 @@ static void tcp_v4_send_reset(const stru
arg.tos = ip_hdr(skb)->tos;
arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
-@@ -721,8 +721,8 @@ static void tcp_v4_send_reset(const stru
+@@ -720,8 +720,8 @@ static void tcp_v4_send_reset(const stru
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_TCP_MD5SIG
out:
-@@ -800,16 +800,16 @@ static void tcp_v4_send_ack(const struct
+@@ -799,16 +799,16 @@ static void tcp_v4_send_ack(const struct
arg.bound_dev_if = oif;
arg.tos = tos;
arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
diff --git a/patches/net-use-cpu-chill.patch b/patches/net-use-cpu-chill.patch
index 9c583c082b0a..ffe94af5b5c3 100644
--- a/patches/net-use-cpu-chill.patch
+++ b/patches/net-use-cpu-chill.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
-@@ -702,7 +703,7 @@ static void prb_retire_rx_blk_timer_expi
+@@ -700,7 +701,7 @@ static void prb_retire_rx_blk_timer_expi
if (BLOCK_NUM_PKTS(pbd)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -964,7 +965,7 @@ static void prb_retire_current_block(str
+@@ -962,7 +963,7 @@ static void prb_retire_current_block(str
if (!(status & TP_STATUS_BLK_TMO)) {
while (atomic_read(&pkc->blk_fill_in_prog)) {
/* Waiting for skb_copy_bits to finish... */
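
The cpu_relax() to cpu_chill() substitution refreshed above exists because on RT the spinning task can outprioritize, and therefore indefinitely starve, the very context it is waiting on. cpu_chill() trades the busy loop for a short sleep; a simplified sketch of the RT helper (the tree's real version additionally juggles the freezer flags):

	void cpu_chill(void)
	{
		ktime_t chill_time = ktime_set(0, NSEC_PER_MSEC);

		/* sleep ~1ms so the lower-priority holder can make progress */
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_hrtimeout(&chill_time, HRTIMER_MODE_REL);
	}
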
diff --git a/patches/net_disable_NET_RX_BUSY_POLL.patch b/patches/net_disable_NET_RX_BUSY_POLL.patch
index 5d833f6db86b..75bf42ac77c1 100644
--- a/patches/net_disable_NET_RX_BUSY_POLL.patch
+++ b/patches/net_disable_NET_RX_BUSY_POLL.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/net/Kconfig
+++ b/net/Kconfig
-@@ -278,7 +278,7 @@ config CGROUP_NET_CLASSID
+@@ -272,7 +272,7 @@ config CGROUP_NET_CLASSID
config NET_RX_BUSY_POLL
bool
diff --git a/patches/oleg-signal-rt-fix.patch b/patches/oleg-signal-rt-fix.patch
index 18385324b9ee..a9088244cc9b 100644
--- a/patches/oleg-signal-rt-fix.patch
+++ b/patches/oleg-signal-rt-fix.patch
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -149,6 +149,13 @@ static void exit_to_usermode_loop(struct
+@@ -150,6 +150,13 @@ static void exit_to_usermode_loop(struct
if (cached_flags & _TIF_NEED_RESCHED)
schedule();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
-@@ -27,6 +27,19 @@ typedef struct {
+@@ -28,6 +28,19 @@ typedef struct {
#define SA_IA32_ABI 0x02000000u
#define SA_X32_ABI 0x01000000u
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -794,6 +794,10 @@ struct task_struct {
+@@ -795,6 +795,10 @@ struct task_struct {
/* Restored if set_restore_sigmask() was used: */
sigset_t saved_sigmask;
struct sigpending pending;
diff --git a/patches/panic-disable-random-on-rt.patch b/patches/panic-disable-random-on-rt.patch
index 71e256b060f0..c21c7d0160ab 100644
--- a/patches/panic-disable-random-on-rt.patch
+++ b/patches/panic-disable-random-on-rt.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/panic.c
+++ b/kernel/panic.c
-@@ -481,9 +481,11 @@ static u64 oops_id;
+@@ -482,9 +482,11 @@ static u64 oops_id;
static int init_oops_id(void)
{
diff --git a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
index 12489119adef..f144e5c274a5 100644
--- a/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
+++ b/patches/patch-to-introduce-rcu-bh-qs-where-safe-from-softirq.patch
@@ -30,9 +30,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -118,11 +118,7 @@ static inline int rcu_preempt_depth(void
- /* Internal to kernel */
+@@ -117,11 +117,7 @@ static inline int rcu_preempt_depth(void
void rcu_init(void);
+ extern int rcu_scheduler_active __read_mostly;
void rcu_sched_qs(void);
-#ifdef CONFIG_PREEMPT_RT_FULL
-static inline void rcu_bh_qs(void) { }
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_cpu_starting(unsigned int cpu);
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -246,7 +246,14 @@ void rcu_sched_qs(void)
+@@ -243,7 +243,14 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <uapi/linux/sched/types.h>
#include "../time/tick-internal.h"
#include "../locking/rtmutex_common.h"
-@@ -1285,7 +1286,7 @@ static void rcu_prepare_kthreads(int cpu
+@@ -1299,7 +1300,7 @@ static void rcu_prepare_kthreads(int cpu
#endif /* #else #ifdef CONFIG_RCU_BOOST */
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Check to see if any future RCU-related work will need to be done
-@@ -1301,7 +1302,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1315,7 +1316,9 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
*nextevt = KTIME_MAX;
return rcu_cpu_has_callbacks(NULL);
}
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
* after it.
-@@ -1397,6 +1400,8 @@ static bool __maybe_unused rcu_try_advan
+@@ -1411,6 +1414,8 @@ static bool __maybe_unused rcu_try_advan
return cbs_ready;
}
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
* to invoke. If the CPU has callbacks, try to advance them. Tell the
-@@ -1439,6 +1444,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
+@@ -1453,6 +1458,7 @@ int rcu_needs_cpu(u64 basemono, u64 *nex
*nextevt = basemono + dj * TICK_NSEC;
return 0;
}
diff --git a/patches/peter_zijlstra-frob-rcu.patch b/patches/peter_zijlstra-frob-rcu.patch
index 169d38d2fd53..ff4fb670f094 100644
--- a/patches/peter_zijlstra-frob-rcu.patch
+++ b/patches/peter_zijlstra-frob-rcu.patch
@@ -155,7 +155,7 @@ Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
-@@ -460,7 +460,7 @@ void rcu_read_unlock_special(struct task
+@@ -466,7 +466,7 @@ void rcu_read_unlock_special(struct task
}
/* Hardware IRQ handlers cannot block, complain if they get here. */
diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch
index f479305b5491..d01b42da6bef 100644
--- a/patches/peterz-percpu-rwsem-rt.patch
+++ b/patches/peterz-percpu-rwsem-rt.patch
@@ -18,7 +18,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
--- a/fs/locks.c
+++ b/fs/locks.c
-@@ -935,7 +935,7 @@ static int flock_lock_inode(struct inode
+@@ -945,7 +945,7 @@ static int flock_lock_inode(struct inode
return -ENOMEM;
}
@@ -27,7 +27,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
if (request->fl_flags & FL_ACCESS)
goto find_conflict;
-@@ -976,7 +976,7 @@ static int flock_lock_inode(struct inode
+@@ -986,7 +986,7 @@ static int flock_lock_inode(struct inode
out:
spin_unlock(&ctx->flc_lock);
@@ -36,7 +36,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
if (new_fl)
locks_free_lock(new_fl);
locks_dispose_list(&dispose);
-@@ -1013,7 +1013,7 @@ static int posix_lock_inode(struct inode
+@@ -1023,7 +1023,7 @@ static int posix_lock_inode(struct inode
new_fl2 = locks_alloc_lock();
}
@@ -45,7 +45,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
-@@ -1185,7 +1185,7 @@ static int posix_lock_inode(struct inode
+@@ -1195,7 +1195,7 @@ static int posix_lock_inode(struct inode
}
out:
spin_unlock(&ctx->flc_lock);
@@ -54,7 +54,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
/*
* Free any unused locks.
*/
-@@ -1460,7 +1460,7 @@ int __break_lease(struct inode *inode, u
+@@ -1470,7 +1470,7 @@ int __break_lease(struct inode *inode, u
return error;
}
@@ -63,7 +63,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
-@@ -1512,13 +1512,13 @@ int __break_lease(struct inode *inode, u
+@@ -1522,13 +1522,13 @@ int __break_lease(struct inode *inode, u
locks_insert_block(fl, new_fl);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
@@ -79,7 +79,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
-@@ -1535,7 +1535,7 @@ int __break_lease(struct inode *inode, u
+@@ -1545,7 +1545,7 @@ int __break_lease(struct inode *inode, u
}
out:
spin_unlock(&ctx->flc_lock);
@@ -88,7 +88,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
-@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
+@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
@@ -97,7 +97,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
-@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
+@@ -1629,7 +1629,7 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
@@ -106,7 +106,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
}
-@@ -1694,7 +1694,7 @@ generic_add_lease(struct file *filp, lon
+@@ -1704,7 +1704,7 @@ generic_add_lease(struct file *filp, lon
return -EINVAL;
}
@@ -115,7 +115,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg, lease->fl_flags);
-@@ -1765,7 +1765,7 @@ generic_add_lease(struct file *filp, lon
+@@ -1775,7 +1775,7 @@ generic_add_lease(struct file *filp, lon
lease->fl_lmops->lm_setup(lease, priv);
out:
spin_unlock(&ctx->flc_lock);
@@ -124,7 +124,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
-@@ -1788,7 +1788,7 @@ static int generic_delete_lease(struct f
+@@ -1798,7 +1798,7 @@ static int generic_delete_lease(struct f
return error;
}
@@ -133,7 +133,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp &&
-@@ -1801,7 +1801,7 @@ static int generic_delete_lease(struct f
+@@ -1811,7 +1811,7 @@ static int generic_delete_lease(struct f
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
@@ -142,7 +142,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
return error;
}
-@@ -2495,13 +2495,13 @@ locks_remove_lease(struct file *filp, st
+@@ -2535,13 +2535,13 @@ locks_remove_lease(struct file *filp, st
if (list_empty(&ctx->flc_lease))
return;
@@ -160,7 +160,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
}
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
-@@ -28,7 +28,7 @@ static struct percpu_rw_semaphore name =
+@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name =
extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
@@ -169,7 +169,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
{
might_sleep();
-@@ -46,16 +46,10 @@ static inline void percpu_down_read_pree
+@@ -47,16 +47,10 @@ static inline void percpu_down_read_pree
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
@@ -187,7 +187,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
preempt_enable();
}
-@@ -82,13 +76,9 @@ static inline int percpu_down_read_trylo
+@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylo
return ret;
}
@@ -203,7 +203,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
/*
* Same as in percpu_down_read().
*/
-@@ -101,12 +91,6 @@ static inline void percpu_up_read_preemp
+@@ -102,12 +92,6 @@ static inline void percpu_up_read_preemp
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
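
All of the fs/locks.c churn above tracks a single rename: the percpu_down_read_preempt_disable()/percpu_up_read_preempt_enable() variants disappear, because fs/locks.c immediately takes ctx->flc_lock (a sleeping lock on RT) inside the read section and therefore must not enter it with preemption disabled. The surviving read-side fast path, as the percpu-rwsem.h hunks leave it:

	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	{
		might_sleep();
		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);

		preempt_disable();
		__this_cpu_inc(*sem->read_count);
		if (unlikely(!rcu_sync_is_idle(&sem->rss)))
			__percpu_down_read(sem, false);
		/* preemption stays off only across the fast path itself */
		preempt_enable();
	}
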
diff --git a/patches/pid.h-include-atomic.h.patch b/patches/pid.h-include-atomic.h.patch
index 8803ac8cd388..9510164b0b9a 100644
--- a/patches/pid.h-include-atomic.h.patch
+++ b/patches/pid.h-include-atomic.h.patch
@@ -26,7 +26,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
-@@ -2,6 +2,7 @@
+@@ -3,6 +3,7 @@
#define _LINUX_PID_H
#include <linux/rculist.h>
diff --git a/patches/ping-sysrq.patch b/patches/ping-sysrq.patch
index b167ccce20d2..63a88d3d73c2 100644
--- a/patches/ping-sysrq.patch
+++ b/patches/ping-sysrq.patch
@@ -41,7 +41,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
-@@ -78,6 +78,7 @@ struct netns_ipv4 {
+@@ -79,6 +79,7 @@ struct netns_ipv4 {
int sysctl_icmp_echo_ignore_all;
int sysctl_icmp_echo_ignore_broadcasts;
@@ -104,7 +104,7 @@ Signed-off-by: Carsten Emde <C.Emde@osadl.org>
return true;
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
-@@ -768,6 +768,13 @@ static struct ctl_table ipv4_net_table[]
+@@ -772,6 +772,13 @@ static struct ctl_table ipv4_net_table[]
.proc_handler = proc_dointvec
},
{
diff --git a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
index ed187d3cd3f6..c7e4ae529866 100644
--- a/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
+++ b/patches/posix-timers-thread-posix-cpu-timers-on-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
-@@ -168,6 +168,12 @@ extern struct cred init_cred;
+@@ -163,6 +163,12 @@ extern struct cred init_cred;
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
# define INIT_VTIME(tsk) \
.vtime.seqcount = SEQCNT_ZERO(tsk.vtime.seqcount), \
-@@ -283,6 +289,7 @@ extern struct cred init_cred;
+@@ -277,6 +283,7 @@ extern struct cred init_cred;
INIT_CPU_TIMERS(tsk) \
.pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
.timer_slack_ns = 50000, /* 50 usec default slack */ \
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -744,6 +744,9 @@ struct task_struct {
+@@ -745,6 +745,9 @@ struct task_struct {
#ifdef CONFIG_POSIX_TIMERS
struct task_cputime cputime_expires;
struct list_head cpu_timers[3];
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Process credentials: */
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1481,6 +1481,9 @@ static void rt_mutex_init_task(struct ta
+@@ -1497,6 +1497,9 @@ static void rt_mutex_init_task(struct ta
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
@@ -66,7 +66,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
tsk->cputime_expires.sched_exp = 0;
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
-@@ -2,8 +2,10 @@
+@@ -3,8 +3,10 @@
* Implement CPU time clocks for the POSIX clock interface.
*/
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/posix-timers.h>
#include <linux/errno.h>
#include <linux/math64.h>
-@@ -13,6 +15,7 @@
+@@ -14,6 +16,7 @@
#include <linux/tick.h>
#include <linux/workqueue.h>
#include <linux/compat.h>
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "posix-timers.h"
-@@ -602,7 +605,7 @@ static int posix_cpu_timer_set(struct k_
+@@ -603,7 +606,7 @@ static int posix_cpu_timer_set(struct k_
/*
* Disarm any old timer after extracting its expiry time.
*/
@@ -94,7 +94,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = 0;
old_incr = timer->it.cpu.incr;
-@@ -1035,7 +1038,7 @@ static void posix_cpu_timer_rearm(struct
+@@ -1034,7 +1037,7 @@ static void posix_cpu_timer_rearm(struct
/*
* Now re-arm for the new expiry time.
*/
@@ -103,7 +103,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
-@@ -1120,13 +1123,13 @@ static inline int fastpath_timer_check(s
+@@ -1119,13 +1122,13 @@ static inline int fastpath_timer_check(s
* already updated our counts. We need to check if any timers fire now.
* Interrupts are disabled.
*/
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The fast path checks that there are no expired thread or thread
-@@ -1180,6 +1183,152 @@ void run_posix_cpu_timers(struct task_st
+@@ -1179,6 +1182,152 @@ void run_posix_cpu_timers(struct task_st
}
}
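
The large block elided above (the +146-line hunk at the end of posix-cpu-timers.c) is the RT machinery itself: timer expiry moves out of hard interrupt context into a per-CPU kthread, and the tick merely chains the task onto a per-CPU list and wakes that thread. Loosely reconstructed (per-CPU variable names are illustrative, and teardown/error handling is omitted):

	static DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
	static DEFINE_PER_CPU(struct task_struct *, posix_timer_task);

	void run_posix_cpu_timers(struct task_struct *tsk)
	{
		/* called from the tick with interrupts disabled */
		if (!tsk->posix_timer_list) {
			struct task_struct *head = __this_cpu_read(posix_timer_tasklist);

			get_task_struct(tsk);
			/* singly-linked chain; the tail points to itself */
			tsk->posix_timer_list = head ? head : tsk;
			__this_cpu_write(posix_timer_tasklist, tsk);
			wake_up_process(__this_cpu_read(posix_timer_task));
		}
	}
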
diff --git a/patches/power-disable-highmem-on-rt.patch b/patches/power-disable-highmem-on-rt.patch
index 2034922207fd..359d477c6010 100644
--- a/patches/power-disable-highmem-on-rt.patch
+++ b/patches/power-disable-highmem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -382,7 +382,7 @@ menu "Kernel options"
+@@ -390,7 +390,7 @@ menu "Kernel options"
config HIGHMEM
bool "High memory support"
diff --git a/patches/power-use-generic-rwsem-on-rt.patch b/patches/power-use-generic-rwsem-on-rt.patch
index a84643ef67e5..d1d414f54d92 100644
--- a/patches/power-use-generic-rwsem-on-rt.patch
+++ b/patches/power-use-generic-rwsem-on-rt.patch
@@ -11,7 +11,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -99,10 +99,11 @@ config LOCKDEP_SUPPORT
+@@ -111,10 +111,11 @@ config LOCKDEP_SUPPORT
config RWSEM_GENERIC_SPINLOCK
bool
diff --git a/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch b/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
index 8b2b3a329509..913bb4bcc925 100644
--- a/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
+++ b/patches/powerpc-kvm-Disable-in-kernel-MPIC-emulation-for-PRE.patch
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
-@@ -176,6 +176,7 @@ config KVM_E500MC
+@@ -177,6 +177,7 @@ config KVM_E500MC
config KVM_MPIC
bool "KVM in-kernel MPIC emulation"
depends on KVM && E500
diff --git a/patches/powerpc-preempt-lazy-support.patch b/patches/powerpc-preempt-lazy-support.patch
index cb4bc6fa777a..4286b3d751a9 100644
--- a/patches/powerpc-preempt-lazy-support.patch
+++ b/patches/powerpc-preempt-lazy-support.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
-@@ -203,6 +203,7 @@ config PPC
+@@ -215,6 +215,7 @@ config PPC
select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI && !HAVE_HARDLOCKUP_DETECTOR_ARCH
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
select HAVE_SYSCALL_TRACEPOINTS
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
-@@ -35,6 +35,8 @@ struct thread_info {
+@@ -36,6 +36,8 @@ struct thread_info {
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
<0 => BUG */
@@ -34,7 +34,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long local_flags; /* private flags for thread */
#ifdef CONFIG_LIVEPATCH
unsigned long *livepatch_sp;
-@@ -80,8 +82,7 @@ static inline struct thread_info *curren
+@@ -81,8 +83,7 @@ static inline struct thread_info *curren
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_SIGPENDING 1 /* signal pending */
#define TIF_NEED_RESCHED 2 /* rescheduling necessary */
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_32BIT 4 /* 32 bit binary */
#define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */
#define TIF_PATCH_PENDING 6 /* pending live patching update */
-@@ -100,6 +101,8 @@ static inline struct thread_info *curren
+@@ -101,6 +102,8 @@ static inline struct thread_info *curren
#if defined(CONFIG_PPC64)
#define TIF_ELF2ABI 18 /* function descriptors must die! */
#endif
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* as above, but as bit values */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
-@@ -119,14 +122,16 @@ static inline struct thread_info *curren
+@@ -120,14 +123,16 @@ static inline struct thread_info *curren
#define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
#define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
#define _TIF_NOHZ (1<<TIF_NOHZ)
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
-@@ -844,7 +844,14 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -866,7 +866,14 @@ user_exc_return: /* r10 contains MSR_KE
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
andi. r8,r8,_TIF_NEED_RESCHED
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lwz r3,_MSR(r1)
andi. r0,r3,MSR_EE /* interrupts off? */
beq restore /* don't schedule if so */
-@@ -855,11 +862,11 @@ user_exc_return: /* r10 contains MSR_KE
+@@ -877,11 +884,11 @@ user_exc_return: /* r10 contains MSR_KE
*/
bl trace_hardirqs_off
#endif
@@ -113,7 +113,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
/* And now, to properly rebalance the above, we tell lockdep they
* are being turned back on, which will happen when we return
-@@ -1182,7 +1189,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
+@@ -1204,7 +1211,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRE
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
do_work: /* r10 contains MSR_KERNEL here */
@@ -122,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
beq do_user_signal
do_resched: /* r10 contains MSR_KERNEL here */
-@@ -1203,7 +1210,7 @@ do_resched: /* r10 contains MSR_KERNEL
+@@ -1225,7 +1232,7 @@ do_resched: /* r10 contains MSR_KERNEL
MTMSRD(r10) /* disable interrupts */
CURRENT_THREAD_INFO(r9, r1)
lwz r9,TI_FLAGS(r9)
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index cada2a5562d8..622f3747f352 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -68,7 +68,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -179,6 +179,20 @@ extern void preempt_count_sub(int val);
+@@ -180,6 +180,20 @@ extern void preempt_count_sub(int val);
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT_COUNT
#define preempt_disable() \
-@@ -187,6 +201,12 @@ do { \
+@@ -188,6 +202,12 @@ do { \
barrier(); \
} while (0)
@@ -102,7 +102,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define sched_preempt_enable_no_resched() \
do { \
barrier(); \
-@@ -240,6 +260,13 @@ do { \
+@@ -241,6 +261,13 @@ do { \
__preempt_schedule(); \
} while (0)
@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
-@@ -247,6 +274,12 @@ do { \
+@@ -248,6 +275,12 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -129,7 +129,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preempt_enable_notrace() \
do { \
barrier(); \
-@@ -313,7 +346,7 @@ do { \
+@@ -314,7 +347,7 @@ do { \
} while (0)
#define preempt_fold_need_resched() \
do { \
@@ -140,7 +140,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1559,6 +1559,44 @@ static inline int test_tsk_need_resched(
+@@ -1600,6 +1600,44 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (task->state & (__TASK_STOPPED | __TASK_TRACED))
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
-@@ -86,7 +86,17 @@ static inline int test_ti_thread_flag(st
+@@ -91,7 +91,17 @@ static inline int test_ti_thread_flag(st
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -208,7 +208,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline int arch_within_stack_frames(const void * const stack,
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
-@@ -63,6 +63,7 @@ struct trace_entry {
+@@ -64,6 +64,7 @@ struct trace_entry {
int pid;
unsigned short migrate_disable;
unsigned short padding;
@@ -282,7 +282,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2436,6 +2478,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2445,6 +2487,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3343,6 +3388,7 @@ static void __sched notrace __schedule(b
+@@ -3362,6 +3407,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -300,7 +300,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3518,6 +3564,30 @@ static void __sched notrace preempt_sche
+@@ -3552,6 +3598,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3532,7 +3602,8 @@ asmlinkage __visible void __sched notrac
+@@ -3566,7 +3636,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3559,6 +3630,9 @@ asmlinkage __visible void __sched notrac
+@@ -3593,6 +3664,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -351,7 +351,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5288,7 +5362,9 @@ void init_idle(struct task_struct *idle,
+@@ -5332,7 +5406,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -362,7 +362,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -6844,6 +6920,7 @@ void migrate_disable(void)
+@@ -6888,6 +6964,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -370,7 +370,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -6911,6 +6988,7 @@ void migrate_enable(void)
+@@ -6955,6 +7032,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -378,7 +378,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -6919,6 +6997,7 @@ void migrate_enable(void)
+@@ -6963,6 +7041,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
@@ -388,7 +388,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(migrate_enable);
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
-@@ -3828,7 +3828,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3840,7 +3840,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
if (delta_exec > ideal_runtime) {
@@ -397,7 +397,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The current task ran long enough, ensure it doesn't get
* re-elected due to buddy favours.
-@@ -3852,7 +3852,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
+@@ -3864,7 +3864,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq
return;
if (delta > ideal_runtime)
@@ -406,7 +406,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void
-@@ -3994,7 +3994,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
+@@ -4006,7 +4006,7 @@ entity_tick(struct cfs_rq *cfs_rq, struc
* validating it and just reschedule.
*/
if (queued) {
@@ -415,7 +415,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
/*
-@@ -4176,7 +4176,7 @@ static void __account_cfs_rq_runtime(str
+@@ -4188,7 +4188,7 @@ static void __account_cfs_rq_runtime(str
* hierarchy can be throttled
*/
if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr))
@@ -424,7 +424,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static __always_inline
-@@ -4825,7 +4825,7 @@ static void hrtick_start_fair(struct rq
+@@ -4837,7 +4837,7 @@ static void hrtick_start_fair(struct rq
if (delta < 0) {
if (rq->curr == p)
@@ -433,7 +433,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return;
}
hrtick_start(rq, delta);
-@@ -6161,7 +6161,7 @@ static void check_preempt_wakeup(struct
+@@ -6230,7 +6230,7 @@ static void check_preempt_wakeup(struct
return;
preempt:
@@ -442,7 +442,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Only set the backward buddy when the current task is still
* on the rq. This can happen when a wakeup gets interleaved
-@@ -8990,7 +8990,7 @@ static void task_fork_fair(struct task_s
+@@ -9084,7 +9084,7 @@ static void task_fork_fair(struct task_s
* 'current' within the tree based on its new key value.
*/
swap(curr->vruntime, se->vruntime);
@@ -451,7 +451,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
se->vruntime -= cfs_rq->min_vruntime;
-@@ -9014,7 +9014,7 @@ prio_changed_fair(struct rq *rq, struct
+@@ -9108,7 +9108,7 @@ prio_changed_fair(struct rq *rq, struct
*/
if (rq->curr == p) {
if (p->prio > oldprio)
@@ -462,7 +462,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -47,6 +47,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
+@@ -48,6 +48,9 @@ SCHED_FEAT(NONTASK_CAPACITY, true)
#ifdef CONFIG_PREEMPT_RT_FULL
SCHED_FEAT(TTWU_QUEUE, false)
@@ -474,7 +474,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1532,6 +1532,15 @@ extern void init_sched_fair_class(void);
+@@ -1534,6 +1534,15 @@ extern void init_sched_fair_class(void);
extern void resched_curr(struct rq *rq);
extern void resched_cpu(int cpu);
@@ -492,7 +492,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -2126,6 +2126,7 @@ tracing_generic_entry_update(struct trac
+@@ -2129,6 +2129,7 @@ tracing_generic_entry_update(struct trac
struct task_struct *tsk = current;
entry->preempt_count = pc & 0xff;
@@ -500,7 +500,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
entry->pid = (tsk) ? tsk->pid : 0;
entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
-@@ -2136,7 +2137,8 @@ tracing_generic_entry_update(struct trac
+@@ -2139,7 +2140,8 @@ tracing_generic_entry_update(struct trac
((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
@@ -510,7 +510,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
entry->migrate_disable = (tsk) ? __migrate_disabled(tsk) & 0xFF : 0;
-@@ -3338,15 +3340,17 @@ get_total_entries(struct trace_buffer *b
+@@ -3341,15 +3343,17 @@ get_total_entries(struct trace_buffer *b
static void print_lat_help_header(struct seq_file *m)
{
@@ -537,7 +537,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
-@@ -3382,15 +3386,17 @@ static void print_func_help_header_irq(s
+@@ -3385,15 +3389,17 @@ static void print_func_help_header_irq(s
tgid ? tgid_space : space);
seq_printf(m, "# %s / _----=> need-resched\n",
tgid ? tgid_space : space);
@@ -562,7 +562,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
-@@ -126,6 +126,7 @@ struct kretprobe_trace_entry_head {
+@@ -127,6 +127,7 @@ struct kretprobe_trace_entry_head {
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
* SOFTIRQ - inside a softirq handler
@@ -570,7 +570,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*/
enum trace_flag_type {
TRACE_FLAG_IRQS_OFF = 0x01,
-@@ -135,6 +136,7 @@ enum trace_flag_type {
+@@ -136,6 +137,7 @@ enum trace_flag_type {
TRACE_FLAG_SOFTIRQ = 0x10,
TRACE_FLAG_PREEMPT_RESCHED = 0x20,
TRACE_FLAG_NMI = 0x40,
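
For orientation, the single idea behind every hunk in this patch: a second, "lazy" preemption level. Wakeups of SCHED_OTHER tasks set TIF_NEED_RESCHED_LAZY, which is only honoured at return-to-user and explicit scheduling points, while the real TIF_NEED_RESCHED (set for RT-class tasks) still preempts immediately. The decision the modified fastpaths implement is, roughly:

	static inline bool should_preempt_now(void)
	{
		/* an RT-class task wants the CPU: preempt unconditionally */
		if (test_thread_flag(TIF_NEED_RESCHED))
			return true;
		/* SCHED_OTHER wakeup: defer while inside a lazy section */
		if (test_thread_flag(TIF_NEED_RESCHED_LAZY))
			return preempt_lazy_count() == 0;
		return false;
	}

This is also why migrate_disable()/migrate_enable() in the sched/core.c hunks gain an extra lazy-count operation: a migrate-disabled region is exactly the kind of section where a SCHED_OTHER preemption would be pointless churn.
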
diff --git a/patches/preempt-nort-rt-variants.patch b/patches/preempt-nort-rt-variants.patch
index 3eeaeb360737..c9a87c546ee3 100644
--- a/patches/preempt-nort-rt-variants.patch
+++ b/patches/preempt-nort-rt-variants.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -180,7 +180,11 @@ do { \
+@@ -181,7 +181,11 @@ do { \
preempt_count_dec(); \
} while (0)
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -297,6 +301,18 @@ do { \
+@@ -298,6 +302,18 @@ do { \
set_preempt_need_resched(); \
} while (0)
diff --git a/patches/printk-kill.patch b/patches/printk-kill.patch
index 4c2f4e1e65e7..b281b87b6466 100644
--- a/patches/printk-kill.patch
+++ b/patches/printk-kill.patch
@@ -9,12 +9,12 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
include/linux/printk.h | 2 +
kernel/printk/printk.c | 79 ++++++++++++++++++++++++++++++++++++-------------
- kernel/watchdog_hld.c | 9 +++++
- 3 files changed, 70 insertions(+), 20 deletions(-)
+ kernel/watchdog_hld.c | 10 ++++++
+ 3 files changed, 71 insertions(+), 20 deletions(-)
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
-@@ -141,9 +141,11 @@ struct va_format {
+@@ -142,9 +142,11 @@ struct va_format {
#ifdef CONFIG_EARLY_PRINTK
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -87,8 +87,8 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PRINTK
DECLARE_WAIT_QUEUE_HEAD(log_wait);
/* the next printk record to read by syslog(READ) or /proc/kmsg */
-@@ -1704,6 +1756,13 @@ asmlinkage int vprintk_emit(int facility
- int printed_len = 0;
+@@ -1692,6 +1744,13 @@ asmlinkage int vprintk_emit(int facility
+ int printed_len;
bool in_sched = false;
+ /*
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (level == LOGLEVEL_SCHED) {
level = LOGLEVEL_DEFAULT;
in_sched = true;
-@@ -1875,26 +1934,6 @@ static bool suppress_message_printing(in
+@@ -1863,26 +1922,6 @@ static bool suppress_message_printing(in
#endif /* CONFIG_PRINTK */
@@ -130,15 +130,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
-@@ -21,6 +21,7 @@
- static DEFINE_PER_CPU(bool, hard_watchdog_warn);
+@@ -24,6 +24,8 @@ static DEFINE_PER_CPU(bool, hard_watchdo
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
+ static DEFINE_PER_CPU(struct perf_event *, dead_event);
+static DEFINE_RAW_SPINLOCK(watchdog_output_lock);
++
+ static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
-
-@@ -132,6 +133,13 @@ static void watchdog_overflow_callback(s
+@@ -134,6 +136,13 @@ static void watchdog_overflow_callback(s
/* only print hardlockups once */
if (__this_cpu_read(hard_watchdog_warn) == true)
return;
@@ -152,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
print_modules();
-@@ -149,6 +157,7 @@ static void watchdog_overflow_callback(s
+@@ -151,6 +160,7 @@ static void watchdog_overflow_callback(s
!test_and_set_bit(0, &hardlockup_allcpu_dumped))
trigger_allbutself_cpu_backtrace();
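
The watchdog_hld.c hunks above serialize the hard-lockup report with a raw spinlock, so that simultaneous reports from several CPUs cannot interleave once printk's own global serialization is taken out of the picture. In place, the callback is shaped like this (sketch; the once-only logic around it is unchanged):

	static DEFINE_RAW_SPINLOCK(watchdog_output_lock);

	/* inside watchdog_overflow_callback(), after the once-only check */
	raw_spin_lock(&watchdog_output_lock);

	pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
	print_modules();
	print_irqtrace_events(current);

	raw_spin_unlock(&watchdog_output_lock);
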
diff --git a/patches/printk-rt-aware.patch b/patches/printk-rt-aware.patch
index 77221aa95b5d..fab124dd2e26 100644
--- a/patches/printk-rt-aware.patch
+++ b/patches/printk-rt-aware.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1629,6 +1629,7 @@ static void call_console_drivers(const c
+@@ -1617,6 +1617,7 @@ static void call_console_drivers(const c
if (!console_drivers)
return;
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_console(con) {
if (exclusive_console && con != exclusive_console)
continue;
-@@ -1644,6 +1645,7 @@ static void call_console_drivers(const c
+@@ -1632,6 +1633,7 @@ static void call_console_drivers(const c
else
con->write(con, text, len);
}
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
int printk_delay_msec __read_mostly;
-@@ -1826,12 +1828,22 @@ asmlinkage int vprintk_emit(int facility
+@@ -1814,12 +1816,22 @@ asmlinkage int vprintk_emit(int facility
/* If called from the scheduler, we can not call up(). */
if (!in_sched) {
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
console_unlock();
}
-@@ -2287,10 +2299,15 @@ void console_unlock(void)
+@@ -2275,10 +2287,15 @@ void console_unlock(void)
console_seq++;
raw_spin_unlock(&logbuf_lock);
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 1822a6c1cbb3..b1ff6d10ba7a 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -31,8 +31,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -100,12 +100,8 @@ struct task_group;
- TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
- __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
+ __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
+ TASK_PARKED)
-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
-
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define task_contributes_to_load(task) ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
(task->flags & PF_FROZEN) == 0 && \
(task->state & TASK_NOLOAD) == 0)
-@@ -1552,6 +1548,51 @@ static inline int test_tsk_need_resched(
+@@ -1593,6 +1589,51 @@ static inline int test_tsk_need_resched(
return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
}
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1350,6 +1350,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1359,6 +1359,18 @@ int migrate_swap(struct task_struct *cur
return ret;
}
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1394,7 +1406,7 @@ unsigned long wait_task_inactive(struct
+@@ -1403,7 +1415,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1409,7 +1421,8 @@ unsigned long wait_task_inactive(struct
+@@ -1418,7 +1430,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/radix-tree-use-local-locks.patch b/patches/radix-tree-use-local-locks.patch
index 723777307bc0..271b7a0ad806 100644
--- a/patches/radix-tree-use-local-locks.patch
+++ b/patches/radix-tree-use-local-locks.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
-@@ -111,10 +111,7 @@ static inline bool idr_is_empty(const st
+@@ -167,10 +167,7 @@ static inline bool idr_is_empty(const st
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
*/
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static unsigned radix_tree_load_root(const struct radix_tree_root *root,
struct radix_tree_node **nodep, unsigned long *maxindex)
{
-@@ -2104,10 +2112,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
+@@ -2105,10 +2113,16 @@ EXPORT_SYMBOL(radix_tree_tagged);
void idr_preload(gfp_t gfp_mask)
{
if (__radix_tree_preload(gfp_mask, IDR_PRELOAD_SIZE))
@@ -154,7 +154,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* ida_pre_get - reserve resources for ida allocation
* @ida: ida handle
-@@ -2124,7 +2138,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
+@@ -2125,7 +2139,7 @@ int ida_pre_get(struct ida *ida, gfp_t g
* to return to the ida_pre_get() step.
*/
if (!__radix_tree_preload(gfp, IDA_PRELOAD_SIZE))
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index ae9807aacd7c..950e102bf062 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -69,6 +69,7 @@ struct irq_desc {
+@@ -70,6 +70,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
const struct cpumask *percpu_affinity;
--- a/include/linux/random.h
+++ b/include/linux/random.h
-@@ -31,7 +31,7 @@ static inline void add_latent_entropy(vo
+@@ -32,7 +32,7 @@ static inline void add_latent_entropy(vo
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value) __latent_entropy;
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
note_interrupt(desc, retval);
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
-@@ -1003,6 +1003,12 @@ static int irq_thread(void *data)
+@@ -1027,6 +1027,12 @@ static int irq_thread(void *data)
if (action_ret == IRQ_WAKE_THREAD)
irq_wake_secondary(desc, action);
diff --git a/patches/rbtree-include-rcu.h-because-we-use-it.patch b/patches/rbtree-include-rcu.h-because-we-use-it.patch
index 93595e83829e..3070854defd0 100644
--- a/patches/rbtree-include-rcu.h-because-we-use-it.patch
+++ b/patches/rbtree-include-rcu.h-because-we-use-it.patch
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Please note - only struct rb_augment_callbacks and the prototypes for
--- a/include/linux/rbtree_latch.h
+++ b/include/linux/rbtree_latch.h
-@@ -34,6 +34,7 @@
+@@ -35,6 +35,7 @@
#include <linux/rbtree.h>
#include <linux/seqlock.h>
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index 6b345126b56d..8afe96b18bc5 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -3042,18 +3047,17 @@ static void
+@@ -2946,18 +2951,17 @@ static void
/*
* Do RCU core processing for the current CPU.
*/
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the specified type of RCU
* does not support RCU priority boosting, just do a direct call,
-@@ -3065,18 +3069,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2969,18 +2973,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -4262,7 +4353,6 @@ void __init rcu_init(void)
+@@ -4221,7 +4312,6 @@ void __init rcu_init(void)
if (dump_tree)
rcu_dump_rcu_node_tree(&rcu_sched_state);
__rcu_init_preempt();
@@ -178,7 +178,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* We don't need protection against CPU-hotplug here because
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -447,12 +447,10 @@ extern struct rcu_state rcu_preempt_stat
+@@ -438,12 +438,10 @@ extern struct rcu_state rcu_preempt_stat
int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
bool rcu_eqs_special_set(int cpu);
@@ -191,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef RCU_TREE_NONCORE
-@@ -472,10 +470,9 @@ void call_rcu(struct rcu_head *head, rcu
+@@ -463,10 +461,9 @@ void call_rcu(struct rcu_head *head, rcu
static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
@@ -245,7 +245,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_RCU_NOCB_CPU
static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */
-@@ -668,15 +645,6 @@ static void rcu_preempt_check_callbacks(
+@@ -682,15 +659,6 @@ static void rcu_preempt_check_callbacks(
t->rcu_read_unlock_special.b.need_qs = true;
}
@@ -261,7 +261,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -899,20 +867,23 @@ void exit_rcu(void)
+@@ -913,20 +881,23 @@ void exit_rcu(void)
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -295,7 +295,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Carry out RCU priority boosting on the task indicated by ->exp_tasks
* or ->boost_tasks, advancing the pointer to the next task in the
-@@ -1055,23 +1026,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1069,23 +1040,6 @@ static void rcu_initiate_boost(struct rc
}
/*
@@ -319,7 +319,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Is the current CPU running the RCU-callbacks kthread?
* Caller must have preemption disabled.
*/
-@@ -1125,67 +1079,6 @@ static int rcu_spawn_one_boost_kthread(s
+@@ -1139,67 +1093,6 @@ static int rcu_spawn_one_boost_kthread(s
return 0;
}
@@ -387,7 +387,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Set the per-rcu_node kthread's affinity to cover all CPUs that are
* served by the rcu_node in question. The CPU hotplug lock is still
-@@ -1216,26 +1109,12 @@ static void rcu_boost_kthread_setaffinit
+@@ -1230,26 +1123,12 @@ static void rcu_boost_kthread_setaffinit
free_cpumask_var(cm);
}
@@ -414,7 +414,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_for_each_leaf_node(rcu_state_p, rnp)
(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
}
-@@ -1258,11 +1137,6 @@ static void rcu_initiate_boost(struct rc
+@@ -1272,11 +1151,6 @@ static void rcu_initiate_boost(struct rc
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
}
diff --git a/patches/rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch b/patches/rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch
index 1e6c829100d7..9c3d3c52ab45 100644
--- a/patches/rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch
+++ b/patches/rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch
@@ -38,7 +38,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Control variables for per-CPU and per-rcu_node kthreads. These
* handle all flavors of RCU.
-@@ -523,7 +522,7 @@ void rcu_read_unlock_special(struct task
+@@ -530,7 +529,7 @@ void rcu_read_unlock_special(struct task
/* Unboost if we were boosted. */
if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
diff --git a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
index b687c5275d54..13b4e6bebbeb 100644
--- a/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
+++ b/patches/rcu-disable-rcu-fast-no-hz-on-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/rcu/Kconfig
+++ b/kernel/rcu/Kconfig
-@@ -173,7 +173,7 @@ config RCU_FANOUT_LEAF
+@@ -172,7 +172,7 @@ config RCU_FANOUT_LEAF
config RCU_FAST_NO_HZ
bool "Accelerate last non-dyntick-idle CPU's grace periods"
diff --git a/patches/rcu-make-RCU_BOOST-default-on-RT.patch b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
index b0694274f767..bda5c6289f0a 100644
--- a/patches/rcu-make-RCU_BOOST-default-on-RT.patch
+++ b/patches/rcu-make-RCU_BOOST-default-on-RT.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
help
This option needs to be enabled if you wish to make
expert-level adjustments to RCU configuration. By default,
-@@ -192,7 +192,7 @@ config RCU_FAST_NO_HZ
+@@ -191,7 +191,7 @@ config RCU_FAST_NO_HZ
config RCU_BOOST
bool "Enable RCU priority boosting"
depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
diff --git a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
index b0237ff4c0e2..839ec219e2b2 100644
--- a/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
+++ b/patches/rcu-merge-rcu-bh-into-rcu-preempt-for-rt.patch
@@ -46,10 +46,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
void synchronize_sched(void);
- void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
-@@ -114,7 +118,11 @@ static inline int rcu_preempt_depth(void
- /* Internal to kernel */
+ void rcu_barrier_tasks(void);
+@@ -113,7 +117,11 @@ static inline int rcu_preempt_depth(void
void rcu_init(void);
+ extern int rcu_scheduler_active __read_mostly;
void rcu_sched_qs(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline void rcu_bh_qs(void) { }
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_check_callbacks(int user);
void rcu_report_dead(unsigned int cpu);
void rcu_cpu_starting(unsigned int cpu);
-@@ -258,7 +266,14 @@ extern struct lockdep_map rcu_sched_lock
+@@ -263,7 +271,14 @@ extern struct lockdep_map rcu_sched_lock
extern struct lockdep_map rcu_callback_map;
int debug_lockdep_rcu_enabled(void);
int rcu_read_lock_held(void);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int rcu_read_lock_sched_held(void);
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-@@ -645,10 +660,14 @@ static inline void rcu_read_unlock(void)
+@@ -667,10 +682,14 @@ static inline void rcu_read_unlock(void)
static inline void rcu_read_lock_bh(void)
{
local_bh_disable();
@@ -89,7 +89,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -658,10 +677,14 @@ static inline void rcu_read_lock_bh(void
+@@ -680,10 +699,14 @@ static inline void rcu_read_lock_bh(void
*/
static inline void rcu_read_unlock_bh(void)
{
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cond_synchronize_rcu(unsigned long oldstate);
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
-@@ -550,18 +550,26 @@ static inline void show_rcu_gp_kthreads(
+@@ -462,18 +462,26 @@ static inline void show_rcu_gp_kthreads(
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
@@ -164,7 +164,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_RCU_NOCB_CPU
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
-@@ -416,6 +416,7 @@ static struct rcu_torture_ops rcu_ops =
+@@ -417,6 +417,7 @@ static struct rcu_torture_ops rcu_ops =
.name = "rcu"
};
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Definitions for rcu_bh torture testing.
*/
-@@ -455,6 +456,12 @@ static struct rcu_torture_ops rcu_bh_ops
+@@ -456,6 +457,12 @@ static struct rcu_torture_ops rcu_bh_ops
.name = "rcu_bh"
};
@@ -187,7 +187,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* The names include "busted", and they really mean it!
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -246,6 +246,7 @@ void rcu_sched_qs(void)
+@@ -243,6 +243,7 @@ void rcu_sched_qs(void)
this_cpu_ptr(&rcu_sched_data), true);
}
@@ -195,7 +195,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void rcu_bh_qs(void)
{
RCU_LOCKDEP_WARN(preemptible(), "rcu_bh_qs() invoked with preemption enabled!!!");
-@@ -256,6 +257,7 @@ void rcu_bh_qs(void)
+@@ -253,6 +254,7 @@ void rcu_bh_qs(void)
__this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false);
}
}
@@ -203,7 +203,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Steal a bit from the bottom of ->dynticks for idle entry/exit
-@@ -567,11 +569,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
+@@ -564,11 +566,13 @@ EXPORT_SYMBOL_GPL(rcu_batches_started_sc
/*
* Return the number of RCU BH batches started thus far for debug & stats.
*/
@@ -217,7 +217,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU batches completed thus far for debug & stats.
-@@ -591,6 +595,7 @@ unsigned long rcu_batches_completed_sche
+@@ -588,6 +592,7 @@ unsigned long rcu_batches_completed_sche
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
@@ -225,7 +225,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU BH batches completed thus far for debug & stats.
*/
-@@ -599,6 +604,7 @@ unsigned long rcu_batches_completed_bh(v
+@@ -596,6 +601,7 @@ unsigned long rcu_batches_completed_bh(v
return rcu_bh_state.completed;
}
EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Return the number of RCU expedited batches completed thus far for
-@@ -622,6 +628,7 @@ unsigned long rcu_exp_batches_completed_
+@@ -619,6 +625,7 @@ unsigned long rcu_exp_batches_completed_
}
EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
@@ -241,7 +241,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state.
*/
-@@ -640,6 +647,13 @@ void rcu_bh_force_quiescent_state(void)
+@@ -637,6 +644,13 @@ void rcu_bh_force_quiescent_state(void)
}
EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
@@ -255,7 +255,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Force a quiescent state for RCU-sched.
*/
-@@ -690,9 +704,11 @@ void rcutorture_get_gp_data(enum rcutort
+@@ -687,9 +701,11 @@ void rcutorture_get_gp_data(enum rcutort
case RCU_FLAVOR:
rsp = rcu_state_p;
break;
@@ -267,7 +267,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
case RCU_SCHED_FLAVOR:
rsp = &rcu_sched_state;
break;
-@@ -3208,6 +3224,7 @@ void call_rcu_sched(struct rcu_head *hea
+@@ -3113,6 +3129,7 @@ void call_rcu_sched(struct rcu_head *hea
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -275,7 +275,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* call_rcu_bh() - Queue an RCU callback for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
-@@ -3234,6 +3251,7 @@ void call_rcu_bh(struct rcu_head *head,
+@@ -3140,6 +3157,7 @@ void call_rcu_bh(struct rcu_head *head,
__call_rcu(head, func, &rcu_bh_state, -1, 0);
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -283,7 +283,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Queue an RCU callback for lazy invocation after a grace period.
-@@ -3319,6 +3337,7 @@ void synchronize_sched(void)
+@@ -3225,6 +3243,7 @@ void synchronize_sched(void)
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -291,7 +291,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
*
-@@ -3345,6 +3364,7 @@ void synchronize_rcu_bh(void)
+@@ -3251,6 +3270,7 @@ void synchronize_rcu_bh(void)
wait_rcu_gp(call_rcu_bh);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* get_state_synchronize_rcu - Snapshot current RCU state
-@@ -3692,6 +3712,7 @@ static void _rcu_barrier(struct rcu_stat
+@@ -3601,6 +3621,7 @@ static void _rcu_barrier(struct rcu_stat
mutex_unlock(&rsp->barrier_mutex);
}
@@ -307,7 +307,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
*/
-@@ -3700,6 +3721,7 @@ void rcu_barrier_bh(void)
+@@ -3609,6 +3630,7 @@ void rcu_barrier_bh(void)
_rcu_barrier(&rcu_bh_state);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
@@ -315,7 +315,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
-@@ -4225,7 +4247,9 @@ void __init rcu_init(void)
+@@ -4184,7 +4206,9 @@ void __init rcu_init(void)
rcu_bootup_announce();
rcu_init_geometry();
@@ -327,7 +327,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_dump_rcu_node_tree(&rcu_sched_state);
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
-@@ -436,7 +436,9 @@ extern struct list_head rcu_struct_flavo
+@@ -427,7 +427,9 @@ extern struct list_head rcu_struct_flavo
*/
extern struct rcu_state rcu_sched_state;
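The net effect of the hunks above is that the rcu_bh flavour collapses onto preemptible RCU when CONFIG_PREEMPT_RT_FULL is set: BH read-side sections are already preemptible there, so a separate softirq flavour buys nothing. A minimal sketch of that mapping (illustrative my_* wrappers, not the patch's exact #ifdef layout):

    #include <linux/rcupdate.h>

    #ifdef CONFIG_PREEMPT_RT_FULL
    /* The bh flavour folds into the preemptible one. */
    static inline void my_call_rcu_bh(struct rcu_head *head, rcu_callback_t func)
    {
            call_rcu(head, func);
    }

    static inline void my_synchronize_rcu_bh(void)
    {
            synchronize_rcu();
    }
    #endif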
diff --git a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
index 9e8e79d3c782..a781890ab49e 100644
--- a/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
+++ b/patches/rcutree-rcu_bh_qs-disable-irq-while-calling-rcu_pree.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -251,7 +251,12 @@ static void rcu_preempt_qs(void);
+@@ -248,7 +248,12 @@ static void rcu_preempt_qs(void);
void rcu_bh_qs(void)
{
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index 0122e29729c2..a834577def32 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6807,6 +6807,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -6851,6 +6851,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -6827,10 +6868,9 @@ void migrate_disable(void)
+@@ -6871,10 +6912,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -6859,9 +6899,8 @@ void migrate_enable(void)
+@@ -6903,9 +6943,8 @@ void migrate_enable(void)
preempt_disable();
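The hunk above reworks migrate_disable()/migrate_enable() to keep the runqueue's count of migratory tasks in sync. The usage pattern these helpers support is pinning a task to its current CPU while leaving it preemptible, as in this hedged sketch (the my_* per-CPU data is hypothetical):

    #include <linux/list.h>
    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(struct list_head, my_backlog);

    static void my_queue_on_this_cpu(struct list_head *item)
    {
            struct list_head *head;

            migrate_disable();              /* pinned to this CPU, still preemptible on RT */
            head = this_cpu_ptr(&my_backlog);
            list_add_tail(item, head);      /* extra locking needed if others touch the list */
            migrate_enable();
    }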
diff --git a/patches/rt-introduce-cpu-chill.patch b/patches/rt-introduce-cpu-chill.patch
index 282b10ea751d..5b6cc45cc131 100644
--- a/patches/rt-introduce-cpu-chill.patch
+++ b/patches/rt-introduce-cpu-chill.patch
@@ -87,7 +87,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/delay.h
+++ b/include/linux/delay.h
-@@ -63,4 +63,10 @@ static inline void ssleep(unsigned int s
+@@ -64,4 +64,10 @@ static inline void ssleep(unsigned int s
msleep(seconds * 1000);
}
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* defined(_LINUX_DELAY_H) */
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
-@@ -1825,6 +1825,25 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct
+@@ -1855,6 +1855,25 @@ COMPAT_SYSCALL_DEFINE2(nanosleep, struct
}
#endif
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ unsigned int freeze_flag = current->flags & PF_NOFREEZE;
+
+ current->flags |= PF_NOFREEZE;
-+ hrtimer_nanosleep(&tu, HRTIMER_MODE_REL, CLOCK_MONOTONIC_HARD);
++ hrtimer_nanosleep(&tu, HRTIMER_MODE_REL_HARD, CLOCK_MONOTONIC);
+ if (!freeze_flag)
+ current->flags &= ~PF_NOFREEZE;
+}
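cpu_chill() exists so that RT retry loops stop busy-waiting: spinning with cpu_relax() can live-lock when the lock holder is itself preemptible, so on RT the helper sleeps for a tick (non-freezable, hence the PF_NOFREEZE dance above) while !RT builds keep it as cpu_relax(). A hedged usage sketch, with my_try_grab() standing in for any trylock-style operation:

    #include <linux/delay.h>

    static bool my_try_grab(void);          /* hypothetical trylock-style helper */

    static void my_wait_for_resource(void)
    {
            while (!my_try_grab())
                    cpu_chill();            /* one-tick sleep on RT, cpu_relax() otherwise */
    }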
diff --git a/patches/rt-local-irq-lock.patch b/patches/rt-local-irq-lock.patch
index f1435ee1ed62..c27d96204455 100644
--- a/patches/rt-local-irq-lock.patch
+++ b/patches/rt-local-irq-lock.patch
@@ -292,7 +292,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+#endif
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
-@@ -18,6 +18,35 @@
+@@ -19,6 +19,35 @@
#define PERCPU_MODULE_RESERVE 0
#endif
diff --git a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
index 1673b3cfdb46..ee8ee68da7a4 100644
--- a/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
+++ b/patches/rtmutex--Handle-non-enqueued-waiters-gracefully.patch
@@ -21,7 +21,7 @@ Cc: stable-rt@vger.kernel.org
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1729,7 +1729,7 @@ int __rt_mutex_start_proxy_lock(struct r
+@@ -1716,7 +1716,7 @@ int __rt_mutex_start_proxy_lock(struct r
ret = 0;
}
diff --git a/patches/rtmutex-Make-lock_killable-work.patch b/patches/rtmutex-Make-lock_killable-work.patch
index aad1dd8f6a68..882bb17e3262 100644
--- a/patches/rtmutex-Make-lock_killable-work.patch
+++ b/patches/rtmutex-Make-lock_killable-work.patch
@@ -16,7 +16,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1213,18 +1213,13 @@ static int __sched
+@@ -1201,18 +1201,13 @@ static int __sched
if (try_to_take_rt_mutex(lock, current, waiter))
break;
diff --git a/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch b/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch
index df98a55a3fdb..e61af721b5e9 100644
--- a/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch
+++ b/patches/rtmutex-Provide-rt_mutex_slowlock_locked.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1256,35 +1256,16 @@ static void rt_mutex_handle_deadlock(int
+@@ -1244,35 +1244,16 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1292,17 +1273,18 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1280,17 +1261,18 @@ rt_mutex_slowlock(struct rt_mutex *lock,
if (unlikely(timeout))
hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1310,6 +1292,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1298,6 +1280,34 @@ rt_mutex_slowlock(struct rt_mutex *lock,
* unconditionally. We might have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -127,6 +127,12 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -157,6 +157,12 @@ extern bool __rt_mutex_futex_unlock(stru
struct wake_q_head *wqh);
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index defdb62ebfbf..859ddbd9beb7 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -6,7 +6,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/kernel.h | 4
- include/linux/rtmutex.h | 20 +
+ include/linux/rtmutex.h | 21 +
include/linux/sched.h | 9
include/linux/sched/wake_q.h | 27 ++
include/linux/spinlock_rt.h | 159 +++++++++++++
@@ -16,13 +16,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kernel/locking/rtmutex.c | 449 ++++++++++++++++++++++++++++++++++----
kernel/locking/rtmutex_common.h | 15 +
kernel/sched/core.c | 28 +-
- 11 files changed, 712 insertions(+), 59 deletions(-)
+ 11 files changed, 713 insertions(+), 59 deletions(-)
create mode 100644 include/linux/spinlock_rt.h
create mode 100644 include/linux/spinlock_types_rt.h
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -203,6 +203,9 @@ extern int _cond_resched(void);
+@@ -225,6 +225,9 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
-@@ -210,6 +213,7 @@ extern int _cond_resched(void);
+@@ -232,6 +235,7 @@ extern int _cond_resched(void);
static inline void __might_sleep(const char *file, int line,
int preempt_offset) { }
# define might_sleep() do { might_resched(); } while (0)
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -13,11 +13,15 @@
+@@ -14,11 +14,15 @@
#define __LINUX_RT_MUTEX_H
#include <linux/linkage.h>
@@ -60,16 +60,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* The rt_mutex structure
*
@@ -31,8 +35,8 @@ struct rt_mutex {
- struct rb_root waiters;
- struct rb_node *waiters_leftmost;
+ raw_spinlock_t wait_lock;
+ struct rb_root_cached waiters;
struct task_struct *owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
- const char *name, *file;
+ const char *name, *file;
int line;
void *magic;
-@@ -82,16 +86,22 @@ do { \
+@@ -82,16 +86,23 @@ do { \
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)
#endif
@@ -77,7 +77,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
- , .waiters = RB_ROOT \
+ , .waiters = RB_ROOT_CACHED \
, .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
- __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
@@ -92,6 +92,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+ , .save_state = 1 }
++
/**
* rt_mutex_is_locked - is the mutex locked
* @lock: the mutex to be queried
@@ -119,7 +120,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
/* Task command name length: */
-@@ -826,6 +834,7 @@ struct task_struct {
+@@ -827,6 +835,7 @@ struct task_struct {
raw_spinlock_t pi_lock;
struct wake_q_node wake_q;
@@ -129,7 +130,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* PI waiters blocked on a rt_mutex held by this task: */
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
-@@ -46,8 +46,29 @@ static inline void wake_q_init(struct wa
+@@ -47,8 +47,29 @@ static inline void wake_q_init(struct wa
head->lastp = &head->first;
}
@@ -377,7 +378,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -592,6 +592,7 @@ static struct task_struct *dup_task_stru
+@@ -600,6 +600,7 @@ static struct task_struct *dup_task_stru
tsk->splice_pipe = NULL;
tsk->task_frag.page = NULL;
tsk->wake_q.next = NULL;
@@ -387,7 +388,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -1411,6 +1411,7 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1432,6 +1432,7 @@ static int wake_futex_pi(u32 __user *uad
struct task_struct *new_owner;
bool postunlock = false;
DEFINE_WAKE_Q(wake_q);
@@ -395,7 +396,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex);
-@@ -1472,13 +1473,13 @@ static int wake_futex_pi(u32 __user *uad
+@@ -1493,13 +1494,13 @@ static int wake_futex_pi(u32 __user *uad
pi_state->owner = new_owner;
raw_spin_unlock(&new_owner->pi_lock);
@@ -412,7 +413,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
}
-@@ -2677,7 +2678,7 @@ static int futex_lock_pi(u32 __user *uad
+@@ -2757,7 +2758,7 @@ static int futex_lock_pi(u32 __user *uad
goto no_block;
}
@@ -421,7 +422,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* On PREEMPT_RT_FULL, when hb->lock becomes an rt_mutex, we must not
-@@ -3043,7 +3044,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3129,7 +3130,7 @@ static int futex_wait_requeue_pi(u32 __u
* The waiter is allocated on our stack, manipulated by the requeue
* code while we sleep on uaddr.
*/
@@ -481,7 +482,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
-@@ -390,6 +416,14 @@ static bool rt_mutex_cond_detect_deadloc
+@@ -378,6 +404,14 @@ static bool rt_mutex_cond_detect_deadloc
return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}
@@ -496,7 +497,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Max number of times we'll walk the boosting chain:
*/
-@@ -715,13 +749,16 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -703,13 +737,16 @@ static int rt_mutex_adjust_prio_chain(st
* follow here. This is the end of the chain we are walking.
*/
if (!rt_mutex_owner(lock)) {
@@ -515,7 +516,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
return 0;
}
-@@ -823,9 +860,11 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -811,9 +848,11 @@ static int rt_mutex_adjust_prio_chain(st
* @task: The task which wants to acquire the lock
* @waiter: The waiter that is queued to the lock's wait tree if the
* callsite called task_blocked_on_lock(), otherwise NULL
@@ -529,7 +530,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -861,12 +900,11 @@ static int try_to_take_rt_mutex(struct r
+@@ -849,12 +888,11 @@ static int try_to_take_rt_mutex(struct r
*/
if (waiter) {
/*
@@ -545,7 +546,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* We can acquire the lock. Remove the waiter from the
* lock waiters tree.
-@@ -884,14 +922,12 @@ static int try_to_take_rt_mutex(struct r
+@@ -872,14 +910,12 @@ static int try_to_take_rt_mutex(struct r
*/
if (rt_mutex_has_waiters(lock)) {
/*
@@ -564,7 +565,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* The current top waiter stays enqueued. We
* don't have to change anything in the lock
-@@ -938,6 +974,309 @@ static int try_to_take_rt_mutex(struct r
+@@ -926,6 +962,309 @@ static int try_to_take_rt_mutex(struct r
return 1;
}
@@ -874,7 +875,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Task blocks on lock.
*
-@@ -1051,6 +1390,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -1039,6 +1378,7 @@ static int task_blocks_on_rt_mutex(struc
* Called with lock->wait_lock held and interrupts disabled.
*/
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
@@ -882,7 +883,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rt_mutex *lock)
{
struct rt_mutex_waiter *waiter;
-@@ -1090,7 +1430,10 @@ static void mark_wakeup_next_waiter(stru
+@@ -1078,7 +1418,10 @@ static void mark_wakeup_next_waiter(stru
* Pairs with preempt_enable() in rt_mutex_postunlock();
*/
preempt_disable();
@@ -894,7 +895,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock(&current->pi_lock);
}
-@@ -1174,21 +1517,22 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1162,21 +1505,22 @@ void rt_mutex_adjust_pi(struct task_stru
return;
}
next_lock = waiter->lock;
@@ -919,7 +920,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1307,7 +1651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1295,7 +1639,7 @@ rt_mutex_slowlock(struct rt_mutex *lock,
unsigned long flags;
int ret = 0;
@@ -928,7 +929,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Technically we could use raw_spin_[un]lock_irq() here, but this can
-@@ -1373,7 +1717,8 @@ static inline int rt_mutex_slowtrylock(s
+@@ -1361,7 +1705,8 @@ static inline int rt_mutex_slowtrylock(s
* Return whether the current task needs to call rt_mutex_postunlock().
*/
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
@@ -938,7 +939,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
-@@ -1427,7 +1772,7 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1415,7 +1760,7 @@ static bool __sched rt_mutex_slowunlock(
*
* Queue the next waiter for wakeup once we release the wait_lock.
*/
@@ -947,7 +948,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
return true; /* call rt_mutex_postunlock() */
-@@ -1479,9 +1824,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
+@@ -1467,9 +1812,11 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
/*
* Performs the wakeup of the top-waiter and re-enables preemption.
*/
@@ -960,7 +961,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
preempt_enable();
-@@ -1490,15 +1837,17 @@ void rt_mutex_postunlock(struct wake_q_h
+@@ -1478,15 +1825,17 @@ void rt_mutex_postunlock(struct wake_q_h
static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
bool (*slowfn)(struct rt_mutex *lock,
@@ -981,7 +982,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
int __sched __rt_mutex_lock_state(struct rt_mutex *lock, int state)
-@@ -1653,16 +2002,13 @@ void __sched __rt_mutex_unlock(struct rt
+@@ -1641,16 +1990,13 @@ void __sched __rt_mutex_unlock(struct rt
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
mutex_release(&lock->dep_map, 1, _RET_IP_);
@@ -1002,7 +1003,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
lockdep_assert_held(&lock->wait_lock);
-@@ -1679,22 +2025,34 @@ bool __sched __rt_mutex_futex_unlock(str
+@@ -1667,22 +2013,34 @@ bool __sched __rt_mutex_futex_unlock(str
* avoid inversion prior to the wakeup. preempt_disable()
* therein pairs with rt_mutex_postunlock().
*/
@@ -1040,7 +1041,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/**
-@@ -1734,7 +2092,7 @@ void __rt_mutex_init(struct rt_mutex *lo
+@@ -1721,7 +2079,7 @@ void __rt_mutex_init(struct rt_mutex *lo
if (name && key)
debug_rt_mutex_init(lock, name, key);
}
@@ -1049,7 +1050,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
-@@ -1903,6 +2261,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1890,6 +2248,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
struct hrtimer_sleeper *to,
struct rt_mutex_waiter *waiter)
{
@@ -1057,7 +1058,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int ret;
raw_spin_lock_irq(&lock->wait_lock);
-@@ -1914,6 +2273,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -1901,6 +2260,24 @@ int rt_mutex_wait_proxy_lock(struct rt_m
* have to fix that up.
*/
fixup_rt_mutex_waiters(lock);
@@ -1084,7 +1085,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return ret;
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -14,6 +14,7 @@
+@@ -15,6 +15,7 @@
#include <linux/rtmutex.h>
#include <linux/sched/wake_q.h>
@@ -1092,7 +1093,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* This is the control structure for tasks blocked on a rt_mutex,
-@@ -28,6 +29,7 @@ struct rt_mutex_waiter {
+@@ -29,6 +30,7 @@ struct rt_mutex_waiter {
struct rb_node pi_tree_entry;
struct task_struct *task;
struct rt_mutex *lock;
@@ -1100,7 +1101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
-@@ -107,7 +109,7 @@ extern void rt_mutex_init_proxy_locked(s
+@@ -137,7 +139,7 @@ extern void rt_mutex_init_proxy_locked(s
struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock,
struct task_struct *proxy_owner);
@@ -1109,7 +1110,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
struct rt_mutex_waiter *waiter,
struct task_struct *task);
-@@ -124,9 +126,12 @@ extern int rt_mutex_futex_trylock(struct
+@@ -154,9 +156,12 @@ extern int rt_mutex_futex_trylock(struct
extern void rt_mutex_futex_unlock(struct rt_mutex *lock);
extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
@@ -1124,7 +1125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* RW semaphore special interface */
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
-@@ -136,6 +141,10 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -166,6 +171,10 @@ int __sched rt_mutex_slowlock_locked(str
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter);
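With the sleeping-lock support in place, the unlock slow path carries two wake queues: one for regular waiters and one for "sleeper" (spinlock-style) waiters, both flushed after wait_lock is dropped. A condensed sketch of that path, using the names from the hunks above (not a standalone compilable unit, since the helpers are internal to rtmutex.c):

    static void my_unlock_slow(struct rt_mutex *lock)
    {
            unsigned long flags;
            DEFINE_WAKE_Q(wake_q);
            DEFINE_WAKE_Q(wake_sleeper_q);

            raw_spin_lock_irqsave(&lock->wait_lock, flags);
            if (rt_mutex_has_waiters(lock))
                    mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock);
            raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

            rt_mutex_postunlock(&wake_q, &wake_sleeper_q); /* flushes both queues */
    }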
diff --git a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
index 466b9d7da8fd..99510d642915 100644
--- a/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
+++ b/patches/rtmutex-add-ww_mutex-addon-for-mutex-rt.patch
@@ -4,10 +4,10 @@ Subject: rtmutex: add ww_mutex addon for mutex-rt
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- kernel/locking/rtmutex.c | 265 ++++++++++++++++++++++++++++++++++++++--
+ kernel/locking/rtmutex.c | 264 ++++++++++++++++++++++++++++++++++++++--
kernel/locking/rtmutex_common.h | 2
kernel/locking/rwsem-rt.c | 2
- 3 files changed, 258 insertions(+), 11 deletions(-)
+ 3 files changed, 257 insertions(+), 11 deletions(-)
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rtmutex_common.h"
-@@ -1270,6 +1271,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
+@@ -1258,6 +1259,40 @@ EXPORT_SYMBOL(__rt_spin_lock_init);
#endif /* PREEMPT_RT_FULL */
@@ -60,7 +60,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline int
try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
struct rt_mutex_waiter *waiter)
-@@ -1548,7 +1583,8 @@ void rt_mutex_init_waiter(struct rt_mute
+@@ -1536,7 +1571,8 @@ void rt_mutex_init_waiter(struct rt_mute
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
int ret = 0;
-@@ -1566,6 +1602,12 @@ static int __sched
+@@ -1554,6 +1590,12 @@ static int __sched
break;
}
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irq(&lock->wait_lock);
debug_rt_mutex_print_deadlock(waiter);
-@@ -1600,16 +1642,107 @@ static void rt_mutex_handle_deadlock(int
+@@ -1588,16 +1630,106 @@ static void rt_mutex_handle_deadlock(int
}
}
@@ -145,14 +145,13 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * Give any possible sleeping processes the chance to wake up,
+ * so they can recheck if they have to back off.
+ */
-+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters.rb_root,
+ tree_entry) {
+ /* XXX debug rt mutex waiter wakeup */
+
+ BUG_ON(waiter->lock != lock);
+ rt_mutex_wake_waiter(waiter);
+ }
-+
+}
+
+#else
@@ -192,7 +191,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
set_current_state(state);
-@@ -1621,7 +1754,12 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1609,7 +1741,12 @@ int __sched rt_mutex_slowlock_locked(str
if (likely(!ret)) {
/* sleep on the mutex */
@@ -206,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
if (unlikely(ret)) {
-@@ -1629,6 +1767,10 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1617,6 +1754,10 @@ int __sched rt_mutex_slowlock_locked(str
if (rt_mutex_has_waiters(lock))
remove_waiter(lock, waiter);
/* ww_mutex want to report EDEADLK/EALREADY, let them */
@@ -217,7 +216,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -1645,7 +1787,8 @@ int __sched rt_mutex_slowlock_locked(str
+@@ -1633,7 +1774,8 @@ int __sched rt_mutex_slowlock_locked(str
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
@@ -227,7 +226,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct rt_mutex_waiter waiter;
unsigned long flags;
-@@ -1663,7 +1806,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
+@@ -1651,7 +1793,8 @@ rt_mutex_slowlock(struct rt_mutex *lock,
*/
raw_spin_lock_irqsave(&lock->wait_lock, flags);
@@ -237,7 +236,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
-@@ -1786,29 +1930,33 @@ static bool __sched rt_mutex_slowunlock(
+@@ -1774,29 +1917,33 @@ static bool __sched rt_mutex_slowunlock(
*/
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
@@ -275,7 +274,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static inline int
-@@ -1946,6 +2094,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1934,6 +2081,7 @@ rt_mutex_timed_lock(struct rt_mutex *loc
mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
RT_MUTEX_MIN_CHAINWALK,
@@ -283,7 +282,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rt_mutex_slowlock);
if (ret)
mutex_release(&lock->dep_map, 1, _RET_IP_);
-@@ -2267,7 +2416,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
+@@ -2254,7 +2402,7 @@ int rt_mutex_wait_proxy_lock(struct rt_m
raw_spin_lock_irq(&lock->wait_lock);
/* sleep on the mutex */
set_current_state(TASK_INTERRUPTIBLE);
@@ -292,7 +291,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
* have to fix that up.
-@@ -2351,3 +2500,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
+@@ -2338,3 +2486,99 @@ bool rt_mutex_cleanup_proxy_lock(struct
return cleanup;
}
@@ -394,7 +393,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#endif
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -133,6 +133,7 @@ extern void rt_mutex_postunlock(struct w
+@@ -163,6 +163,7 @@ extern void rt_mutex_postunlock(struct w
struct wake_q_head *wake_sleeper_q);
/* RW semaphore special interface */
@@ -402,7 +401,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern int __rt_mutex_lock_state(struct rt_mutex *lock, int state);
extern int __rt_mutex_trylock(struct rt_mutex *lock);
-@@ -140,6 +141,7 @@ extern void __rt_mutex_unlock(struct rt_
+@@ -170,6 +171,7 @@ extern void __rt_mutex_unlock(struct rt_
int __sched rt_mutex_slowlock_locked(struct rt_mutex *lock, int state,
struct hrtimer_sleeper *timeout,
enum rtmutex_chainwalk chwalk,
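The ww_mutex addon threads a ww_acquire_ctx into the rtmutex slowpath so -EDEADLK/-EALREADY can be reported on RT just as with plain mutexes. The caller-side pattern that relies on those return codes is the stock wound/wait loop (standard ww_mutex API; error handling simplified, and both mutexes must belong to the same class):

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(my_ww_class);

    /* ctx must outlive the locks; the caller unlocks both and then
     * calls ww_acquire_fini(ctx). On error the caller may retry. */
    static int my_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                            struct ww_acquire_ctx *ctx)
    {
            int ret;

            ww_acquire_init(ctx, &my_ww_class);

            ret = ww_mutex_lock(a, ctx);
            if (ret)
                    goto out;               /* wounded: back off entirely */
            ret = ww_mutex_lock(b, ctx);
            if (ret == -EDEADLK) {
                    ww_mutex_unlock(a);
                    ww_mutex_lock_slow(b, ctx); /* sleep until the winner is done */
                    ret = ww_mutex_lock(a, ctx);
                    if (ret)
                            ww_mutex_unlock(b);
            }
    out:
            if (ret)
                    ww_acquire_fini(ctx);
            else
                    ww_acquire_done(ctx);
            return ret;
    }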
diff --git a/patches/rtmutex-avoid-include-hell.patch b/patches/rtmutex-avoid-include-hell.patch
index 20d85673292e..a3b55f5b742d 100644
--- a/patches/rtmutex-avoid-include-hell.patch
+++ b/patches/rtmutex-avoid-include-hell.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
-@@ -14,7 +14,7 @@
+@@ -15,7 +15,7 @@
#include <linux/linkage.h>
#include <linux/rbtree.h>
diff --git a/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch b/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
index f7c3d58cdeab..e25577179998 100644
--- a/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
+++ b/patches/rtmutex-export-lockdep-less-version-of-rt_mutex-s-lo.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1501,6 +1501,29 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1489,6 +1489,29 @@ rt_mutex_fastunlock(struct rt_mutex *loc
rt_mutex_postunlock(&wake_q);
}
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_lock - lock a rt_mutex
*
-@@ -1508,10 +1531,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
+@@ -1496,10 +1519,7 @@ rt_mutex_fastunlock(struct rt_mutex *loc
*/
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
@@ -56,7 +56,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
-@@ -1526,16 +1546,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
+@@ -1514,16 +1534,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
*/
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
@@ -74,7 +74,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
-@@ -1556,13 +1567,10 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -1544,13 +1555,10 @@ int __sched rt_mutex_futex_trylock(struc
* Returns:
* 0 on success
* -EINTR when interrupted by a signal
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
-@@ -1597,6 +1605,18 @@ rt_mutex_timed_lock(struct rt_mutex *loc
+@@ -1585,6 +1593,18 @@ rt_mutex_timed_lock(struct rt_mutex *loc
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ -108,7 +108,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/**
* rt_mutex_trylock - try to lock a rt_mutex
*
-@@ -1612,14 +1632,7 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1600,14 +1620,7 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
@@ -124,7 +124,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ret)
mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
-@@ -1627,6 +1640,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1615,6 +1628,11 @@ int __sched rt_mutex_trylock(struct rt_m
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);
@@ -138,7 +138,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -129,6 +129,9 @@ extern bool __rt_mutex_futex_unlock(stru
+@@ -159,6 +159,9 @@ extern bool __rt_mutex_futex_unlock(stru
extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
/* RW semaphore special interface */
diff --git a/patches/rtmutex-futex-prepare-rt.patch b/patches/rtmutex-futex-prepare-rt.patch
index 06b03f3c6207..d1d1fd306e93 100644
--- a/patches/rtmutex-futex-prepare-rt.patch
+++ b/patches/rtmutex-futex-prepare-rt.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/futex.c
+++ b/kernel/futex.c
-@@ -2024,6 +2024,16 @@ static int futex_requeue(u32 __user *uad
+@@ -2101,6 +2101,16 @@ static int futex_requeue(u32 __user *uad
requeue_pi_wake_futex(this, &key2, hb2);
drop_count++;
continue;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else if (ret) {
/*
* rt_mutex_start_proxy_lock() detected a
-@@ -3007,7 +3017,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3090,7 +3100,7 @@ static int futex_wait_requeue_pi(u32 __u
struct hrtimer_sleeper timeout, *to = NULL;
struct futex_pi_state *pi_state = NULL;
struct rt_mutex_waiter rt_waiter;
@@ -41,7 +41,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
union futex_key key2 = FUTEX_KEY_INIT;
struct futex_q q = futex_q_init;
int res, ret;
-@@ -3062,20 +3072,55 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3148,20 +3158,55 @@ static int futex_wait_requeue_pi(u32 __u
/* Queue the futex_q, drop the hb lock, wait for wakeup. */
futex_wait_queue_me(hb, &q, to);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
-@@ -3084,7 +3129,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3170,7 +3215,8 @@ static int futex_wait_requeue_pi(u32 __u
* did a lock-steal - fix up the PI-state in that case.
*/
if (q.pi_state && (q.pi_state->owner != current)) {
@@ -118,7 +118,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
ret = fixup_pi_state_owner(uaddr2, &q, current);
if (ret && rt_mutex_owner(&q.pi_state->pi_mutex) == current) {
pi_state = q.pi_state;
-@@ -3095,7 +3141,7 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3181,7 +3227,7 @@ static int futex_wait_requeue_pi(u32 __u
* the requeue_pi() code acquired for us.
*/
put_pi_state(q.pi_state);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
} else {
struct rt_mutex *pi_mutex;
-@@ -3109,7 +3155,8 @@ static int futex_wait_requeue_pi(u32 __u
+@@ -3195,7 +3241,8 @@ static int futex_wait_requeue_pi(u32 __u
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_wait_proxy_lock(pi_mutex, to, &rt_waiter);
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We can speed up the acquire/release, if there's no debugging state to be
* set up.
-@@ -391,7 +396,8 @@ int max_lock_depth = 1024;
+@@ -379,7 +384,8 @@ int max_lock_depth = 1024;
static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
@@ -161,7 +161,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -527,7 +533,7 @@ static int rt_mutex_adjust_prio_chain(st
+@@ -515,7 +521,7 @@ static int rt_mutex_adjust_prio_chain(st
* reached or the state of the chain has changed while we
* dropped the locks.
*/
@@ -170,7 +170,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto out_unlock_pi;
/*
-@@ -963,6 +969,22 @@ static int task_blocks_on_rt_mutex(struc
+@@ -951,6 +957,22 @@ static int task_blocks_on_rt_mutex(struc
return -EDEADLK;
raw_spin_lock(&task->pi_lock);
@@ -193,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
waiter->task = task;
waiter->lock = lock;
waiter->prio = task->prio;
-@@ -986,7 +1008,7 @@ static int task_blocks_on_rt_mutex(struc
+@@ -974,7 +996,7 @@ static int task_blocks_on_rt_mutex(struc
rt_mutex_enqueue_pi(owner, waiter);
rt_mutex_adjust_prio(owner);
@@ -202,7 +202,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
chain_walk = 1;
} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
chain_walk = 1;
-@@ -1082,7 +1104,7 @@ static void remove_waiter(struct rt_mute
+@@ -1070,7 +1092,7 @@ static void remove_waiter(struct rt_mute
{
bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
struct task_struct *owner = rt_mutex_owner(lock);
@@ -211,7 +211,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
lockdep_assert_held(&lock->wait_lock);
-@@ -1108,7 +1130,8 @@ static void remove_waiter(struct rt_mute
+@@ -1096,7 +1118,8 @@ static void remove_waiter(struct rt_mute
rt_mutex_adjust_prio(owner);
/* Store the lock on which owner is blocked or NULL */
@@ -221,7 +221,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_unlock(&owner->pi_lock);
-@@ -1144,7 +1167,8 @@ void rt_mutex_adjust_pi(struct task_stru
+@@ -1132,7 +1155,8 @@ void rt_mutex_adjust_pi(struct task_stru
raw_spin_lock_irqsave(&task->pi_lock, flags);
waiter = task->pi_blocked_on;
@@ -233,7 +233,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
-@@ -99,6 +99,8 @@ enum rtmutex_chainwalk {
+@@ -129,6 +129,8 @@ enum rtmutex_chainwalk {
/*
* PI-futex support (proxy locking functions, etc.):
*/
diff --git a/patches/rtmutex-lock-killable.patch b/patches/rtmutex-lock-killable.patch
index 77e42285f528..06f54c70122f 100644
--- a/patches/rtmutex-lock-killable.patch
+++ b/patches/rtmutex-lock-killable.patch
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1543,6 +1543,25 @@ int __sched rt_mutex_futex_trylock(struc
+@@ -1531,6 +1531,25 @@ int __sched rt_mutex_futex_trylock(struc
}
/**
diff --git a/patches/rtmutex-trylock-is-okay-on-RT.patch b/patches/rtmutex-trylock-is-okay-on-RT.patch
index 55da027d3868..952f80e2b496 100644
--- a/patches/rtmutex-trylock-is-okay-on-RT.patch
+++ b/patches/rtmutex-trylock-is-okay-on-RT.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
-@@ -1563,7 +1563,11 @@ int __sched rt_mutex_trylock(struct rt_m
+@@ -1551,7 +1551,11 @@ int __sched rt_mutex_trylock(struct rt_m
{
int ret;
diff --git a/patches/rtmutex-wire-up-RT-s-locking.patch b/patches/rtmutex-wire-up-RT-s-locking.patch
index 201b93a02eb8..bc1f7264d103 100644
--- a/patches/rtmutex-wire-up-RT-s-locking.patch
+++ b/patches/rtmutex-wire-up-RT-s-locking.patch
@@ -17,7 +17,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
-@@ -22,6 +22,17 @@
+@@ -23,6 +23,17 @@
struct ww_acquire_ctx;
@@ -35,7 +35,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Simple, straightforward mutexes with strict semantics:
*
-@@ -113,13 +124,6 @@ do { \
+@@ -114,13 +125,6 @@ do { \
__mutex_init((mutex), #mutex, &__key); \
} while (0)
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define __MUTEX_INITIALIZER(lockname) \
{ .owner = ATOMIC_LONG_INIT(0) \
, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
-@@ -227,4 +231,6 @@ mutex_trylock_recursive(struct mutex *lo
+@@ -228,4 +232,6 @@ mutex_trylock_recursive(struct mutex *lo
return mutex_trylock(lock);
}
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __LINUX_MUTEX_H */
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
-@@ -19,6 +19,10 @@
+@@ -20,6 +20,10 @@
#include <linux/osq_lock.h>
#endif
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct rw_semaphore;
#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
-@@ -106,6 +110,13 @@ static inline int rwsem_is_contended(str
+@@ -108,6 +112,13 @@ static inline int rwsem_is_contended(str
return !list_empty(&sem->wait_list);
}
@@ -85,7 +85,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
-@@ -268,7 +268,11 @@ static inline void do_raw_spin_unlock(ra
+@@ -286,7 +286,11 @@ static inline void do_raw_spin_unlock(ra
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
/* Include rwlock functions */
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
-@@ -279,6 +283,10 @@ static inline void do_raw_spin_unlock(ra
+@@ -297,6 +301,10 @@ static inline void do_raw_spin_unlock(ra
# include <linux/spinlock_api_up.h>
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Map the spin_lock functions to the raw variants for PREEMPT_RT=n
*/
-@@ -428,4 +436,6 @@ extern int _atomic_dec_and_lock(atomic_t
+@@ -421,4 +429,6 @@ extern int _atomic_dec_and_lock(atomic_t
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
@@ -149,7 +149,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif /* __LINUX_SPINLOCK_TYPES_H */
--- a/kernel/locking/Makefile
+++ b/kernel/locking/Makefile
-@@ -2,7 +2,7 @@
+@@ -3,7 +3,7 @@
# and is generally not a function of system call inputs.
KCOV_INSTRUMENT := n
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
-@@ -11,7 +11,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS
+@@ -12,7 +12,11 @@ CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif
@@ -170,7 +170,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj-$(CONFIG_LOCKDEP) += lockdep.o
ifeq ($(CONFIG_PROC_FS),y)
obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
-@@ -24,8 +28,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+@@ -25,8 +29,11 @@ obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
@@ -184,7 +184,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
--- a/kernel/locking/spinlock.c
+++ b/kernel/locking/spinlock.c
-@@ -124,8 +124,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
+@@ -125,8 +125,11 @@ void __lockfunc __raw_##op##_lock_bh(loc
* __[spin|read|write]_lock_bh()
*/
BUILD_LOCK_OPS(spin, raw_spinlock);
@@ -196,7 +196,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
-@@ -209,6 +212,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
+@@ -210,6 +213,8 @@ void __lockfunc _raw_spin_unlock_bh(raw_
EXPORT_SYMBOL(_raw_spin_unlock_bh);
#endif
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#ifndef CONFIG_INLINE_READ_TRYLOCK
int __lockfunc _raw_read_trylock(rwlock_t *lock)
{
-@@ -353,6 +358,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
+@@ -354,6 +359,8 @@ void __lockfunc _raw_write_unlock_bh(rwl
EXPORT_SYMBOL(_raw_write_unlock_bh);
#endif
diff --git a/patches/rtmutex_dont_include_rcu.patch b/patches/rtmutex_dont_include_rcu.patch
index e9bd19e7e460..d63e678a061c 100644
--- a/patches/rtmutex_dont_include_rcu.patch
+++ b/patches/rtmutex_dont_include_rcu.patch
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
-@@ -367,54 +368,6 @@ static inline void rcu_preempt_sleep_che
+@@ -372,54 +373,6 @@ static inline void rcu_preempt_sleep_che
})
/**
@@ -153,6 +153,6 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-})
-
-/**
- * rcu_access_pointer() - fetch RCU pointer with no dereferencing
- * @p: The pointer to read
- *
+ * rcu_swap_protected() - swap an RCU and a regular pointer
+ * @rcu_ptr: RCU pointer
+ * @ptr: regular pointer
diff --git a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
index 84e70e764848..93f554941463 100644
--- a/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
+++ b/patches/sched-Prevent-task-state-corruption-by-spurious-lock.patch
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2198,7 +2198,7 @@ EXPORT_SYMBOL(wake_up_process);
+@@ -2207,7 +2207,7 @@ EXPORT_SYMBOL(wake_up_process);
*/
int wake_up_lock_sleeper(struct task_struct *p)
{
diff --git a/patches/sched-Remove-TASK_ALL.patch b/patches/sched-Remove-TASK_ALL.patch
index f1b09654e434..4844fb7e99b0 100644
--- a/patches/sched-Remove-TASK_ALL.patch
+++ b/patches/sched-Remove-TASK_ALL.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -94,7 +94,6 @@ struct task_group;
+@@ -93,7 +93,6 @@ struct task_group;
/* Convenience macros for the sake of wake_up(): */
#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
diff --git a/patches/sched-delay-put-task.patch b/patches/sched-delay-put-task.patch
index 8f7709b6b478..5f8fcdb573df 100644
--- a/patches/sched-delay-put-task.patch
+++ b/patches/sched-delay-put-task.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1082,6 +1082,9 @@ struct task_struct {
+@@ -1093,6 +1093,9 @@ struct task_struct {
unsigned int sequential_io;
unsigned int sequential_io_avg;
#endif
@@ -53,7 +53,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_ARCH_WANTS_DYNAMIC_TASK_STRUCT
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -402,7 +402,9 @@ static inline void put_signal_struct(str
+@@ -408,7 +408,9 @@ static inline void put_signal_struct(str
if (atomic_dec_and_test(&sig->sigcnt))
free_signal_struct(sig);
}
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __put_task_struct(struct task_struct *tsk)
{
WARN_ON(!tsk->exit_state);
-@@ -419,7 +421,18 @@ void __put_task_struct(struct task_struc
+@@ -425,7 +427,18 @@ void __put_task_struct(struct task_struc
if (!profile_handoff_task(tsk))
free_task(tsk);
}
diff --git a/patches/sched-disable-ttwu-queue.patch b/patches/sched-disable-ttwu-queue.patch
index 4f4bec45cc26..95221e68081d 100644
--- a/patches/sched-disable-ttwu-queue.patch
+++ b/patches/sched-disable-ttwu-queue.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
-@@ -45,11 +45,16 @@ SCHED_FEAT(LB_BIAS, true)
+@@ -46,11 +46,16 @@ SCHED_FEAT(LB_BIAS, true)
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index c40bff6072bd..20068ac44695 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
-@@ -76,6 +76,11 @@ void synchronize_rcu(void);
+@@ -74,6 +74,11 @@ void synchronize_rcu(void);
* types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
*/
#define rcu_preempt_depth() (current->rcu_read_lock_nesting)
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#else /* #ifdef CONFIG_PREEMPT_RCU */
-@@ -101,6 +106,8 @@ static inline int rcu_preempt_depth(void
+@@ -99,6 +104,8 @@ static inline int rcu_preempt_depth(void
return 0;
}
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6006,7 +6006,7 @@ void __init sched_init(void)
+@@ -6050,7 +6050,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
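On RT an rcu_read_lock() section is preemptible and may sleep, so might_sleep() must not treat RCU nesting as atomic context; the hunks above switch preempt_count_equals() to a helper that ignores the RCU depth on PREEMPT_RT_FULL. A sketch of the resulting check, assuming the helper this patch introduces (sched_rcu_preempt_depth(), which is rcu_preempt_depth() on !RT and 0 on RT):

    static inline int my_preempt_count_equals(int preempt_offset)
    {
            int nested = preempt_count() + sched_rcu_preempt_depth();

            return nested == preempt_offset;
    }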
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index a4d581afb61a..d82ae655cbe8 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
-@@ -11,6 +11,7 @@
+@@ -12,6 +12,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/uprobes.h>
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include <linux/page-flags-layout.h>
#include <linux/workqueue.h>
-@@ -498,6 +499,9 @@ struct mm_struct {
+@@ -504,6 +505,9 @@ struct mm_struct {
bool tlb_flush_batched;
#endif
struct uprobes_state uprobes_state;
@@ -35,7 +35,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
-@@ -42,6 +42,17 @@ static inline void mmdrop(struct mm_stru
+@@ -43,6 +43,17 @@ static inline void mmdrop(struct mm_stru
__mmdrop(mm);
}
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -915,6 +915,19 @@ void __mmdrop(struct mm_struct *mm)
+@@ -931,6 +931,19 @@ void __mmdrop(struct mm_struct *mm)
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
VM_BUG_ON(atomic_read(&mm->mm_users));
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2677,8 +2677,12 @@ static struct rq *finish_task_switch(str
+@@ -2696,8 +2696,12 @@ static struct rq *finish_task_switch(str
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -5390,6 +5394,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5434,6 +5438,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5404,7 +5410,12 @@ void idle_task_exit(void)
+@@ -5448,7 +5454,12 @@ void idle_task_exit(void)
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5707,6 +5718,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5751,6 +5762,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(cpu);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index c01c4cfb1d40..300029d015b1 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -26,7 +26,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This begins the randomizable portion of task_struct. Only
-@@ -1466,6 +1468,7 @@ extern struct task_struct *find_task_by_
+@@ -1507,6 +1509,7 @@ extern struct task_struct *find_task_by_
extern int wake_up_state(struct task_struct *tsk, unsigned int state);
extern int wake_up_process(struct task_struct *tsk);
@@ -36,10 +36,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2007,8 +2007,25 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2016,8 +2016,25 @@ try_to_wake_up(struct task_struct *p, un
*/
- smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
+ smp_mb__after_spinlock();
- if (!(p->state & state))
+ if (!(p->state & state)) {
+ /*
@@ -63,7 +63,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2172,6 +2189,18 @@ int wake_up_process(struct task_struct *
+@@ -2181,6 +2198,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
@@ -84,7 +84,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return try_to_wake_up(p, state, 0);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
-@@ -1340,6 +1340,7 @@ static inline void finish_lock_switch(st
+@@ -1342,6 +1342,7 @@ static inline void finish_lock_switch(st
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
#define WF_MIGRATED 0x4 /* internal use, task got migrated */
diff --git a/patches/sched-ttwu-ensure-success-return-is-correct.patch b/patches/sched-ttwu-ensure-success-return-is-correct.patch
index d384984c3f9b..eaf5e7294227 100644
--- a/patches/sched-ttwu-ensure-success-return-is-correct.patch
+++ b/patches/sched-ttwu-ensure-success-return-is-correct.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2014,8 +2014,10 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2023,8 +2023,10 @@ try_to_wake_up(struct task_struct *p, un
* if the wakeup condition is true.
*/
if (!(wake_flags & WF_LOCK_SLEEPER)) {
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index 55dbac74ecbf..fd96248b4745 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3369,8 +3369,10 @@ static void __sched notrace __schedule(b
+@@ -3388,8 +3388,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/seqlock-prevent-rt-starvation.patch b/patches/seqlock-prevent-rt-starvation.patch
index 4700d37a21d4..3fd3d955e9c1 100644
--- a/patches/seqlock-prevent-rt-starvation.patch
+++ b/patches/seqlock-prevent-rt-starvation.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
-@@ -220,20 +220,30 @@ static inline int read_seqcount_retry(co
+@@ -221,20 +221,30 @@ static inline int read_seqcount_retry(co
return __read_seqcount_retry(s, start);
}
@@ -62,7 +62,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* raw_write_seqcount_barrier - do a seq write barrier
* @s: pointer to seqcount_t
-@@ -428,10 +438,32 @@ typedef struct {
+@@ -429,10 +439,32 @@ typedef struct {
/*
* Read side functions for starting and finalizing a read side section.
*/
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
{
-@@ -446,36 +478,36 @@ static inline unsigned read_seqretry(con
+@@ -447,36 +479,36 @@ static inline unsigned read_seqretry(con
static inline void write_seqlock(seqlock_t *sl)
{
spin_lock(&sl->lock);
@@ -138,7 +138,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
spin_unlock_irq(&sl->lock);
}
-@@ -484,7 +516,7 @@ static inline unsigned long __write_seql
+@@ -485,7 +517,7 @@ static inline unsigned long __write_seql
unsigned long flags;
spin_lock_irqsave(&sl->lock, flags);
@@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flags;
}
-@@ -494,7 +526,7 @@ static inline unsigned long __write_seql
+@@ -495,7 +527,7 @@ static inline unsigned long __write_seql
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
{
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
-@@ -449,7 +449,7 @@ static inline int neigh_hh_bridge(struct
+@@ -450,7 +450,7 @@ static inline int neigh_hh_bridge(struct
}
#endif
@@ -167,7 +167,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
unsigned int seq;
unsigned int hh_len;
-@@ -473,7 +473,7 @@ static inline int neigh_hh_output(const
+@@ -474,7 +474,7 @@ static inline int neigh_hh_output(const
static inline int neigh_output(struct neighbour *n, struct sk_buff *skb)
{
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if ((n->nud_state & NUD_CONNECTED) && hh->hh_len)
return neigh_hh_output(hh, skb);
-@@ -514,7 +514,7 @@ struct neighbour_cb {
+@@ -515,7 +515,7 @@ struct neighbour_cb {
#define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb)
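
    [Editorial sketch] The seqlock hunks above move the writer's sequence bump
    inside the spinlock (__raw_write_seqcount_begin() after spin_lock()), so on
    RT a preempted writer holds a PI-aware lock and cannot starve spinning
    readers. A rough userspace sketch of the reader retry protocol this
    preserves -- invented names, C11 atomics standing in for the kernel's
    barriers; the plain int data accesses are formally racy and shown only to
    illustrate the structure:

    #include <stdatomic.h>
    #include <pthread.h>

    static atomic_uint seq;     /* even: idle, odd: write in progress */
    static pthread_mutex_t wlock = PTHREAD_MUTEX_INITIALIZER;
    static int data_x, data_y;  /* the pair protected by the seqlock */

    static void write_pair(int x, int y)
    {
        /* Sleeping, PI-capable lock first -- this is what lets RT boost
         * a preempted writer instead of letting readers spin forever. */
        pthread_mutex_lock(&wlock);
        atomic_fetch_add_explicit(&seq, 1, memory_order_acquire);
        data_x = x;
        data_y = y;
        atomic_fetch_add_explicit(&seq, 1, memory_order_release);
        pthread_mutex_unlock(&wlock);
    }

    static void read_pair(int *x, int *y)
    {
        unsigned s;

        do {    /* retry while a writer is active or raced with us */
            s = atomic_load_explicit(&seq, memory_order_acquire);
            *x = data_x;
            *y = data_y;
        } while ((s & 1) ||
                 s != atomic_load_explicit(&seq, memory_order_acquire));
    }
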
diff --git a/patches/series b/patches/series
index 3c89ea844702..7edb87b7c557 100644
--- a/patches/series
+++ b/patches/series
@@ -15,30 +15,43 @@ rcu-Suppress-lockdep-false-positive-boost_mtx-compla.patch
# Stuff broken upstream, patches submitted
############################################################
-# soft hrtimer patches (v1)
-0001-hrtimer-Use-predefined-function-for-updating-next_ti.patch
+# soft hrtimer patches (v3)
+0001-timers-Use-static-keys-for-migrate_enable-and-nohz_a.patch
0002-hrtimer-Correct-blantanly-wrong-comment.patch
0003-hrtimer-Fix-kerneldoc-for-struct-hrtimer_cpu_base.patch
0004-hrtimer-Cleanup-clock-argument-in-schedule_hrtimeout.patch
-0005-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
-0006-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
-0007-hrtimer-Reduce-conditional-code-hres_active.patch
-0008-hrtimer-Reduce-conditional-code-expires_next-next_ti.patch
-0009-hrtimer-Reduce-conditional-code-hrtimer_reprogram.patch
-0010-hrtimer-Make-handling-of-hrtimer-reprogramming-and-e.patch
-0011-hrtimer-Allow-remote-hrtimer-enqueue-with-expires_ne.patch
-0012-hrtimer-Simplify-hrtimer_reprogram-call.patch
-0013-hrtimer-Split-out-code-from-hrtimer_start_range_ns-f.patch
-0014-hrtimer-Split-out-code-from-__hrtimer_get_next_event.patch
-0015-hrtimer-Add-clock-bases-for-soft-irq-context.patch
-0016-hrtimer-Allow-function-reuse-for-softirq-based-hrtim.patch
-0017-hrtimer-Implementation-of-softirq-hrtimer-handling.patch
-0018-hrtimer-Enable-soft-and-hard-hrtimer.patch
-hrtimer-soft-bases-timekeeping.patch
-0019-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
-0020-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
-0021-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
-0022-softirq-Remove-tasklet_hrtimer.patch
+0005-hrtimer-Fix-hrtimer-function-description.patch
+0006-hrtimer-Ensure-POSIX-compliance-relative-CLOCK_REALT.patch
+0007-hrtimer-Cleanup-hrtimer_mode-enum.patch
+0008-tracing-hrtimer-Take-all-clock-bases-and-modes-into-.patch
+0009-tracing-hrtimer-Print-hrtimer-mode-in-hrtimer_start-.patch
+0010-hrtimer-Switch-for-loop-to-_ffs-evaluation.patch
+0011-hrtimer-Store-running-timer-in-hrtimer_clock_base.patch
+0012-hrtimer-Make-room-in-struct-hrtimer_cpu_base.patch
+0013-hrtimer-Reduce-conditional-code-hres_active.patch
+0014-hrtimer-Use-accesor-functions-instead-of-direct-acce.patch
+0015-hrtimer-Make-the-remote-enqueue-check-unconditional.patch
+0016-hrtimer-Make-hrtimer_cpu_base.next_timer-handling-un.patch
+0017-hrtimer-Make-hrtimer_reprogramm-unconditional.patch
+0018-hrtimer-Reduce-conditional-code-and-make-hrtimer_for.patch
+0019-hrtimer-Unify-handling-of-hrtimer-remove.patch
+0020-hrtimer-Unify-handling-of-remote-enqueue.patch
+0021-hrtimer-Make-remote-enqueue-decision-less-restrictiv.patch
+0022-hrtimer-Remove-base-argument-from-hrtimer_reprogram.patch
+0023-hrtimer-Split-hrtimer_start_range_ns.patch
+0024-hrtimer-Split-__hrtimer_get_next_event.patch
+0025-hrtimer-Use-irqsave-irqrestore-around-__run_hrtimer.patch
+0026-hrtimer-Add-clock-bases-and-hrtimer-mode-for-soft-ir.patch
+0027-hrtimer-Prepare-handling-of-hard-and-softirq-based-h.patch
+0028-hrtimer-Implement-support-for-softirq-based-hrtimers.patch
+0029-hrtimer-Implement-SOFT-HARD-clock-base-selection.patch
+0030-can-bcm-Replace-hrtimer_tasklet-with-softirq-based-h.patch
+0031-mac80211_hwsim-Replace-hrtimer-tasklet-with-softirq-.patch
+0032-xfrm-Replace-hrtimer-tasklet-with-softirq-hrtimer.patch
+0033-softirq-Remove-tasklet_hrtimer.patch
+0034-ALSA-dummy-Replace-tasklet-with-softirq-hrtimer.patch
+0035-usb-gadget-NCM-Replace-tasklet-with-softirq-hrtimer.patch
+0036-net-mvpp2-Replace-tasklet-with-softirq-hrtimer.patch
# Those two should vanish soon (not use PIT during bootup)
at91_dont_enable_disable_clock.patch
@@ -55,9 +68,7 @@ rfc-arm-smp-__cpu_disable-fix-sleeping-function-called-from-invalid-context.patc
rtmutex--Handle-non-enqueued-waiters-gracefully.patch
rbtree-include-rcu.h-because-we-use-it.patch
rxrpc-remove-unused-static-variables.patch
-cpu_pm-replace-raw_notifier-to-atomic_notifier.patch
mfd-syscon-atmel-smc-include-string.h.patch
-pci-switchtec-Don-t-use-completion-s-wait-queue.patch
# Wants a different fix for upstream
NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -66,8 +77,7 @@ NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
# Submitted on LKML
############################################################
Bluetooth-avoid-recursive-locking-in-hci_send_to_cha.patch
-iommu-amd-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-for.patch
-arm-xen-don-t-inclide-rwlock.h-directly.patch
+iommu-iova-Use-raw_cpu_ptr-instead-of-get_cpu_ptr-fo.patch
greybus-audio-don-t-inclide-rwlock.h-directly.patch
xen-9pfs-don-t-inclide-rwlock.h-directly.patch
@@ -264,7 +274,6 @@ net-wireless-warn-nort.patch
# BIT SPINLOCKS - SIGH
fs-replace-bh_uptodate_lock-for-rt.patch
-fs-convert-two-mroe-BH_Uptodate_Lock-related-bitspin.patch
fs-jbd-replace-bh_state-lock.patch
# GENIRQ
@@ -323,6 +332,7 @@ timer-delay-waking-softirqs-from-the-jiffy-tick.patch
x86-kvm-require-const-tsc-for-rt.patch
# HRTIMERS
+time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
hrtimers-prepare-full-preemption.patch
hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -353,7 +363,6 @@ stop_machine-convert-stop_machine_run-to-PREEMPT_RT.patch
stop-machine-raw-lock.patch
# MIGRATE DISABLE AND PER CPU
-# XXX redo
hotplug-light-get-online-cpus.patch
ftrace-migrate-disable-tracing.patch
@@ -623,9 +632,9 @@ drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
drivers-zram-fix-zcomp_stream_get-smp_processor_id-u.patch
tpm_tis-fix-stall-after-iowrite-s.patch
+pci-switchtec-Don-t-use-completion-s-wait-queue.patch
# I915
-i915-bogus-warning-from-i915-when-running-on-PREEMPT.patch
drmradeoni915_Use_preempt_disableenable_rt()_where_recommended.patch
drmi915_Use_local_lockunlock_irq()_in_intel_pipe_update_startend().patch
drm-i915-init-spinlock-properly-on-RT.patch
@@ -650,4 +659,3 @@ workqueue-prevent-deadlock-stall.patch
# Add RT to version
localversion.patch
-
diff --git a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
index 12f2887a2469..3b19184eb83f 100644
--- a/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
+++ b/patches/signals-allow-rt-tasks-to-cache-one-sigqueue-struct.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -787,6 +787,8 @@ struct task_struct {
+@@ -788,6 +788,8 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
struct sighand_struct *sighand;
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Restored if set_restore_sigmask() was used: */
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
-@@ -228,6 +228,7 @@ static inline void init_sigpending(struc
+@@ -243,6 +243,7 @@ static inline void init_sigpending(struc
}
extern void flush_sigqueue(struct sigpending *queue);
@@ -49,7 +49,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/fork.c
+++ b/kernel/fork.c
-@@ -1637,6 +1637,7 @@ static __latent_entropy struct task_stru
+@@ -1649,6 +1649,7 @@ static __latent_entropy struct task_stru
spin_lock_init(&p->alloc_lock);
init_sigpending(&p->pending);
diff --git a/patches/skbufhead-raw-lock.patch b/patches/skbufhead-raw-lock.patch
index 1129981695bf..aff4b6fb5ceb 100644
--- a/patches/skbufhead-raw-lock.patch
+++ b/patches/skbufhead-raw-lock.patch
@@ -15,7 +15,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
-@@ -2782,6 +2782,7 @@ struct softnet_data {
+@@ -2772,6 +2772,7 @@ struct softnet_data {
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
struct napi_struct backlog;
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -287,6 +287,7 @@ struct sk_buff_head {
+@@ -288,6 +288,7 @@ struct sk_buff_head {
__u32 qlen;
spinlock_t lock;
@@ -33,7 +33,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
};
struct sk_buff;
-@@ -1573,6 +1574,12 @@ static inline void skb_queue_head_init(s
+@@ -1668,6 +1669,12 @@ static inline void skb_queue_head_init(s
__skb_queue_head_init(list);
}
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -216,14 +216,14 @@ static inline struct hlist_head *dev_ind
+@@ -217,14 +217,14 @@ static inline struct hlist_head *dev_ind
static inline void rps_lock(struct softnet_data *sd)
{
#ifdef CONFIG_RPS
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
}
-@@ -4540,7 +4540,7 @@ static void flush_backlog(struct work_st
+@@ -4581,7 +4581,7 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->input_pkt_queue);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
input_queue_head_incr(sd);
}
}
-@@ -4550,11 +4550,14 @@ static void flush_backlog(struct work_st
+@@ -4591,11 +4591,14 @@ static void flush_backlog(struct work_st
skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
if (skb->dev->reg_state == NETREG_UNREGISTERING) {
__skb_unlink(skb, &sd->process_queue);
@@ -90,7 +90,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static void flush_all_backlogs(void)
-@@ -5101,7 +5104,9 @@ static int process_backlog(struct napi_s
+@@ -5142,7 +5145,9 @@ static int process_backlog(struct napi_s
while (again) {
struct sk_buff *skb;
@@ -100,7 +100,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
__netif_receive_skb(skb);
rcu_read_unlock();
-@@ -5109,9 +5114,9 @@ static int process_backlog(struct napi_s
+@@ -5150,9 +5155,9 @@ static int process_backlog(struct napi_s
if (++work >= quota)
return work;
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rps_lock(sd);
if (skb_queue_empty(&sd->input_pkt_queue)) {
/*
-@@ -5551,13 +5556,21 @@ static __latent_entropy void net_rx_acti
+@@ -5592,13 +5597,21 @@ static __latent_entropy void net_rx_acti
unsigned long time_limit = jiffies +
usecs_to_jiffies(netdev_budget_usecs);
int budget = netdev_budget;
@@ -133,7 +133,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for (;;) {
struct napi_struct *n;
-@@ -8361,6 +8374,9 @@ static int dev_cpu_dead(unsigned int old
+@@ -8413,6 +8426,9 @@ static int dev_cpu_dead(unsigned int old
netif_rx_ni(skb);
input_queue_head_incr(oldsd);
}
@@ -143,7 +143,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -8664,8 +8680,9 @@ static int __init net_dev_init(void)
+@@ -8716,8 +8732,9 @@ static int __init net_dev_init(void)
INIT_WORK(flush, flush_backlog);
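
    [Editorial sketch] Besides making the queue lock raw, the hunks above stop
    freeing skbs while that raw lock is held: victims are unlinked onto a
    private tofree_queue and released only after the lock is dropped, since
    kfree_skb() may take sleeping locks on RT. A hedged userspace sketch of
    that unlink-under-the-lock, free-outside-it idiom (all names invented for
    illustration):

    #include <pthread.h>
    #include <stdlib.h>

    struct node { struct node *next; /* payload would follow */ };

    static pthread_mutex_t q_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct node *queue_head;

    /* Drain the shared queue: unlink everything while holding the lock,
     * but do the potentially expensive frees only after dropping it. */
    static void flush_queue(void)
    {
        struct node *local, *n;

        pthread_mutex_lock(&q_lock);
        local = queue_head;          /* steal the whole list */
        queue_head = NULL;
        pthread_mutex_unlock(&q_lock);

        while ((n = local)) {        /* free outside the critical section */
            local = n->next;
            free(n);
        }
    }
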
diff --git a/patches/slub-disable-SLUB_CPU_PARTIAL.patch b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
index ccb4abf7b1b4..2900baa6aad5 100644
--- a/patches/slub-disable-SLUB_CPU_PARTIAL.patch
+++ b/patches/slub-disable-SLUB_CPU_PARTIAL.patch
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/init/Kconfig
+++ b/init/Kconfig
-@@ -1575,7 +1575,7 @@ config SLAB_FREELIST_RANDOM
+@@ -1589,7 +1589,7 @@ config SLAB_FREELIST_HARDENED
config SLUB_CPU_PARTIAL
default y
diff --git a/patches/slub-enable-irqs-for-no-wait.patch b/patches/slub-enable-irqs-for-no-wait.patch
index e648f7fbf2d8..4336c4fe76bd 100644
--- a/patches/slub-enable-irqs-for-no-wait.patch
+++ b/patches/slub-enable-irqs-for-no-wait.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -1538,14 +1538,17 @@ static struct page *allocate_slab(struct
+@@ -1572,14 +1572,17 @@ static struct page *allocate_slab(struct
void *start, *p;
int idx, order;
bool shuffle;
@@ -32,7 +32,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_enable();
flags |= s->allocflags;
-@@ -1620,11 +1623,7 @@ static struct page *allocate_slab(struct
+@@ -1654,11 +1657,7 @@ static struct page *allocate_slab(struct
page->frozen = 1;
out:
diff --git a/patches/softirq-disable-softirq-stacks-for-rt.patch b/patches/softirq-disable-softirq-stacks-for-rt.patch
index e0b5f22ba5ca..f28261e0c9f1 100644
--- a/patches/softirq-disable-softirq-stacks-for-rt.patch
+++ b/patches/softirq-disable-softirq-stacks-for-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
-@@ -670,6 +670,7 @@ void irq_ctx_init(void)
+@@ -685,6 +685,7 @@ void irq_ctx_init(void)
}
}
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
struct thread_info *curtp, *irqtp;
-@@ -687,6 +688,7 @@ void do_softirq_own_stack(void)
+@@ -702,6 +703,7 @@ void do_softirq_own_stack(void)
if (irqtp->flags)
set_bits(irqtp->flags, &curtp->flags);
}
@@ -73,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
mflr r0
--- a/arch/sh/kernel/irq.c
+++ b/arch/sh/kernel/irq.c
-@@ -147,6 +147,7 @@ void irq_ctx_exit(int cpu)
+@@ -148,6 +148,7 @@ void irq_ctx_exit(int cpu)
hardirq_ctx[cpu] = NULL;
}
@@ -81,7 +81,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
struct thread_info *curctx;
-@@ -174,6 +175,7 @@ void do_softirq_own_stack(void)
+@@ -175,6 +176,7 @@ void do_softirq_own_stack(void)
"r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
);
}
@@ -91,7 +91,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/arch/sparc/kernel/irq_64.c
+++ b/arch/sparc/kernel/irq_64.c
-@@ -854,6 +854,7 @@ void __irq_entry handler_irq(int pil, st
+@@ -855,6 +855,7 @@ void __irq_entry handler_irq(int pil, st
set_irq_regs(old_regs);
}
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
void *orig_sp, *sp = softirq_stack[smp_processor_id()];
-@@ -868,6 +869,7 @@ void do_softirq_own_stack(void)
+@@ -869,6 +870,7 @@ void do_softirq_own_stack(void)
__asm__ __volatile__("mov %0, %%sp"
: : "r" (orig_sp));
}
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void fixup_irqs(void)
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -888,6 +888,7 @@ EXPORT_SYMBOL(native_load_gs_index)
+@@ -950,6 +950,7 @@ EXPORT_SYMBOL(native_load_gs_index)
jmp 2b
.previous
@@ -117,17 +117,17 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Call softirq on interrupt stack. Interrupts are off. */
ENTRY(do_softirq_own_stack)
pushq %rbp
-@@ -900,6 +901,7 @@ ENTRY(do_softirq_own_stack)
- decl PER_CPU_VAR(irq_count)
+@@ -960,6 +961,7 @@ ENTRY(do_softirq_own_stack)
+ leaveq
ret
- END(do_softirq_own_stack)
+ ENDPROC(do_softirq_own_stack)
+#endif
#ifdef CONFIG_XEN
- idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+ idtentry hypervisor_callback xen_do_hypervisor_callback has_error_code=0
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
-@@ -127,6 +127,7 @@ void irq_ctx_init(int cpu)
+@@ -128,6 +128,7 @@ void irq_ctx_init(int cpu)
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void)
{
struct irq_stack *irqstk;
-@@ -143,6 +144,7 @@ void do_softirq_own_stack(void)
+@@ -144,6 +145,7 @@ void do_softirq_own_stack(void)
call_on_stack(__do_softirq, isp);
}
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
{
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -493,7 +493,7 @@ struct softirq_action
+@@ -495,7 +495,7 @@ struct softirq_action
asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
diff --git a/patches/softirq-preempt-fix-3-re.patch b/patches/softirq-preempt-fix-3-re.patch
index 404890cbdbe4..387e35955a5a 100644
--- a/patches/softirq-preempt-fix-3-re.patch
+++ b/patches/softirq-preempt-fix-3-re.patch
@@ -22,7 +22,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
-@@ -52,6 +52,7 @@ static void trigger_softirq(void *data)
+@@ -53,6 +53,7 @@ static void trigger_softirq(void *data)
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_restore(flags);
@@ -30,7 +30,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -90,6 +91,7 @@ static int blk_softirq_cpu_dead(unsigned
+@@ -91,6 +92,7 @@ static int blk_softirq_cpu_dead(unsigned
this_cpu_ptr(&blk_cpu_done));
raise_softirq_irqoff(BLOCK_SOFTIRQ);
local_irq_enable();
@@ -38,7 +38,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return 0;
}
-@@ -142,6 +144,7 @@ void __blk_complete_request(struct reque
+@@ -143,6 +145,7 @@ void __blk_complete_request(struct reque
goto do_local;
local_irq_restore(flags);
@@ -48,7 +48,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -186,8 +186,10 @@ do { \
+@@ -187,8 +187,10 @@ do { \
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#define preemptible() (preempt_count() == 0 && !irqs_disabled())
-@@ -274,6 +276,7 @@ do { \
+@@ -275,6 +277,7 @@ do { \
#define preempt_disable_notrace() barrier()
#define preempt_enable_no_resched_notrace() barrier()
#define preempt_enable_notrace() barrier()
@@ -69,7 +69,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define migrate_disable() barrier()
--- a/lib/irq_poll.c
+++ b/lib/irq_poll.c
-@@ -36,6 +36,7 @@ void irq_poll_sched(struct irq_poll *iop
+@@ -37,6 +37,7 @@ void irq_poll_sched(struct irq_poll *iop
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_restore(flags);
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(irq_poll_sched);
-@@ -71,6 +72,7 @@ void irq_poll_complete(struct irq_poll *
+@@ -72,6 +73,7 @@ void irq_poll_complete(struct irq_poll *
local_irq_save(flags);
__irq_poll_complete(iop);
local_irq_restore(flags);
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(irq_poll_complete);
-@@ -95,6 +97,7 @@ static void __latent_entropy irq_poll_so
+@@ -96,6 +98,7 @@ static void __latent_entropy irq_poll_so
}
local_irq_enable();
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Even though interrupts have been re-enabled, this
* access is safe because interrupts can only add new
-@@ -132,6 +135,7 @@ static void __latent_entropy irq_poll_so
+@@ -133,6 +136,7 @@ static void __latent_entropy irq_poll_so
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -195,6 +199,7 @@ static int irq_poll_cpu_dead(unsigned in
+@@ -196,6 +200,7 @@ static int irq_poll_cpu_dead(unsigned in
this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(IRQ_POLL_SOFTIRQ);
local_irq_enable();
@@ -111,7 +111,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -2434,6 +2434,7 @@ static void __netif_reschedule(struct Qd
+@@ -2431,6 +2431,7 @@ static void __netif_reschedule(struct Qd
sd->output_queue_tailp = &q->next_sched;
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -119,7 +119,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
void __netif_schedule(struct Qdisc *q)
-@@ -2496,6 +2497,7 @@ void __dev_kfree_skb_irq(struct sk_buff
+@@ -2493,6 +2494,7 @@ void __dev_kfree_skb_irq(struct sk_buff
__this_cpu_write(softnet_data.completion_queue, skb);
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_restore(flags);
@@ -127,7 +127,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__dev_kfree_skb_irq);
-@@ -3859,6 +3861,7 @@ static int enqueue_to_backlog(struct sk_
+@@ -3855,6 +3857,7 @@ static int enqueue_to_backlog(struct sk_
rps_unlock(sd);
local_irq_restore(flags);
@@ -135,7 +135,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
atomic_long_inc(&skb->dev->rx_dropped);
kfree_skb(skb);
-@@ -5063,12 +5066,14 @@ static void net_rps_action_and_irq_enabl
+@@ -5104,12 +5107,14 @@ static void net_rps_action_and_irq_enabl
sd->rps_ipi_list = NULL;
local_irq_enable();
@@ -150,7 +150,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
-@@ -5146,6 +5151,7 @@ void __napi_schedule(struct napi_struct
+@@ -5187,6 +5192,7 @@ void __napi_schedule(struct napi_struct
local_irq_save(flags);
____napi_schedule(this_cpu_ptr(&softnet_data), n);
local_irq_restore(flags);
@@ -158,7 +158,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(__napi_schedule);
-@@ -8339,6 +8345,7 @@ static int dev_cpu_dead(unsigned int old
+@@ -8391,6 +8397,7 @@ static int dev_cpu_dead(unsigned int old
raise_softirq_irqoff(NET_TX_SOFTIRQ);
local_irq_enable();
diff --git a/patches/softirq-split-locks.patch b/patches/softirq-split-locks.patch
index 693153a9d444..cecd34bdb6a8 100644
--- a/patches/softirq-split-locks.patch
+++ b/patches/softirq-split-locks.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/bottom_half.h
+++ b/include/linux/bottom_half.h
-@@ -3,6 +3,39 @@
+@@ -4,6 +4,39 @@
#include <linux/preempt.h>
@@ -76,7 +76,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_TRACE_IRQFLAGS
extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
#else
-@@ -30,5 +63,6 @@ static inline void local_bh_enable(void)
+@@ -31,5 +64,6 @@ static inline void local_bh_enable(void)
{
__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
}
@@ -85,7 +85,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif /* _LINUX_BH_H */
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -490,10 +490,11 @@ struct softirq_action
+@@ -492,10 +492,11 @@ struct softirq_action
void (*action)(struct softirq_action *);
};
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void do_softirq_own_stack(void);
#else
static inline void do_softirq_own_stack(void)
-@@ -501,6 +502,9 @@ static inline void do_softirq_own_stack(
+@@ -503,6 +504,9 @@ static inline void do_softirq_own_stack(
__do_softirq();
}
#endif
@@ -109,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
-@@ -508,6 +512,7 @@ extern void __raise_softirq_irqoff(unsig
+@@ -510,6 +514,7 @@ extern void __raise_softirq_irqoff(unsig
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
-@@ -640,6 +645,12 @@ extern void tasklet_kill_immediate(struc
+@@ -642,6 +647,12 @@ extern void tasklet_kill_immediate(struc
extern void tasklet_init(struct tasklet_struct *t,
void (*func)(unsigned long), unsigned long data);
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
-@@ -50,7 +50,11 @@
+@@ -51,7 +51,11 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* We use the MSB mostly because its available */
#define PREEMPT_NEED_RESCHED 0x80000000
-@@ -80,9 +84,15 @@
+@@ -81,9 +85,15 @@
#include <asm/preempt.h>
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
@@ -162,7 +162,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Are we doing bottom half or hardware interrupt processing?
-@@ -100,7 +110,6 @@
+@@ -101,7 +111,6 @@
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
@@ -172,7 +172,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
(NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
-@@ -1085,6 +1085,8 @@ struct task_struct {
+@@ -1096,6 +1096,8 @@ struct task_struct {
#endif
#ifdef CONFIG_PREEMPT_RT_BASE
struct rcu_head put_rcu;
@@ -181,7 +181,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
unsigned long task_state_change;
-@@ -1272,6 +1274,7 @@ extern struct pid *cad_pid;
+@@ -1313,6 +1315,7 @@ extern struct pid *cad_pid;
/*
* Per process flags
*/
@@ -191,7 +191,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
--- a/init/main.c
+++ b/init/main.c
-@@ -538,6 +538,7 @@ asmlinkage __visible void __init start_k
+@@ -539,6 +539,7 @@ asmlinkage __visible void __init start_k
setup_command_line(command_line);
setup_nr_cpu_ids();
setup_per_cpu_areas();
@@ -816,7 +816,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3931,11 +3931,9 @@ int netif_rx_ni(struct sk_buff *skb)
+@@ -4061,11 +4061,9 @@ int netif_rx_ni(struct sk_buff *skb)
trace_netif_rx_ni_entry(skb);
diff --git a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
index 84f960733568..fadbc6829dd2 100644
--- a/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
+++ b/patches/sparc64-use-generic-rwsem-spinlocks-rt.patch
@@ -10,7 +10,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
-@@ -204,12 +204,10 @@ config NR_CPUS
+@@ -206,12 +206,10 @@ config NR_CPUS
source kernel/Kconfig.hz
config RWSEM_GENERIC_SPINLOCK
diff --git a/patches/srcu-Prohibit-call_srcu-use-under-raw-spinlocks.patch b/patches/srcu-Prohibit-call_srcu-use-under-raw-spinlocks.patch
index 0dd7519d2e33..44d638795424 100644
--- a/patches/srcu-Prohibit-call_srcu-use-under-raw-spinlocks.patch
+++ b/patches/srcu-Prohibit-call_srcu-use-under-raw-spinlocks.patch
@@ -1,8 +1,8 @@
From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Date: Tue, 10 Oct 2017 13:52:30 -0700
-Subject: [PATCH] srcu: Prohibit call_srcu() use under raw spinlocks
+Subject: srcu: Prohibit call_srcu() use under raw spinlocks
-commit 7c4b15340e4e23668cb3cadbf4f76795ee495085
+Upstream commit 08265b8f1a139c1cff052b35ab7cf929528f88bb
Invoking queue_delayed_work() while holding a raw spinlock is forbidden
in -rt kernels, which is exactly what __call_srcu() does, indirectly via
@@ -12,11 +12,10 @@ is not ever called while holding a raw spinlock.
Reported-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
include/linux/srcutree.h | 8 +--
- kernel/rcu/srcutree.c | 118 ++++++++++++++++++++++++++++++-----------------
- 2 files changed, 81 insertions(+), 45 deletions(-)
+ kernel/rcu/srcutree.c | 109 +++++++++++++++++++++++++++++------------------
+ 2 files changed, 72 insertions(+), 45 deletions(-)
--- a/include/linux/srcutree.h
+++ b/include/linux/srcutree.h
@@ -47,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct mutex srcu_gp_mutex; /* Serialize GP work. */
unsigned int srcu_idx; /* Current rdr array element. */
unsigned long srcu_gp_seq; /* Grace-period seq #. */
-@@ -109,7 +109,7 @@ void process_srcu(struct work_struct *wo
+@@ -107,7 +107,7 @@ struct srcu_struct {
#define __SRCU_STRUCT_INIT(name, pcpu_name) \
{ \
.sda = &pcpu_name, \
@@ -58,9 +57,9 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
--- a/kernel/rcu/srcutree.c
+++ b/kernel/rcu/srcutree.c
-@@ -53,6 +53,42 @@ module_param(counter_wrap_check, ulong,
- static void srcu_invoke_callbacks(struct work_struct *work);
+@@ -54,6 +54,33 @@ static void srcu_invoke_callbacks(struct
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
+ static void process_srcu(struct work_struct *work);
+/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
+#define spin_lock_rcu_node(p) \
@@ -89,19 +88,10 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+#define spin_unlock_irqrestore_rcu_node(p, flags) \
+ spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags) \
+
-+#define spin_trylock_rcu_node(p) \
-+({ \
-+ bool ___locked = spin_trylock(&ACCESS_PRIVATE(p, lock)); \
-+ \
-+ if (___locked) \
-+ smp_mb__after_unlock_lock(); \
-+ ___locked; \
-+})
-+
/*
* Initialize SRCU combining tree. Note that statically allocated
* srcu_struct structures might already have srcu_read_lock() and
-@@ -77,7 +113,7 @@ static void init_srcu_struct_nodes(struc
+@@ -78,7 +105,7 @@ static void init_srcu_struct_nodes(struc
/* Each pass through this loop initializes one srcu_node structure. */
rcu_for_each_node_breadth_first(sp, snp) {
@@ -110,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
ARRAY_SIZE(snp->srcu_data_have_cbs));
for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
-@@ -111,7 +147,7 @@ static void init_srcu_struct_nodes(struc
+@@ -112,7 +139,7 @@ static void init_srcu_struct_nodes(struc
snp_first = sp->level[level];
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
@@ -119,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_segcblist_init(&sdp->srcu_cblist);
sdp->srcu_cblist_invoking = false;
sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
-@@ -170,7 +206,7 @@ int __init_srcu_struct(struct srcu_struc
+@@ -171,7 +198,7 @@ int __init_srcu_struct(struct srcu_struc
/* Don't re-initialize a lock while it is held. */
debug_check_no_locks_freed((void *)sp, sizeof(*sp));
lockdep_init_map(&sp->dep_map, name, key, 0);
@@ -128,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);
-@@ -187,7 +223,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
+@@ -188,7 +215,7 @@ EXPORT_SYMBOL_GPL(__init_srcu_struct);
*/
int init_srcu_struct(struct srcu_struct *sp)
{
@@ -137,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);
-@@ -210,13 +246,13 @@ static void check_init_srcu_struct(struc
+@@ -211,13 +238,13 @@ static void check_init_srcu_struct(struc
/* The smp_load_acquire() pairs with the smp_store_release(). */
if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed))) /*^^^*/
return; /* Already initialized. */
@@ -154,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -498,7 +534,7 @@ static void srcu_gp_end(struct srcu_stru
+@@ -499,7 +526,7 @@ static void srcu_gp_end(struct srcu_stru
mutex_lock(&sp->srcu_cb_mutex);
/* End the current grace period. */
@@ -163,7 +153,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
idx = rcu_seq_state(sp->srcu_gp_seq);
WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
cbdelay = srcu_get_delay(sp);
-@@ -507,7 +543,7 @@ static void srcu_gp_end(struct srcu_stru
+@@ -508,7 +535,7 @@ static void srcu_gp_end(struct srcu_stru
gpseq = rcu_seq_current(&sp->srcu_gp_seq);
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
sp->srcu_gp_seq_needed_exp = gpseq;
@@ -172,7 +162,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
mutex_unlock(&sp->srcu_gp_mutex);
/* A new grace period can start at this point. But only one. */
-@@ -515,7 +551,7 @@ static void srcu_gp_end(struct srcu_stru
+@@ -516,7 +543,7 @@ static void srcu_gp_end(struct srcu_stru
idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
rcu_for_each_node_breadth_first(sp, snp) {
@@ -181,7 +171,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
cbs = false;
if (snp >= sp->level[rcu_num_lvls - 1])
cbs = snp->srcu_have_cbs[idx] == gpseq;
-@@ -525,7 +561,7 @@ static void srcu_gp_end(struct srcu_stru
+@@ -526,7 +553,7 @@ static void srcu_gp_end(struct srcu_stru
snp->srcu_gp_seq_needed_exp = gpseq;
mask = snp->srcu_data_have_cbs[idx];
snp->srcu_data_have_cbs[idx] = 0;
@@ -190,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (cbs)
srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);
-@@ -533,11 +569,11 @@ static void srcu_gp_end(struct srcu_stru
+@@ -534,11 +561,11 @@ static void srcu_gp_end(struct srcu_stru
if (!(gpseq & counter_wrap_check))
for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
sdp = per_cpu_ptr(sp->sda, cpu);
@@ -204,7 +194,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -545,17 +581,17 @@ static void srcu_gp_end(struct srcu_stru
+@@ -546,17 +573,17 @@ static void srcu_gp_end(struct srcu_stru
mutex_unlock(&sp->srcu_cb_mutex);
/* Start a new grace period if needed. */
@@ -225,7 +215,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -575,18 +611,18 @@ static void srcu_funnel_exp_start(struct
+@@ -576,18 +603,18 @@ static void srcu_funnel_exp_start(struct
if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
return;
@@ -249,7 +239,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -608,12 +644,12 @@ static void srcu_funnel_gp_start(struct
+@@ -609,12 +636,12 @@ static void srcu_funnel_gp_start(struct
for (; snp != NULL; snp = snp->srcu_parent) {
if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
return; /* GP already done and CBs recorded. */
@@ -264,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (snp == sdp->mynode && snp_seq != s) {
srcu_schedule_cbs_sdp(sdp, do_norm
? SRCU_INTERVAL
-@@ -629,11 +665,11 @@ static void srcu_funnel_gp_start(struct
+@@ -630,11 +657,11 @@ static void srcu_funnel_gp_start(struct
snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
snp->srcu_gp_seq_needed_exp = s;
@@ -278,7 +268,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
/*
* Record need for grace period s. Pair with load
-@@ -652,7 +688,7 @@ static void srcu_funnel_gp_start(struct
+@@ -653,7 +680,7 @@ static void srcu_funnel_gp_start(struct
queue_delayed_work(system_power_efficient_wq, &sp->work,
srcu_get_delay(sp));
}
@@ -287,7 +277,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -815,7 +851,7 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -816,7 +843,7 @@ void __call_srcu(struct srcu_struct *sp,
rhp->func = func;
local_irq_save(flags);
sdp = this_cpu_ptr(sp->sda);
@@ -296,7 +286,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
rcu_segcblist_advance(&sdp->srcu_cblist,
rcu_seq_current(&sp->srcu_gp_seq));
-@@ -829,7 +865,7 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -830,7 +857,7 @@ void __call_srcu(struct srcu_struct *sp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
@@ -305,7 +295,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (needgp)
srcu_funnel_gp_start(sp, sdp, s, do_norm);
else if (needexp)
-@@ -885,7 +921,7 @@ static void __synchronize_srcu(struct sr
+@@ -886,7 +913,7 @@ static void __synchronize_srcu(struct sr
/*
* Make sure that later code is ordered after the SRCU grace
@@ -314,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* in srcu_invoke_callbacks(). Unlike Tree RCU, this is needed
* because the current CPU might have been totally uninvolved with
* (and thus unordered against) that grace period.
-@@ -1009,7 +1045,7 @@ void srcu_barrier(struct srcu_struct *sp
+@@ -1010,7 +1037,7 @@ void srcu_barrier(struct srcu_struct *sp
*/
for_each_possible_cpu(cpu) {
sdp = per_cpu_ptr(sp->sda, cpu);
@@ -323,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
atomic_inc(&sp->srcu_barrier_cpu_cnt);
sdp->srcu_barrier_head.func = srcu_barrier_cb;
debug_rcu_head_queue(&sdp->srcu_barrier_head);
-@@ -1018,7 +1054,7 @@ void srcu_barrier(struct srcu_struct *sp
+@@ -1019,7 +1046,7 @@ void srcu_barrier(struct srcu_struct *sp
debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
atomic_dec(&sp->srcu_barrier_cpu_cnt);
}
@@ -332,7 +322,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/* Remove the initial count, at which point reaching zero can happen. */
-@@ -1067,17 +1103,17 @@ static void srcu_advance_state(struct sr
+@@ -1068,17 +1095,17 @@ static void srcu_advance_state(struct sr
*/
idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq)); /* ^^^ */
if (idx == SRCU_STATE_IDLE) {
@@ -353,7 +343,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (idx != SRCU_STATE_IDLE) {
mutex_unlock(&sp->srcu_gp_mutex);
return; /* Someone else started the grace period. */
-@@ -1126,19 +1162,19 @@ static void srcu_invoke_callbacks(struct
+@@ -1127,19 +1154,19 @@ static void srcu_invoke_callbacks(struct
sdp = container_of(work, struct srcu_data, work.work);
sp = sdp->sp;
rcu_cblist_init(&ready_cbs);
@@ -376,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rhp = rcu_cblist_dequeue(&ready_cbs);
for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
debug_rcu_head_unqueue(rhp);
-@@ -1151,13 +1187,13 @@ static void srcu_invoke_callbacks(struct
+@@ -1152,13 +1179,13 @@ static void srcu_invoke_callbacks(struct
* Update counts, accelerate new callbacks, and if needed,
* schedule another round of callback invocation.
*/
@@ -392,7 +382,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (more)
srcu_schedule_cbs_sdp(sdp, 0);
}
-@@ -1170,7 +1206,7 @@ static void srcu_reschedule(struct srcu_
+@@ -1171,7 +1198,7 @@ static void srcu_reschedule(struct srcu_
{
bool pushgp = true;
@@ -401,7 +391,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
/* All requests fulfilled, time to go idle. */
-@@ -1180,7 +1216,7 @@ static void srcu_reschedule(struct srcu_
+@@ -1181,7 +1208,7 @@ static void srcu_reschedule(struct srcu_
/* Outstanding request and no GP. Start one. */
srcu_gp_start(sp);
}
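
    [Editorial sketch] Most of the churn above comes from the upstream commit
    replacing open-coded raw_spin_lock_irq*() calls with the new
    spin_lock_*_rcu_node() wrappers, which pair each acquisition with
    smp_mb__after_unlock_lock() so the node locks are fully ordered. An
    illustrative userspace analogue of that wrapper pattern -- invented names,
    a pthread mutex plus an explicit fence standing in for the kernel
    primitives:

    #include <pthread.h>
    #include <stdatomic.h>

    struct node_like {
        pthread_mutex_t lock;
        unsigned long have_cbs;
    };

    /* Every acquisition of a node lock is followed by a full barrier, so
     * callers may rely on it being globally ordered (the kernel macros
     * additionally hide the ACCESS_PRIVATE() wrapping of ->lock). */
    #define lock_node(p)                                    \
    do {                                                    \
        pthread_mutex_lock(&(p)->lock);                     \
        atomic_thread_fence(memory_order_seq_cst);          \
    } while (0)

    #define unlock_node(p)  pthread_mutex_unlock(&(p)->lock)
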
diff --git a/patches/srcu-replace-local_irqsave-with-a-locallock.patch b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
index 08e2eddef775..086c46783664 100644
--- a/patches/srcu-replace-local_irqsave-with-a-locallock.patch
+++ b/patches/srcu-replace-local_irqsave-with-a-locallock.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rcu.h"
#include "rcu_segcblist.h"
-@@ -757,6 +758,8 @@ static void srcu_flip(struct srcu_struct
+@@ -749,6 +750,8 @@ static void srcu_flip(struct srcu_struct
* negligible when amoritized over that time period, and the extra latency
* of a needlessly non-expedited grace period is similarly negligible.
*/
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
unsigned long curseq;
-@@ -765,13 +768,13 @@ static bool srcu_might_be_idle(struct sr
+@@ -757,13 +760,13 @@ static bool srcu_might_be_idle(struct sr
unsigned long t;
/* If the local srcu_data structure has callbacks, not idle. */
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* No local callbacks, so probabalistically probe global state.
-@@ -849,7 +852,7 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -841,7 +844,7 @@ void __call_srcu(struct srcu_struct *sp,
return;
}
rhp->func = func;
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
sdp = this_cpu_ptr(sp->sda);
spin_lock_rcu_node(sdp);
rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
-@@ -865,7 +868,8 @@ void __call_srcu(struct srcu_struct *sp,
+@@ -857,7 +860,8 @@ void __call_srcu(struct srcu_struct *sp,
sdp->srcu_gp_seq_needed_exp = s;
needexp = true;
}
diff --git a/patches/srcu-use-cpu_online-instead-custom-check.patch b/patches/srcu-use-cpu_online-instead-custom-check.patch
index c143b790a031..2fe2b226e0cd 100644
--- a/patches/srcu-use-cpu_online-instead-custom-check.patch
+++ b/patches/srcu-use-cpu_online-instead-custom-check.patch
@@ -30,7 +30,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "rcu.h"
#include "rcu_segcblist.h"
-@@ -424,21 +425,6 @@ static void srcu_gp_start(struct srcu_st
+@@ -425,21 +426,6 @@ static void srcu_gp_start(struct srcu_st
}
/*
@@ -52,7 +52,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* Place the workqueue handler on the specified CPU if online, otherwise
* just run it whereever. This is useful for placing workqueue handlers
* that are to invoke the specified CPU's callbacks.
-@@ -449,12 +435,12 @@ static bool srcu_queue_delayed_work_on(i
+@@ -450,12 +436,12 @@ static bool srcu_queue_delayed_work_on(i
{
bool ret;
@@ -70,7 +70,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -3868,8 +3868,6 @@ int rcutree_online_cpu(unsigned int cpu)
+@@ -3775,8 +3775,6 @@ int rcutree_online_cpu(unsigned int cpu)
{
sync_sched_exp_online_cleanup(cpu);
rcutree_affinity_setting(cpu, -1);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -3880,8 +3878,6 @@ int rcutree_online_cpu(unsigned int cpu)
+@@ -3787,8 +3785,6 @@ int rcutree_online_cpu(unsigned int cpu)
int rcutree_offline_cpu(unsigned int cpu)
{
rcutree_affinity_setting(cpu, cpu);
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -4277,8 +4273,6 @@ void __init rcu_init(void)
+@@ -4236,8 +4232,6 @@ void __init rcu_init(void)
for_each_online_cpu(cpu) {
rcutree_prepare_cpu(cpu);
rcu_cpu_starting(cpu);
diff --git a/patches/suspend-prevernt-might-sleep-splats.patch b/patches/suspend-prevernt-might-sleep-splats.patch
index 6a2637911e99..4530d3697d3e 100644
--- a/patches/suspend-prevernt-might-sleep-splats.patch
+++ b/patches/suspend-prevernt-might-sleep-splats.patch
@@ -25,7 +25,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
-@@ -502,6 +502,7 @@ extern enum system_states {
+@@ -531,6 +531,7 @@ extern enum system_states {
SYSTEM_HALT,
SYSTEM_POWER_OFF,
SYSTEM_RESTART,
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Enable_cpus:
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
-@@ -407,6 +407,8 @@ static int suspend_enter(suspend_state_t
+@@ -428,6 +428,8 @@ static int suspend_enter(suspend_state_t
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
error = syscore_suspend();
if (!error) {
*wakeup = pm_wakeup_pending();
-@@ -423,6 +425,8 @@ static int suspend_enter(suspend_state_t
+@@ -444,6 +446,8 @@ static int suspend_enter(suspend_state_t
syscore_resume();
}
diff --git a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
index d1796546d29b..6cf372b7018a 100644
--- a/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
+++ b/patches/tasklet-rt-prevent-tasklets-from-going-into-infinite-spin-in-rt.patch
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
-@@ -529,8 +529,9 @@ static inline struct task_struct *this_c
+@@ -531,8 +531,9 @@ static inline struct task_struct *this_c
to be executed on some cpu at least once after this.
* If the tasklet is already scheduled, but its execution is still not
started, it will be executed only once.
@@ -55,7 +55,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* Tasklet is strictly serialized wrt itself, but not
wrt another tasklets. If client needs some intertask synchronization,
he makes it with spinlocks.
-@@ -555,27 +556,36 @@ struct tasklet_struct name = { NULL, 0,
+@@ -557,27 +558,36 @@ struct tasklet_struct name = { NULL, 0,
enum
{
TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif
-@@ -624,12 +634,7 @@ static inline void tasklet_disable(struc
+@@ -626,12 +636,7 @@ static inline void tasklet_disable(struc
smp_mb();
}
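
    [Editorial sketch] The documentation text quoted in this patch pins down
    the tasklet contract: a schedule issued before execution starts coalesces
    with the pending one, while a schedule issued during execution yields
    exactly one further run. A compilable userspace miniature of that contract
    using a single atomic flag (the RT patch's extra RUN/PENDING/CHAINED
    states are omitted; all names are invented):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_bool scheduled;
    static int runs;

    static void tasklet_schedule_like(void)
    {
        /* Only the 0 -> 1 transition queues work; duplicates coalesce. */
        if (!atomic_exchange(&scheduled, 1))
            printf("queued for execution\n");
    }

    static void tasklet_run_like(void)
    {
        if (atomic_exchange(&scheduled, 0))     /* claim the pending flag */
            runs++;                             /* handler body goes here */
    }

    int main(void)
    {
        tasklet_schedule_like();
        tasklet_schedule_like();   /* coalesces: still one pending run */
        tasklet_run_like();
        printf("runs=%d\n", runs); /* prints runs=1 */
        return 0;
    }
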
diff --git a/patches/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch b/patches/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
new file mode 100644
index 000000000000..01818c08a242
--- /dev/null
+++ b/patches/time-hrtimer-avoid-schedule_work-with-interrupts-dis.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 15 Nov 2017 17:29:51 +0100
+Subject: [PATCH] time/hrtimer: avoid schedule_work() with interrupts disabled
+
+The NOHZ code tries to schedule a workqueue with interrupts disabled.
+Since this does not work -RT I am switching it to swork instead.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ kernel/time/timer.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+--- a/kernel/time/timer.c
++++ b/kernel/time/timer.c
+@@ -217,8 +217,7 @@ static DEFINE_PER_CPU(struct timer_base,
+ static DEFINE_STATIC_KEY_FALSE(timers_nohz_active);
+ static DEFINE_MUTEX(timer_keys_mutex);
+
+-static void timer_update_keys(struct work_struct *work);
+-static DECLARE_WORK(timer_update_work, timer_update_keys);
++static struct swork_event timer_update_swork;
+
+ #ifdef CONFIG_SMP
+ unsigned int sysctl_timer_migration = 1;
+@@ -238,7 +237,7 @@ static void timers_update_migration(void
+ static inline void timers_update_migration(void) { }
+ #endif /* !CONFIG_SMP */
+
+-static void timer_update_keys(struct work_struct *work)
++static void timer_update_keys(struct swork_event *event)
+ {
+ mutex_lock(&timer_keys_mutex);
+ timers_update_migration();
+@@ -248,9 +247,17 @@ static void timer_update_keys(struct wor
+
+ void timers_update_nohz(void)
+ {
+- schedule_work(&timer_update_work);
++ swork_queue(&timer_update_swork);
+ }
+
++static __init int hrtimer_init_thread(void)
++{
++ WARN_ON(swork_get());
++ INIT_SWORK(&timer_update_swork, timer_update_keys);
++ return 0;
++}
++early_initcall(hrtimer_init_thread);
++
+ int timer_migration_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
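
    [Editorial sketch] The new patch above swaps the workqueue for swork
    because timers_update_nohz() can be reached with interrupts disabled,
    where schedule_work() is unsafe on RT. A loose userspace analogue of the
    resulting split -- a caller that only sets a flag, and a dedicated worker
    thread that takes the sleeping mutex. Names are invented, and a polling
    loop stands in for the kernel's event-driven swork thread:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <unistd.h>

    static atomic_bool update_requested;
    static pthread_mutex_t keys_mutex = PTHREAD_MUTEX_INITIALIZER;

    static void request_update(void)   /* safe even where sleeping is not */
    {
        atomic_store(&update_requested, 1);
    }

    static void *worker(void *arg)
    {
        (void)arg;
        for (;;) {
            if (atomic_exchange(&update_requested, 0)) {
                pthread_mutex_lock(&keys_mutex);   /* may sleep here */
                puts("updating timer keys");
                pthread_mutex_unlock(&keys_mutex);
            }
            usleep(1000);
        }
        return NULL;
    }

    int main(void)
    {
        pthread_t tid;

        pthread_create(&tid, NULL, worker, NULL);
        request_update();              /* deferred to the worker thread */
        sleep(1);
        return 0;
    }
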
diff --git a/patches/timekeeping-split-jiffies-lock.patch b/patches/timekeeping-split-jiffies-lock.patch
index 1f30043f19cd..e969fc59e8a7 100644
--- a/patches/timekeeping-split-jiffies-lock.patch
+++ b/patches/timekeeping-split-jiffies-lock.patch
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
--- a/kernel/time/timekeeping.h
+++ b/kernel/time/timekeeping.h
-@@ -17,7 +17,8 @@ extern void timekeeping_resume(void);
+@@ -18,7 +18,8 @@ extern void timekeeping_resume(void);
extern void do_timer(unsigned long ticks);
extern void update_wall_time(void);
diff --git a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
index 627ea936f4ef..35c47d387ff6 100644
--- a/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
+++ b/patches/timer-delay-waking-softirqs-from-the-jiffy-tick.patch
@@ -58,7 +58,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
-@@ -1620,13 +1620,13 @@ void update_process_times(int user_tick)
+@@ -1636,13 +1636,13 @@ void update_process_times(int user_tick)
/* Note: this timer irq context must be accounted for as well. */
account_process_tick(p, user_tick);
diff --git a/patches/timer-fd-avoid-live-lock.patch b/patches/timer-fd-avoid-live-lock.patch
index 40bc39571447..31a9b1b5ea4a 100644
--- a/patches/timer-fd-avoid-live-lock.patch
+++ b/patches/timer-fd-avoid-live-lock.patch
@@ -16,7 +16,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
-@@ -470,7 +470,10 @@ static int do_timerfd_settime(int ufd, i
+@@ -471,7 +471,10 @@ static int do_timerfd_settime(int ufd, i
break;
}
spin_unlock_irq(&ctx->wqh.lock);
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 6af63f6d67a5..115843b285fc 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -17,7 +17,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
-@@ -198,7 +198,7 @@ extern void add_timer(struct timer_list
+@@ -213,7 +213,7 @@ extern void add_timer(struct timer_list
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
unsigned long clk;
unsigned long next_expiry;
unsigned int cpu;
-@@ -1132,6 +1136,33 @@ void add_timer_on(struct timer_list *tim
+@@ -1148,6 +1152,33 @@ void add_timer_on(struct timer_list *tim
}
EXPORT_SYMBOL_GPL(add_timer_on);
@@ -108,7 +108,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer - deactivate a timer.
* @timer: the timer to be deactivated
-@@ -1187,7 +1218,7 @@ int try_to_del_timer_sync(struct timer_l
+@@ -1203,7 +1234,7 @@ int try_to_del_timer_sync(struct timer_l
}
EXPORT_SYMBOL(try_to_del_timer_sync);
@@ -117,7 +117,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
* del_timer_sync - deactivate a timer and wait for the handler to finish.
* @timer: the timer to be deactivated
-@@ -1247,7 +1278,7 @@ int del_timer_sync(struct timer_list *ti
+@@ -1263,7 +1294,7 @@ int del_timer_sync(struct timer_list *ti
int ret = try_to_del_timer_sync(timer);
if (ret >= 0)
return ret;
@@ -126,7 +126,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
EXPORT_SYMBOL(del_timer_sync);
-@@ -1311,13 +1342,16 @@ static void expire_timers(struct timer_b
+@@ -1327,13 +1358,16 @@ static void expire_timers(struct timer_b
fn = timer->function;
data = timer->data;
@@ -144,7 +144,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
raw_spin_lock_irq(&base->lock);
}
}
-@@ -1619,8 +1653,8 @@ static inline void __run_timers(struct t
+@@ -1635,8 +1669,8 @@ static inline void __run_timers(struct t
while (levels--)
expire_timers(base, heads + levels);
}
@@ -154,7 +154,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -1845,6 +1879,9 @@ static void __init init_timer_cpu(int cp
+@@ -1861,6 +1895,9 @@ static void __init init_timer_cpu(int cp
base->cpu = cpu;
raw_spin_lock_init(&base->lock);
base->clk = jiffies;
diff --git a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
index a0a8d090338a..d52134f1eec8 100644
--- a/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
+++ b/patches/tracing-account-for-preempt-off-in-preempt_schedule.patch
@@ -27,7 +27,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3571,7 +3571,16 @@ asmlinkage __visible void __sched notrac
+@@ -3605,7 +3605,16 @@ asmlinkage __visible void __sched notrac
* an infinite recursion.
*/
prev_ctx = exception_enter();
diff --git a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
index 2e55c765cd47..95da5239d942 100644
--- a/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
+++ b/patches/tty-serial-8250-don-t-take-the-trylock-during-oops.patch
@@ -14,7 +14,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
-@@ -3189,10 +3189,8 @@ void serial8250_console_write(struct uar
+@@ -3214,10 +3214,8 @@ void serial8250_console_write(struct uar
serial8250_rpm_get(up);
diff --git a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
index d0c0b2e7a2de..e588eb5a131b 100644
--- a/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
+++ b/patches/upstream-net-rt-remove-preemption-disabling-in-netif_rx.patch
@@ -37,7 +37,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/net/core/dev.c
+++ b/net/core/dev.c
-@@ -3885,7 +3885,7 @@ static int netif_rx_internal(struct sk_b
+@@ -4015,7 +4015,7 @@ static int netif_rx_internal(struct sk_b
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
@@ -46,7 +46,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
cpu = get_rps_cpu(skb->dev, skb, &rflow);
-@@ -3895,14 +3895,14 @@ static int netif_rx_internal(struct sk_b
+@@ -4025,14 +4025,14 @@ static int netif_rx_internal(struct sk_b
ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
rcu_read_unlock();
diff --git a/patches/wait.h-include-atomic.h.patch b/patches/wait.h-include-atomic.h.patch
index 4c101b8c3d6d..0a04f7859422 100644
--- a/patches/wait.h-include-atomic.h.patch
+++ b/patches/wait.h-include-atomic.h.patch
@@ -22,7 +22,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
-@@ -9,6 +9,7 @@
+@@ -10,6 +10,7 @@
#include <asm/current.h>
#include <uapi/linux/wait.h>
diff --git a/patches/work-queue-work-around-irqsafe-timer-optimization.patch b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
index ab1a5a18b7b1..d5dbe4de48b2 100644
--- a/patches/work-queue-work-around-irqsafe-timer-optimization.patch
+++ b/patches/work-queue-work-around-irqsafe-timer-optimization.patch
@@ -121,7 +121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "workqueue_internal.h"
-@@ -1281,7 +1282,7 @@ static int try_to_grab_pending(struct wo
+@@ -1282,7 +1283,7 @@ static int try_to_grab_pending(struct wo
local_unlock_irqrestore(pendingb_lock, *flags);
if (work_is_canceling(work))
return -ENOENT;
diff --git a/patches/work-simple-Simple-work-queue-implemenation.patch b/patches/work-simple-Simple-work-queue-implemenation.patch
index dbaa1ab13eb8..b73fc3929483 100644
--- a/patches/work-simple-Simple-work-queue-implemenation.patch
+++ b/patches/work-simple-Simple-work-queue-implemenation.patch
@@ -44,7 +44,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
+#endif /* _LINUX_SWORK_H */
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
-@@ -17,7 +17,7 @@ endif
+@@ -18,7 +18,7 @@ endif
obj-y += core.o loadavg.o clock.o cputime.o
obj-y += idle_task.o fair.o rt.o deadline.o
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index db5f6e22e052..dd5801c44141 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -31,7 +31,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1707,10 +1707,6 @@ static inline void ttwu_activate(struct
+@@ -1716,10 +1716,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -42,7 +42,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -2151,56 +2147,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2160,56 +2156,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3391,21 +3337,6 @@ static void __sched notrace __schedule(b
+@@ -3410,21 +3356,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -121,7 +121,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3466,6 +3397,14 @@ static inline void sched_submit_work(str
+@@ -3500,6 +3431,14 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -136,7 +136,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3474,6 +3413,12 @@ static inline void sched_submit_work(str
+@@ -3508,6 +3447,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -149,7 +149,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3484,6 +3429,7 @@ asmlinkage __visible void __sched schedu
+@@ -3518,6 +3463,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
@@ -159,7 +159,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -843,43 +843,32 @@ static void wake_up_worker(struct worker
+@@ -844,43 +844,32 @@ static void wake_up_worker(struct worker
}
/**
@@ -214,7 +214,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct worker_pool *pool;
/*
-@@ -888,13 +877,15 @@ struct task_struct *wq_worker_sleeping(s
+@@ -889,13 +878,15 @@ struct task_struct *wq_worker_sleeping(s
* checking NOT_RUNNING.
*/
if (worker->flags & WORKER_NOT_RUNNING)
@@ -234,7 +234,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The counterpart of the following dec_and_test, implied mb,
-@@ -908,9 +899,12 @@ struct task_struct *wq_worker_sleeping(s
+@@ -909,9 +900,12 @@ struct task_struct *wq_worker_sleeping(s
* lock is safe.
*/
if (atomic_dec_and_test(&pool->nr_running) &&
@@ -252,7 +252,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/**
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
-@@ -43,6 +43,7 @@ struct worker {
+@@ -45,6 +45,7 @@ struct worker {
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
int id; /* I: worker id */
@@ -260,7 +260,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Opaque string set with work_set_desc(). Printed out with task
-@@ -68,7 +69,7 @@ static inline struct worker *current_wq_
+@@ -70,7 +71,7 @@ static inline struct worker *current_wq_
* Scheduler hooks for concurrency managed workqueue. Only to be used from
* sched/core.c and workqueue.c.
*/
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index ea68e89ea3f3..057e521a3d8a 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3441,9 +3441,8 @@ void __noreturn do_task_dead(void)
+@@ -3475,9 +3475,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3451,6 +3450,10 @@ static inline void sched_submit_work(str
+@@ -3485,6 +3484,10 @@ static inline void sched_submit_work(str
if (tsk->flags & PF_WQ_WORKER)
wq_worker_sleeping(tsk);
@@ -67,7 +67,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
* make sure to submit it to avoid deadlocks.
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -123,6 +123,11 @@ enum {
+@@ -124,6 +124,11 @@ enum {
* cpu or grabbing pool->lock is enough for read access. If
* POOL_DISASSOCIATED is set, it's identical to L.
*
@@ -79,7 +79,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
* A: pool->attach_mutex protected.
*
* PL: wq_pool_mutex protected.
-@@ -430,6 +435,31 @@ static void workqueue_sysfs_unregister(s
+@@ -431,6 +436,31 @@ static void workqueue_sysfs_unregister(s
if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \
else
@@ -111,7 +111,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
#ifdef CONFIG_DEBUG_OBJECTS_WORK
static struct debug_obj_descr work_debug_descr;
-@@ -836,10 +866,16 @@ static struct worker *first_idle_worker(
+@@ -837,10 +867,16 @@ static struct worker *first_idle_worker(
*/
static void wake_up_worker(struct worker_pool *pool)
{
@@ -129,7 +129,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -868,7 +904,7 @@ void wq_worker_running(struct task_struc
+@@ -869,7 +905,7 @@ void wq_worker_running(struct task_struc
*/
void wq_worker_sleeping(struct task_struct *task)
{
@@ -138,7 +138,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
struct worker_pool *pool;
/*
-@@ -885,26 +921,18 @@ void wq_worker_sleeping(struct task_stru
+@@ -886,26 +922,18 @@ void wq_worker_sleeping(struct task_stru
return;
worker->sleeping = 1;
@@ -168,7 +168,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
/**
-@@ -1635,7 +1663,9 @@ static void worker_enter_idle(struct wor
+@@ -1636,7 +1664,9 @@ static void worker_enter_idle(struct wor
worker->last_active = jiffies;
/* idle_list is LIFO */
@@ -178,7 +178,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
if (too_many_workers(pool) && !timer_pending(&pool->idle_timer))
mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
-@@ -1668,7 +1698,9 @@ static void worker_leave_idle(struct wor
+@@ -1669,7 +1699,9 @@ static void worker_leave_idle(struct wor
return;
worker_clr_flags(worker, WORKER_IDLE);
pool->nr_idle--;
@@ -188,7 +188,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
}
static struct worker *alloc_worker(int node)
-@@ -1834,7 +1866,9 @@ static void destroy_worker(struct worker
+@@ -1835,7 +1867,9 @@ static void destroy_worker(struct worker
pool->nr_workers--;
pool->nr_idle--;
diff --git a/patches/workqueue-use-locallock.patch b/patches/workqueue-use-locallock.patch
index 8c798dc53dee..6683ec63c03c 100644
--- a/patches/workqueue-use-locallock.patch
+++ b/patches/workqueue-use-locallock.patch
@@ -20,7 +20,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#include "workqueue_internal.h"
-@@ -350,6 +351,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
+@@ -351,6 +352,8 @@ EXPORT_SYMBOL_GPL(system_power_efficient
struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly;
EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
@@ -29,7 +29,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static int worker_thread(void *__worker);
static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
-@@ -1103,9 +1106,11 @@ static void put_pwq_unlocked(struct pool
+@@ -1104,9 +1107,11 @@ static void put_pwq_unlocked(struct pool
* As both pwqs and pools are RCU protected, the
* following lock operations are safe.
*/
@@ -43,7 +43,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
}
-@@ -1209,7 +1214,7 @@ static int try_to_grab_pending(struct wo
+@@ -1210,7 +1215,7 @@ static int try_to_grab_pending(struct wo
struct worker_pool *pool;
struct pool_workqueue *pwq;
@@ -52,7 +52,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* try to steal the timer if it exists */
if (is_dwork) {
-@@ -1273,7 +1278,7 @@ static int try_to_grab_pending(struct wo
+@@ -1274,7 +1279,7 @@ static int try_to_grab_pending(struct wo
spin_unlock(&pool->lock);
fail:
rcu_read_unlock();
@@ -61,7 +61,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (work_is_canceling(work))
return -ENOENT;
cpu_relax();
-@@ -1378,7 +1383,7 @@ static void __queue_work(int cpu, struct
+@@ -1379,7 +1384,7 @@ static void __queue_work(int cpu, struct
* queued or lose PENDING. Grabbing PENDING and queueing should
* happen with IRQ disabled.
*/
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
debug_work_activate(work);
-@@ -1484,14 +1489,14 @@ bool queue_work_on(int cpu, struct workq
+@@ -1485,14 +1490,14 @@ bool queue_work_on(int cpu, struct workq
bool ret = false;
unsigned long flags;
@@ -87,7 +87,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_work_on);
-@@ -1500,8 +1505,11 @@ void delayed_work_timer_fn(unsigned long
+@@ -1501,8 +1506,11 @@ void delayed_work_timer_fn(unsigned long
{
struct delayed_work *dwork = (struct delayed_work *)__data;
@@ -99,7 +99,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(delayed_work_timer_fn);
-@@ -1557,14 +1565,14 @@ bool queue_delayed_work_on(int cpu, stru
+@@ -1558,14 +1566,14 @@ bool queue_delayed_work_on(int cpu, stru
unsigned long flags;
/* read the comment in __queue_work() */
@@ -116,7 +116,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
EXPORT_SYMBOL(queue_delayed_work_on);
-@@ -1599,7 +1607,7 @@ bool mod_delayed_work_on(int cpu, struct
+@@ -1600,7 +1608,7 @@ bool mod_delayed_work_on(int cpu, struct
if (likely(ret >= 0)) {
__queue_delayed_work(cpu, wq, dwork, delay);
@@ -125,7 +125,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/* -ENOENT from try_to_grab_pending() becomes %true */
-@@ -2923,7 +2931,7 @@ static bool __cancel_work_timer(struct w
+@@ -2950,7 +2958,7 @@ static bool __cancel_work_timer(struct w
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -134,7 +134,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* This allows canceling during early boot. We know that @work
-@@ -2984,10 +2992,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
+@@ -3011,10 +3019,10 @@ EXPORT_SYMBOL_GPL(cancel_work_sync);
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -147,7 +147,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -3005,7 +3013,7 @@ static bool __cancel_work(struct work_st
+@@ -3032,7 +3040,7 @@ static bool __cancel_work(struct work_st
return false;
set_work_pool_and_clear_pending(work, get_work_pool_id(work));
diff --git a/patches/workqueue-use-rcu.patch b/patches/workqueue-use-rcu.patch
index fe6251b9ab49..2ba285dadb33 100644
--- a/patches/workqueue-use-rcu.patch
+++ b/patches/workqueue-use-rcu.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
-@@ -125,7 +125,7 @@ enum {
+@@ -126,7 +126,7 @@ enum {
*
* PL: wq_pool_mutex protected.
*
@@ -23,7 +23,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads.
*
-@@ -134,7 +134,7 @@ enum {
+@@ -135,7 +135,7 @@ enum {
*
* WQ: wq->mutex protected.
*
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* determined without grabbing wq->mutex.
*/
struct work_struct unbound_release_work;
-@@ -357,20 +357,20 @@ static void workqueue_sysfs_unregister(s
+@@ -358,20 +358,20 @@ static void workqueue_sysfs_unregister(s
#include <trace/events/workqueue.h>
#define assert_rcu_or_pool_mutex() \
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define for_each_cpu_worker_pool(pool, cpu) \
for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
-@@ -382,7 +382,7 @@ static void workqueue_sysfs_unregister(s
+@@ -383,7 +383,7 @@ static void workqueue_sysfs_unregister(s
* @pool: iteration cursor
* @pi: integer used for iteration
*
@@ -86,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* locked. If the pool needs to be used beyond the locking in effect, the
* caller is responsible for guaranteeing that the pool stays online.
*
-@@ -414,7 +414,7 @@ static void workqueue_sysfs_unregister(s
+@@ -415,7 +415,7 @@ static void workqueue_sysfs_unregister(s
* @pwq: iteration cursor
* @wq: the target workqueue
*
@@ -95,7 +95,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
*
-@@ -550,7 +550,7 @@ static int worker_pool_assign_id(struct
+@@ -551,7 +551,7 @@ static int worker_pool_assign_id(struct
* @wq: the target workqueue
* @node: the node ID
*
@@ -104,7 +104,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* read locked.
* If the pwq needs to be used beyond the locking in effect, the caller is
* responsible for guaranteeing that the pwq stays online.
-@@ -694,8 +694,8 @@ static struct pool_workqueue *get_work_p
+@@ -695,8 +695,8 @@ static struct pool_workqueue *get_work_p
* @work: the work item of interest
*
* Pools are created and destroyed under wq_pool_mutex, and allows read
@@ -115,7 +115,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
*
* All fields of the returned pool are accessible as long as the above
* mentioned locking is in effect. If the returned pool needs to be used
-@@ -1100,7 +1100,7 @@ static void put_pwq_unlocked(struct pool
+@@ -1101,7 +1101,7 @@ static void put_pwq_unlocked(struct pool
{
if (pwq) {
/*
@@ -124,7 +124,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* following lock operations are safe.
*/
spin_lock_irq(&pwq->pool->lock);
-@@ -1228,6 +1228,7 @@ static int try_to_grab_pending(struct wo
+@@ -1229,6 +1229,7 @@ static int try_to_grab_pending(struct wo
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
return 0;
@@ -132,7 +132,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The queueing is in progress, or it is already queued. Try to
* steal it from ->worklist without clearing WORK_STRUCT_PENDING.
-@@ -1266,10 +1267,12 @@ static int try_to_grab_pending(struct wo
+@@ -1267,10 +1268,12 @@ static int try_to_grab_pending(struct wo
set_work_pool_and_keep_pending(work, pool->id);
spin_unlock(&pool->lock);
@@ -145,7 +145,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
local_irq_restore(*flags);
if (work_is_canceling(work))
return -ENOENT;
-@@ -1383,6 +1386,7 @@ static void __queue_work(int cpu, struct
+@@ -1384,6 +1387,7 @@ static void __queue_work(int cpu, struct
if (unlikely(wq->flags & __WQ_DRAINING) &&
WARN_ON_ONCE(!is_chained_work(wq)))
return;
@@ -153,7 +153,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
retry:
if (req_cpu == WORK_CPU_UNBOUND)
cpu = wq_select_unbound_cpu(raw_smp_processor_id());
-@@ -1439,10 +1443,8 @@ static void __queue_work(int cpu, struct
+@@ -1440,10 +1444,8 @@ static void __queue_work(int cpu, struct
/* pwq determined, queue */
trace_workqueue_queue_work(req_cpu, pwq, work);
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pwq->nr_in_flight[pwq->work_color]++;
work_flags = work_color_to_flags(pwq->work_color);
-@@ -1460,7 +1462,9 @@ static void __queue_work(int cpu, struct
+@@ -1461,7 +1463,9 @@ static void __queue_work(int cpu, struct
insert_work(pwq, work, worklist, work_flags);
@@ -176,7 +176,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -2789,14 +2793,14 @@ static bool start_flush_work(struct work
+@@ -2814,14 +2818,14 @@ static bool start_flush_work(struct work
might_sleep();
@@ -194,10 +194,10 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* see the comment in try_to_grab_pending() with the same code */
pwq = get_work_pwq(work);
if (pwq) {
-@@ -2825,10 +2829,11 @@ static bool start_flush_work(struct work
- else
- lock_map_acquire_read(&pwq->wq->lockdep_map);
- lock_map_release(&pwq->wq->lockdep_map);
+@@ -2852,10 +2856,11 @@ static bool start_flush_work(struct work
+ lock_map_acquire(&pwq->wq->lockdep_map);
+ lock_map_release(&pwq->wq->lockdep_map);
+ }
-
+ rcu_read_unlock();
return true;
@@ -207,7 +207,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return false;
}
-@@ -3257,7 +3262,7 @@ static void rcu_free_pool(struct rcu_hea
+@@ -3283,7 +3288,7 @@ static void rcu_free_pool(struct rcu_hea
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -216,7 +216,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3311,8 +3316,8 @@ static void put_unbound_pool(struct work
+@@ -3337,8 +3342,8 @@ static void put_unbound_pool(struct work
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -227,7 +227,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -3419,14 +3424,14 @@ static void pwq_unbound_release_workfn(s
+@@ -3445,14 +3450,14 @@ static void pwq_unbound_release_workfn(s
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -244,7 +244,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/**
-@@ -4101,7 +4106,7 @@ void destroy_workqueue(struct workqueue_
+@@ -4127,7 +4132,7 @@ void destroy_workqueue(struct workqueue_
* The base ref is never dropped on per-cpu pwqs. Directly
* schedule RCU free.
*/
@@ -253,7 +253,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
} else {
/*
* We're the sole accessor of @wq at this point. Directly
-@@ -4195,7 +4200,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4221,7 +4226,8 @@ bool workqueue_congested(int cpu, struct
struct pool_workqueue *pwq;
bool ret;
@@ -263,7 +263,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4206,7 +4212,8 @@ bool workqueue_congested(int cpu, struct
+@@ -4232,7 +4238,8 @@ bool workqueue_congested(int cpu, struct
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -273,7 +273,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4232,15 +4239,15 @@ unsigned int work_busy(struct work_struc
+@@ -4258,15 +4265,15 @@ unsigned int work_busy(struct work_struc
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -293,7 +293,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -4429,7 +4436,7 @@ void show_workqueue_state(void)
+@@ -4455,7 +4462,7 @@ void show_workqueue_state(void)
unsigned long flags;
int pi;
@@ -302,7 +302,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pr_info("Showing busy workqueues and worker pools:\n");
-@@ -4482,7 +4489,7 @@ void show_workqueue_state(void)
+@@ -4508,7 +4515,7 @@ void show_workqueue_state(void)
spin_unlock_irqrestore(&pool->lock, flags);
}
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -4843,16 +4850,16 @@ bool freeze_workqueues_busy(void)
+@@ -4869,16 +4876,16 @@ bool freeze_workqueues_busy(void)
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -331,7 +331,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
-@@ -5042,7 +5049,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5068,7 +5075,8 @@ static ssize_t wq_pool_ids_show(struct d
const char *delim = "";
int node, written = 0;
@@ -341,7 +341,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -5050,7 +5058,8 @@ static ssize_t wq_pool_ids_show(struct d
+@@ -5076,7 +5084,8 @@ static ssize_t wq_pool_ids_show(struct d
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
diff --git a/patches/x86-io-apic-migra-no-unmask.patch b/patches/x86-io-apic-migra-no-unmask.patch
index 30457f5b67fc..d8e91ba2781e 100644
--- a/patches/x86-io-apic-migra-no-unmask.patch
+++ b/patches/x86-io-apic-migra-no-unmask.patch
@@ -15,7 +15,7 @@ xXx
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
-@@ -1689,7 +1689,8 @@ static bool io_apic_level_ack_pending(st
+@@ -1690,7 +1690,8 @@ static bool io_apic_level_ack_pending(st
static inline bool ioapic_irqd_mask(struct irq_data *data)
{
/* If we are moving the irq we need to mask it */
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 76b7e5e2052f..60b3c1eca6aa 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6115,6 +6115,13 @@ int kvm_arch_init(void *opaque)
+@@ -6133,6 +6133,13 @@ int kvm_arch_init(void *opaque)
goto out;
}
diff --git a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
index 380795b0f85f..14962b4389a3 100644
--- a/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
+++ b/patches/x86-mce-use-swait-queue-for-mce-wakeups.patch
@@ -68,7 +68,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
#include "mce-internal.h"
-@@ -105,13 +106,43 @@ static void mce_do_trigger(struct work_s
+@@ -86,13 +87,43 @@ static void mce_do_trigger(struct work_s
static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
@@ -114,7 +114,7 @@ Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
static ssize_t
show_trigger(struct device *s, struct device_attribute *attr, char *buf)
{
-@@ -423,7 +454,7 @@ static __init int dev_mcelog_init_device
+@@ -356,7 +387,7 @@ static __init int dev_mcelog_init_device
return err;
}
diff --git a/patches/x86-preempt-lazy.patch b/patches/x86-preempt-lazy.patch
index 5bc941b09ed6..ea0903a04f69 100644
--- a/patches/x86-preempt-lazy.patch
+++ b/patches/x86-preempt-lazy.patch
@@ -18,16 +18,16 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -169,6 +169,7 @@ config X86
+ select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
+ select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select HAVE_REGS_AND_STACK_ACCESS_API
- select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER && STACK_VALIDATION
+ select HAVE_PREEMPT_LAZY
- select HAVE_STACK_VALIDATION if X86_64
- select HAVE_SYSCALL_TRACEPOINTS
- select HAVE_UNSTABLE_SCHED_CLOCK
+ select HAVE_RCU_TABLE_FREE
+ select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RELIABLE_STACKTRACE if X86_64 && FRAME_POINTER_UNWINDER && STACK_VALIDATION
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
-@@ -131,7 +131,7 @@ static long syscall_trace_enter(struct p
+@@ -132,7 +132,7 @@ static long syscall_trace_enter(struct p
#define EXIT_TO_USERMODE_LOOP_FLAGS \
(_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
{
-@@ -146,7 +146,7 @@ static void exit_to_usermode_loop(struct
+@@ -147,7 +147,7 @@ static void exit_to_usermode_loop(struct
/* We have work to do. */
local_irq_enable();
@@ -47,7 +47,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
-@@ -337,8 +337,25 @@ END(ret_from_exception)
+@@ -338,8 +338,25 @@ END(ret_from_exception)
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
.Lneed_resched:
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
call preempt_schedule_irq
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
-@@ -539,7 +539,23 @@ GLOBAL(retint_user)
+@@ -623,7 +623,23 @@ GLOBAL(retint_user)
bt $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
1:
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
-@@ -85,17 +85,46 @@ static __always_inline void __preempt_co
+@@ -86,17 +86,46 @@ static __always_inline void __preempt_co
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
*/
@@ -151,7 +151,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
-@@ -54,11 +54,14 @@ struct task_struct;
+@@ -55,11 +55,14 @@ struct task_struct;
struct thread_info {
unsigned long flags; /* low level flags */
@@ -166,7 +166,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
#define init_stack (init_thread_union.stack)
-@@ -67,6 +70,10 @@ struct thread_info {
+@@ -68,6 +71,10 @@ struct thread_info {
#include <asm/asm-offsets.h>
@@ -177,7 +177,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
/*
-@@ -82,6 +89,7 @@ struct thread_info {
+@@ -83,6 +90,7 @@ struct thread_info {
#define TIF_SYSCALL_EMU 6 /* syscall emulation active */
#define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
#define TIF_SECCOMP 8 /* secure computing */
@@ -185,7 +185,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */
#define TIF_UPROBE 12 /* breakpointed or singlestepping */
#define TIF_PATCH_PENDING 13 /* pending live patching update */
-@@ -107,6 +115,7 @@ struct thread_info {
+@@ -109,6 +117,7 @@ struct thread_info {
#define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
@@ -193,7 +193,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY)
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_PATCH_PENDING (1 << TIF_PATCH_PENDING)
-@@ -146,6 +155,8 @@ struct thread_info {
+@@ -150,6 +159,8 @@ struct thread_info {
#define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
#define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
@@ -204,7 +204,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
-@@ -36,6 +36,7 @@ void common(void) {
+@@ -37,6 +37,7 @@ void common(void) {
BLANK();
OFFSET(TASK_TI_flags, task_struct, thread_info.flags);
@@ -212,7 +212,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
OFFSET(TASK_addr_limit, task_struct, thread.addr_limit);
BLANK();
-@@ -92,4 +93,5 @@ void common(void) {
+@@ -93,4 +94,5 @@ void common(void) {
BLANK();
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
diff --git a/patches/x86-signal-delay-calling-signals-on-32bit.patch b/patches/x86-signal-delay-calling-signals-on-32bit.patch
index f16a66c4687c..e41ae9db7df5 100644
--- a/patches/x86-signal-delay-calling-signals-on-32bit.patch
+++ b/patches/x86-signal-delay-calling-signals-on-32bit.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/include/asm/signal.h
+++ b/arch/x86/include/asm/signal.h
-@@ -36,7 +36,7 @@ typedef struct {
+@@ -37,7 +37,7 @@ typedef struct {
* TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the
* trap.
*/
diff --git a/patches/x86-stackprot-no-random-on-rt.patch b/patches/x86-stackprot-no-random-on-rt.patch
index 7b423aeb0786..66c2ecf596d2 100644
--- a/patches/x86-stackprot-no-random-on-rt.patch
+++ b/patches/x86-stackprot-no-random-on-rt.patch
@@ -19,7 +19,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
-@@ -59,7 +59,7 @@
+@@ -60,7 +60,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
{
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
u64 tsc;
#ifdef CONFIG_X86_64
-@@ -70,8 +70,14 @@ static __always_inline void boot_init_st
+@@ -71,8 +71,14 @@ static __always_inline void boot_init_st
* of randomness. The TSC only matters for very early init,
* there it already has some randomness on most systems. Later
* on during the bootup the random pool has true entropy too.
diff --git a/patches/x86-use-gen-rwsem-spinlocks-rt.patch b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
index caf096ca41ed..06a17285bfdf 100644
--- a/patches/x86-use-gen-rwsem-spinlocks-rt.patch
+++ b/patches/x86-use-gen-rwsem-spinlocks-rt.patch
@@ -13,7 +13,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
-@@ -252,8 +252,11 @@ config ARCH_MAY_HAVE_PC_FDC
+@@ -255,8 +255,11 @@ config ARCH_MAY_HAVE_PC_FDC
def_bool y
depends on ISA_DMA_API