author    Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2019-04-12 21:56:41 +0200
committer Sebastian Andrzej Siewior <bigeasy@linutronix.de>  2019-04-12 21:56:41 +0200
commit    09ad88b70b36b27afb90ad924aad26e079ace65b (patch)
tree      8cfbd823d8b4b6b29e316c2716c10ab36ac2f0ef
parent    d1dcc22198fd59ebb23f211c802cc0ff8443bb56 (diff)
download  linux-rt-09ad88b70b36b27afb90ad924aad26e079ace65b.tar.gz
[ANNOUNCE] v5.0.7-rt5  (v5.0.7-rt5-patches)
Dear RT folks!

I'm pleased to announce the v5.0.7-rt5 patch set.

Changes since v5.0.7-rt4:

  - Update "x86: load FPU registers on return to userland" from v7 to v9.

  - Update "clocksource: improve Atmel TCB timer driver" from v7 to latest
    post by Alexandre Belloni. I hope this works, my HW refuses to
    cooperate so I can't verify.

  - Avoid allocating a spin lock with disabled interrupts in i915.

Known issues

  - A warning triggered in "rcu_note_context_switch" originated from
    SyS_timer_gettime(). The issue was always there, it is now visible.
    Reported by Grygorii Strashko and Daniel Wagner.

  - rcutorture is currently broken on -RT. Reported by Juri Lelli.

The delta patch against v5.0.7-rt4 is appended below and can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/incr/patch-5.0.7-rt4-rt5.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.0.7-rt5

The RT patch against v5.0.7 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patch-5.0.7-rt5.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.0/older/patches-5.0.7-rt5.tar.xz

Sebastian

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
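As a rough sketch, the release can be picked up either via the git tree or by
applying the full patch on top of a vanilla v5.0.7 tree (the URL and the tag
are the ones listed above; the local directory names are only examples):

    # via the git tree
    git clone git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git
    cd linux-rt-devel
    git checkout v5.0.7-rt5

    # or apply the full patch to a vanilla v5.0.7 tree
    # (patch file downloaded from the cdn.kernel.org link above)
    cd linux-5.0.7
    xz -dc patch-5.0.7-rt5.patch.xz | patch -p1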
-rw-r--r--  patches/0001-ARM-at91-add-TCB-registers-definitions.patch | 202
-rw-r--r--  patches/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch | 601
-rw-r--r--  patches/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch | 2
-rw-r--r--  patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch | 473
-rw-r--r--  patches/0002-misc-atmel_tclib-drop-AVR32-support.patch | 33
-rw-r--r--  patches/0002-x86-fpu-Remove-fpu__restore.patch | 2
-rw-r--r--  patches/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch | 264
-rw-r--r--  patches/0003-misc-atmel_tclib-move-definitions-to-header-file.patch | 85
-rw-r--r--  patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch | 82
-rw-r--r--  patches/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch | 2
-rw-r--r--  patches/0004-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch | 241
-rw-r--r--  patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch | 2
-rw-r--r--  patches/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch | 2
-rw-r--r--  patches/0005-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch | 73
-rw-r--r--  patches/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch | 2
-rw-r--r--  patches/0006-ARM-at91-Implement-clocksource-selection.patch (renamed from patches/0005-ARM-at91-Implement-clocksource-selection.patch) | 17
-rw-r--r--  patches/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch | 34
-rw-r--r--  patches/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch | 2
-rw-r--r--  patches/0007-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch | 53
-rw-r--r--  patches/0007-x86-fpu-Remove-fpu-initialized.patch | 2
-rw-r--r--  patches/0008-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch (renamed from patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch) | 18
-rw-r--r--  patches/0008-x86-fpu-Remove-user_fpu_begin.patch | 2
-rw-r--r--  patches/0009-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch | 984
-rw-r--r--  patches/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch | 2
-rw-r--r--  patches/0010-ARM-configs-at91-unselect-PIT.patch (renamed from patches/0007-ARM-configs-at91-unselect-PIT.patch) | 4
-rw-r--r--  patches/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch | 2
-rw-r--r--  patches/0011-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch | 25
-rw-r--r--  patches/0011-printk_safe-remove-printk-safe-code.patch | 4
-rw-r--r--  patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch | 10
-rw-r--r--  patches/0012-clocksource-drivers-timer-atmel-tcb-Use-ARRAY_SIZE-i.patch | 25
-rw-r--r--  patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch | 77
-rw-r--r--  patches/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch (renamed from patches/0012-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch) | 18
-rw-r--r--  patches/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch (renamed from patches/0013-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch) | 2
-rw-r--r--  patches/0015-x86-fpu-Eager-switch-PKRU-state.patch (renamed from patches/0014-x86-fpu-Eager-switch-PKRU-state.patch) | 56
-rw-r--r--  patches/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch (renamed from patches/0015-x86-entry-Add-TIF_NEED_FPU_LOAD.patch) | 4
-rw-r--r--  patches/0016-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch | 154
-rw-r--r--  patches/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch | 70
-rw-r--r--  patches/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch (renamed from patches/0017-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch) | 15
-rw-r--r--  patches/0019-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch | 74
-rw-r--r--  patches/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch (renamed from patches/0018-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch) | 2
-rw-r--r--  patches/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch | 46
-rw-r--r--  patches/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch (renamed from patches/0020-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch) | 98
-rw-r--r--  patches/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch (renamed from patches/0021-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch) | 18
-rw-r--r--  patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch (renamed from patches/0022-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch) | 39
-rw-r--r--  patches/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch | 52
-rw-r--r--  patches/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch | 78
-rw-r--r--  patches/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch | 67
-rw-r--r--  patches/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch | 73
-rw-r--r--  patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch | 4
-rw-r--r--  patches/add_migrate_disable.patch | 10
-rw-r--r--  patches/arm-remove-printk_nmi_.patch | 2
-rw-r--r--  patches/at91_dont_enable_disable_clock.patch | 18
-rw-r--r--  patches/block-mq-drop-preempt-disable.patch | 2
-rw-r--r--  patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch | 4
-rw-r--r--  patches/cgroups-use-simple-wait-in-css_release.patch | 4
-rw-r--r--  patches/clocksource-tclib-allow-higher-clockrates.patch | 80
-rw-r--r--  patches/completion-use-simple-wait-queues.patch | 6
-rw-r--r--  patches/cpu-hotplug--Implement-CPU-pinning.patch | 6
-rw-r--r--  patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch | 2
-rw-r--r--  patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch | 6
-rw-r--r--  patches/drm-i915-Don-t-disable-interrupts-independently-of-t.patch | 43
-rw-r--r--  patches/fs-aio-simple-simple-work.patch | 4
-rw-r--r--  patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch | 2
-rw-r--r--  patches/fs-replace-bh_uptodate_lock-for-rt.patch | 2
-rw-r--r--  patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch | 6
-rw-r--r--  patches/hotplug-light-get-online-cpus.patch | 4
-rw-r--r--  patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch | 2
-rw-r--r--  patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch | 2
-rw-r--r--  patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch | 32
-rw-r--r--  patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch | 2
-rw-r--r--  patches/localversion.patch | 2
-rw-r--r--  patches/lockdep-no-softirq-accounting-on-rt.patch | 4
-rw-r--r--  patches/mm-enable-slub.patch | 4
-rw-r--r--  patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch | 4
-rw-r--r--  patches/mm-memcontrol-do_not_disable_irq.patch | 14
-rw-r--r--  patches/mm-vmalloc-use-get-cpu-light.patch | 10
-rw-r--r--  patches/peterz-percpu-rwsem-rt.patch | 22
-rw-r--r--  patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch | 19
-rw-r--r--  patches/powerpc-stackprotector-work-around-stack-guard-init-.patch | 9
-rw-r--r--  patches/preempt-lazy-support.patch | 20
-rw-r--r--  patches/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch | 6
-rw-r--r--  patches/printk-only-allow-kernel-to-emergency-message.patch | 8
-rw-r--r--  patches/printk-set-deferred-to-default-loglevel-enforce-mask.patch | 4
-rw-r--r--  patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch | 6
-rw-r--r--  patches/random-make-it-work-on-rt.patch | 2
-rw-r--r--  patches/rcu-Eliminate-softirq-processing-from-rcutree.patch | 8
-rw-r--r--  patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch | 2
-rw-r--r--  patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch | 6
-rw-r--r--  patches/rtmutex-add-sleeping-lock-implementation.patch | 4
-rw-r--r--  patches/rtmutex-annotate-sleeping-lock-context.patch | 2
-rw-r--r--  patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch | 2
-rw-r--r--  patches/sched-might-sleep-do-not-account-rcu-depth.patch | 2
-rw-r--r--  patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch | 2
-rw-r--r--  patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch | 12
-rw-r--r--  patches/sched-mmdrop-delayed.patch | 8
-rw-r--r--  patches/sched-rt-mutex-wakeup.patch | 4
-rw-r--r--  patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch | 2
-rw-r--r--  patches/scsi-fcoe-rt-aware.patch | 4
-rw-r--r--  patches/serial-8250-export-symbols-which-are-used-by-symbols.patch | 2
-rw-r--r--  patches/series | 55
-rw-r--r--  patches/srcu-Remove-srcu_queue_delayed_work_on.patch | 4
-rw-r--r--  patches/timers-prepare-for-full-preemption.patch | 4
-rw-r--r--  patches/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch | 9
-rw-r--r--  patches/workqueue-distangle-from-rq-lock.patch | 12
-rw-r--r--  patches/workqueue-prevent-deadlock-stall.patch | 4
-rw-r--r--  patches/x86-kvm-require-const-tsc-for-rt.patch | 2
106 files changed, 3082 insertions, 1638 deletions
diff --git a/patches/0001-ARM-at91-add-TCB-registers-definitions.patch b/patches/0001-ARM-at91-add-TCB-registers-definitions.patch
deleted file mode 100644
index 547035daccad..000000000000
--- a/patches/0001-ARM-at91-add-TCB-registers-definitions.patch
+++ /dev/null
@@ -1,202 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:18 +0200
-Subject: [PATCH 1/7] ARM: at91: add TCB registers definitions
-
-Add registers and bits definitions for the timer counter blocks found on
-Atmel ARM SoCs.
-
-Tested-by: Alexander Dahl <ada@thorsis.com>
-Tested-by: Andras Szemzo <szemzo.andras@gmail.com>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- include/soc/at91/atmel_tcb.h | 183 +++++++++++++++++++++++++++++++++++++++++++
- 1 file changed, 183 insertions(+)
- create mode 100644 include/soc/at91/atmel_tcb.h
-
---- /dev/null
-+++ b/include/soc/at91/atmel_tcb.h
-@@ -0,0 +1,183 @@
-+//SPDX-License-Identifier: GPL-2.0
-+/* Copyright (C) 2018 Microchip */
-+
-+#ifndef __SOC_ATMEL_TCB_H
-+#define __SOC_ATMEL_TCB_H
-+
-+/* Channel registers */
-+#define ATMEL_TC_COFFS(c) ((c) * 0x40)
-+#define ATMEL_TC_CCR(c) ATMEL_TC_COFFS(c)
-+#define ATMEL_TC_CMR(c) (ATMEL_TC_COFFS(c) + 0x4)
-+#define ATMEL_TC_SMMR(c) (ATMEL_TC_COFFS(c) + 0x8)
-+#define ATMEL_TC_RAB(c) (ATMEL_TC_COFFS(c) + 0xc)
-+#define ATMEL_TC_CV(c) (ATMEL_TC_COFFS(c) + 0x10)
-+#define ATMEL_TC_RA(c) (ATMEL_TC_COFFS(c) + 0x14)
-+#define ATMEL_TC_RB(c) (ATMEL_TC_COFFS(c) + 0x18)
-+#define ATMEL_TC_RC(c) (ATMEL_TC_COFFS(c) + 0x1c)
-+#define ATMEL_TC_SR(c) (ATMEL_TC_COFFS(c) + 0x20)
-+#define ATMEL_TC_IER(c) (ATMEL_TC_COFFS(c) + 0x24)
-+#define ATMEL_TC_IDR(c) (ATMEL_TC_COFFS(c) + 0x28)
-+#define ATMEL_TC_IMR(c) (ATMEL_TC_COFFS(c) + 0x2c)
-+#define ATMEL_TC_EMR(c) (ATMEL_TC_COFFS(c) + 0x30)
-+
-+/* Block registers */
-+#define ATMEL_TC_BCR 0xc0
-+#define ATMEL_TC_BMR 0xc4
-+#define ATMEL_TC_QIER 0xc8
-+#define ATMEL_TC_QIDR 0xcc
-+#define ATMEL_TC_QIMR 0xd0
-+#define ATMEL_TC_QISR 0xd4
-+#define ATMEL_TC_FMR 0xd8
-+#define ATMEL_TC_WPMR 0xe4
-+
-+/* CCR fields */
-+#define ATMEL_TC_CCR_CLKEN BIT(0)
-+#define ATMEL_TC_CCR_CLKDIS BIT(1)
-+#define ATMEL_TC_CCR_SWTRG BIT(2)
-+
-+/* Common CMR fields */
-+#define ATMEL_TC_CMR_TCLKS_MSK GENMASK(2, 0)
-+#define ATMEL_TC_CMR_TCLK(x) (x)
-+#define ATMEL_TC_CMR_XC(x) ((x) + 5)
-+#define ATMEL_TC_CMR_CLKI BIT(3)
-+#define ATMEL_TC_CMR_BURST_MSK GENMASK(5, 4)
-+#define ATMEL_TC_CMR_BURST_XC(x) (((x) + 1) << 4)
-+#define ATMEL_TC_CMR_WAVE BIT(15)
-+
-+/* Capture mode CMR fields */
-+#define ATMEL_TC_CMR_LDBSTOP BIT(6)
-+#define ATMEL_TC_CMR_LDBDIS BIT(7)
-+#define ATMEL_TC_CMR_ETRGEDG_MSK GENMASK(9, 8)
-+#define ATMEL_TC_CMR_ETRGEDG_NONE (0 << 8)
-+#define ATMEL_TC_CMR_ETRGEDG_RISING (1 << 8)
-+#define ATMEL_TC_CMR_ETRGEDG_FALLING (2 << 8)
-+#define ATMEL_TC_CMR_ETRGEDG_BOTH (3 << 8)
-+#define ATMEL_TC_CMR_ABETRG BIT(10)
-+#define ATMEL_TC_CMR_CPCTRG BIT(14)
-+#define ATMEL_TC_CMR_LDRA_MSK GENMASK(17, 16)
-+#define ATMEL_TC_CMR_LDRA_NONE (0 << 16)
-+#define ATMEL_TC_CMR_LDRA_RISING (1 << 16)
-+#define ATMEL_TC_CMR_LDRA_FALLING (2 << 16)
-+#define ATMEL_TC_CMR_LDRA_BOTH (3 << 16)
-+#define ATMEL_TC_CMR_LDRB_MSK GENMASK(19, 18)
-+#define ATMEL_TC_CMR_LDRB_NONE (0 << 18)
-+#define ATMEL_TC_CMR_LDRB_RISING (1 << 18)
-+#define ATMEL_TC_CMR_LDRB_FALLING (2 << 18)
-+#define ATMEL_TC_CMR_LDRB_BOTH (3 << 18)
-+#define ATMEL_TC_CMR_SBSMPLR_MSK GENMASK(22, 20)
-+#define ATMEL_TC_CMR_SBSMPLR(x) ((x) << 20)
-+
-+/* Waveform mode CMR fields */
-+#define ATMEL_TC_CMR_CPCSTOP BIT(6)
-+#define ATMEL_TC_CMR_CPCDIS BIT(7)
-+#define ATMEL_TC_CMR_EEVTEDG_MSK GENMASK(9, 8)
-+#define ATMEL_TC_CMR_EEVTEDG_NONE (0 << 8)
-+#define ATMEL_TC_CMR_EEVTEDG_RISING (1 << 8)
-+#define ATMEL_TC_CMR_EEVTEDG_FALLING (2 << 8)
-+#define ATMEL_TC_CMR_EEVTEDG_BOTH (3 << 8)
-+#define ATMEL_TC_CMR_EEVT_MSK GENMASK(11, 10)
-+#define ATMEL_TC_CMR_EEVT_XC(x) (((x) + 1) << 10)
-+#define ATMEL_TC_CMR_ENETRG BIT(12)
-+#define ATMEL_TC_CMR_WAVESEL_MSK GENMASK(14, 13)
-+#define ATMEL_TC_CMR_WAVESEL_UP (0 << 13)
-+#define ATMEL_TC_CMR_WAVESEL_UPDOWN (1 << 13)
-+#define ATMEL_TC_CMR_WAVESEL_UPRC (2 << 13)
-+#define ATMEL_TC_CMR_WAVESEL_UPDOWNRC (3 << 13)
-+#define ATMEL_TC_CMR_ACPA_MSK GENMASK(17, 16)
-+#define ATMEL_TC_CMR_ACPA(a) (ATMEL_TC_CMR_ACTION_##a << 16)
-+#define ATMEL_TC_CMR_ACPC_MSK GENMASK(19, 18)
-+#define ATMEL_TC_CMR_ACPC(a) (ATMEL_TC_CMR_ACTION_##a << 18)
-+#define ATMEL_TC_CMR_AEEVT_MSK GENMASK(21, 20)
-+#define ATMEL_TC_CMR_AEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 20)
-+#define ATMEL_TC_CMR_ASWTRG_MSK GENMASK(23, 22)
-+#define ATMEL_TC_CMR_ASWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 22)
-+#define ATMEL_TC_CMR_BCPB_MSK GENMASK(25, 24)
-+#define ATMEL_TC_CMR_BCPB(a) (ATMEL_TC_CMR_ACTION_##a << 24)
-+#define ATMEL_TC_CMR_BCPC_MSK GENMASK(27, 26)
-+#define ATMEL_TC_CMR_BCPC(a) (ATMEL_TC_CMR_ACTION_##a << 26)
-+#define ATMEL_TC_CMR_BEEVT_MSK GENMASK(29, 28)
-+#define ATMEL_TC_CMR_BEEVT(a) (ATMEL_TC_CMR_ACTION_##a << 28)
-+#define ATMEL_TC_CMR_BSWTRG_MSK GENMASK(31, 30)
-+#define ATMEL_TC_CMR_BSWTRG(a) (ATMEL_TC_CMR_ACTION_##a << 30)
-+#define ATMEL_TC_CMR_ACTION_NONE 0
-+#define ATMEL_TC_CMR_ACTION_SET 1
-+#define ATMEL_TC_CMR_ACTION_CLEAR 2
-+#define ATMEL_TC_CMR_ACTION_TOGGLE 3
-+
-+/* SMMR fields */
-+#define ATMEL_TC_SMMR_GCEN BIT(0)
-+#define ATMEL_TC_SMMR_DOWN BIT(1)
-+
-+/* SR/IER/IDR/IMR fields */
-+#define ATMEL_TC_COVFS BIT(0)
-+#define ATMEL_TC_LOVRS BIT(1)
-+#define ATMEL_TC_CPAS BIT(2)
-+#define ATMEL_TC_CPBS BIT(3)
-+#define ATMEL_TC_CPCS BIT(4)
-+#define ATMEL_TC_LDRAS BIT(5)
-+#define ATMEL_TC_LDRBS BIT(6)
-+#define ATMEL_TC_ETRGS BIT(7)
-+#define ATMEL_TC_CLKSTA BIT(16)
-+#define ATMEL_TC_MTIOA BIT(17)
-+#define ATMEL_TC_MTIOB BIT(18)
-+
-+/* EMR fields */
-+#define ATMEL_TC_EMR_TRIGSRCA_MSK GENMASK(1, 0)
-+#define ATMEL_TC_EMR_TRIGSRCA_TIOA 0
-+#define ATMEL_TC_EMR_TRIGSRCA_PWMX 1
-+#define ATMEL_TC_EMR_TRIGSRCB_MSK GENMASK(5, 4)
-+#define ATMEL_TC_EMR_TRIGSRCB_TIOB (0 << 4)
-+#define ATMEL_TC_EMR_TRIGSRCB_PWM (1 << 4)
-+#define ATMEL_TC_EMR_NOCLKDIV BIT(8)
-+
-+/* BCR fields */
-+#define ATMEL_TC_BCR_SYNC BIT(0)
-+
-+/* BMR fields */
-+#define ATMEL_TC_BMR_TCXC_MSK(c) GENMASK(((c) * 2) + 1, (c) * 2)
-+#define ATMEL_TC_BMR_TCXC(x, c) ((x) << (2 * (c)))
-+#define ATMEL_TC_BMR_QDEN BIT(8)
-+#define ATMEL_TC_BMR_POSEN BIT(9)
-+#define ATMEL_TC_BMR_SPEEDEN BIT(10)
-+#define ATMEL_TC_BMR_QDTRANS BIT(11)
-+#define ATMEL_TC_BMR_EDGPHA BIT(12)
-+#define ATMEL_TC_BMR_INVA BIT(13)
-+#define ATMEL_TC_BMR_INVB BIT(14)
-+#define ATMEL_TC_BMR_INVIDX BIT(15)
-+#define ATMEL_TC_BMR_SWAP BIT(16)
-+#define ATMEL_TC_BMR_IDXPHB BIT(17)
-+#define ATMEL_TC_BMR_AUTOC BIT(18)
-+#define ATMEL_TC_MAXFILT_MSK GENMASK(25, 20)
-+#define ATMEL_TC_MAXFILT(x) (((x) - 1) << 20)
-+#define ATMEL_TC_MAXCMP_MSK GENMASK(29, 26)
-+#define ATMEL_TC_MAXCMP(x) ((x) << 26)
-+
-+/* QEDC fields */
-+#define ATMEL_TC_QEDC_IDX BIT(0)
-+#define ATMEL_TC_QEDC_DIRCHG BIT(1)
-+#define ATMEL_TC_QEDC_QERR BIT(2)
-+#define ATMEL_TC_QEDC_MPE BIT(3)
-+#define ATMEL_TC_QEDC_DIR BIT(8)
-+
-+/* FMR fields */
-+#define ATMEL_TC_FMR_ENCF(x) BIT(x)
-+
-+/* WPMR fields */
-+#define ATMEL_TC_WPMR_WPKEY (0x54494d << 8)
-+#define ATMEL_TC_WPMR_WPEN BIT(0)
-+
-+static const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
-+
-+static const struct of_device_id atmel_tcb_dt_ids[] = {
-+ {
-+ .compatible = "atmel,at91rm9200-tcb",
-+ .data = (void *)16,
-+ }, {
-+ .compatible = "atmel,at91sam9x5-tcb",
-+ .data = (void *)32,
-+ }, {
-+ /* sentinel */
-+ }
-+};
-+
-+#endif /* __SOC_ATMEL_TCB_H */
diff --git a/patches/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch b/patches/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
new file mode 100644
index 000000000000..64c8aefc54d8
--- /dev/null
+++ b/patches/0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
@@ -0,0 +1,601 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:09 +0200
+Subject: [PATCH 01/12] ARM: at91: move SoC specific definitions to SoC folder
+
+Move linux/atmel_tc.h to the SoC specific folder include/soc/at91.
+
+Cc: Thierry Reding <thierry.reding@gmail.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/tcb_clksrc.c | 2
+ drivers/misc/atmel_tclib.c | 2
+ drivers/pwm/pwm-atmel-tcb.c | 2
+ include/linux/atmel_tc.h | 270 ---------------------------------------
+ include/soc/at91/atmel_tcb.h | 270 +++++++++++++++++++++++++++++++++++++++
+ 5 files changed, 273 insertions(+), 273 deletions(-)
+ rename include/{linux/atmel_tc.h => soc/at91/atmel_tcb.h} (99%)
+
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -11,7 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+ #include <linux/syscore_ops.h>
+-#include <linux/atmel_tc.h>
++#include <soc/at91/atmel_tcb.h>
+
+
+ /*
+--- a/drivers/misc/atmel_tclib.c
++++ b/drivers/misc/atmel_tclib.c
+@@ -1,4 +1,3 @@
+-#include <linux/atmel_tc.h>
+ #include <linux/clk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -10,6 +9,7 @@
+ #include <linux/slab.h>
+ #include <linux/export.h>
+ #include <linux/of.h>
++#include <soc/at91/atmel_tcb.h>
+
+ /*
+ * This is a thin library to solve the problem of how to portably allocate
+--- a/drivers/pwm/pwm-atmel-tcb.c
++++ b/drivers/pwm/pwm-atmel-tcb.c
+@@ -17,7 +17,7 @@
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+ #include <linux/platform_device.h>
+-#include <linux/atmel_tc.h>
++#include <soc/at91/atmel_tcb.h>
+ #include <linux/pwm.h>
+ #include <linux/of_device.h>
+ #include <linux/slab.h>
+--- a/include/linux/atmel_tc.h
++++ /dev/null
+@@ -1,270 +0,0 @@
+-/*
+- * Timer/Counter Unit (TC) registers.
+- *
+- * This program is free software; you can redistribute it and/or modify
+- * it under the terms of the GNU General Public License as published by
+- * the Free Software Foundation; either version 2 of the License, or
+- * (at your option) any later version.
+- */
+-
+-#ifndef ATMEL_TC_H
+-#define ATMEL_TC_H
+-
+-#include <linux/compiler.h>
+-#include <linux/list.h>
+-
+-/*
+- * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
+- * three general-purpose 16-bit timers. These timers share one register bank.
+- * Depending on the SOC, each timer may have its own clock and IRQ, or those
+- * may be shared by the whole TC block.
+- *
+- * These TC blocks may have up to nine external pins: TCLK0..2 signals for
+- * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
+- * or triggering. Those pins need to be set up for use with the TC block,
+- * else they will be used as GPIOs or for a different controller.
+- *
+- * Although we expect each TC block to have a platform_device node, those
+- * nodes are not what drivers bind to. Instead, they ask for a specific
+- * TC block, by number ... which is a common approach on systems with many
+- * timers. Then they use clk_get() and platform_get_irq() to get clock and
+- * IRQ resources.
+- */
+-
+-struct clk;
+-
+-/**
+- * struct atmel_tcb_config - SoC data for a Timer/Counter Block
+- * @counter_width: size in bits of a timer counter register
+- */
+-struct atmel_tcb_config {
+- size_t counter_width;
+-};
+-
+-/**
+- * struct atmel_tc - information about a Timer/Counter Block
+- * @pdev: physical device
+- * @regs: mapping through which the I/O registers can be accessed
+- * @id: block id
+- * @tcb_config: configuration data from SoC
+- * @irq: irq for each of the three channels
+- * @clk: internal clock source for each of the three channels
+- * @node: list node, for tclib internal use
+- * @allocated: if already used, for tclib internal use
+- *
+- * On some platforms, each TC channel has its own clocks and IRQs,
+- * while on others, all TC channels share the same clock and IRQ.
+- * Drivers should clk_enable() all the clocks they need even though
+- * all the entries in @clk may point to the same physical clock.
+- * Likewise, drivers should request irqs independently for each
+- * channel, but they must use IRQF_SHARED in case some of the entries
+- * in @irq are actually the same IRQ.
+- */
+-struct atmel_tc {
+- struct platform_device *pdev;
+- void __iomem *regs;
+- int id;
+- const struct atmel_tcb_config *tcb_config;
+- int irq[3];
+- struct clk *clk[3];
+- struct clk *slow_clk;
+- struct list_head node;
+- bool allocated;
+-};
+-
+-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
+-extern void atmel_tc_free(struct atmel_tc *tc);
+-
+-/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
+-extern const u8 atmel_tc_divisors[5];
+-
+-
+-/*
+- * Two registers have block-wide controls. These are: configuring the three
+- * "external" clocks (or event sources) used by the timer channels; and
+- * synchronizing the timers by resetting them all at once.
+- *
+- * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
+- * signals. Or, it can mean "external to timer", using the TIOA output from
+- * one of the other two timers that's being run in waveform mode.
+- */
+-
+-#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
+-#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
+-
+-#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
+-#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
+-#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
+-#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
+-#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
+-#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
+-#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
+-#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
+-#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
+-#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
+-#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
+-#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
+-#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
+-#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
+-#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
+-#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
+-
+-
+-/*
+- * Each TC block has three "channels", each with one counter and controls.
+- *
+- * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
+- * when it's not "external") is silicon-specific. AT91 platforms use one
+- * set of definitions; AVR32 platforms use a different set. Don't hard-wire
+- * such knowledge into your code, use the global "atmel_tc_divisors" ...
+- * where index N is the divisor for clock N+1, else zero to indicate it uses
+- * the 32 KiHz clock.
+- *
+- * The timers can be chained in various ways, and operated in "waveform"
+- * generation mode (including PWM) or "capture" mode (to time events). In
+- * both modes, behavior can be configured in many ways.
+- *
+- * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
+- * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
+- * uses them only as inputs.
+- */
+-#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
+-#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
+-
+-#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
+-#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
+-#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
+-#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
+-
+-#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
+-
+-/* Both modes share some CMR bits */
+-#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
+-#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
+-#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
+-#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
+-#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
+-#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
+-#define ATMEL_TC_XC0 (5 << 0)
+-#define ATMEL_TC_XC1 (6 << 0)
+-#define ATMEL_TC_XC2 (7 << 0)
+-#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
+-#define ATMEL_TC_BURST (3 << 4) /* clock gating */
+-#define ATMEL_TC_GATE_NONE (0 << 4)
+-#define ATMEL_TC_GATE_XC0 (1 << 4)
+-#define ATMEL_TC_GATE_XC1 (2 << 4)
+-#define ATMEL_TC_GATE_XC2 (3 << 4)
+-#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
+-
+-/* CAPTURE mode CMR bits */
+-#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
+-#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
+-#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
+-#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
+-#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
+-#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
+-#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
+-#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
+-#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
+-#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
+-#define ATMEL_TC_LDRA_NONE (0 << 16)
+-#define ATMEL_TC_LDRA_RISING (1 << 16)
+-#define ATMEL_TC_LDRA_FALLING (2 << 16)
+-#define ATMEL_TC_LDRA_BOTH (3 << 16)
+-#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
+-#define ATMEL_TC_LDRB_NONE (0 << 18)
+-#define ATMEL_TC_LDRB_RISING (1 << 18)
+-#define ATMEL_TC_LDRB_FALLING (2 << 18)
+-#define ATMEL_TC_LDRB_BOTH (3 << 18)
+-
+-/* WAVEFORM mode CMR bits */
+-#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
+-#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
+-#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
+-#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
+-#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
+-#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
+-#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
+-#define ATMEL_TC_EEVT (3 << 10) /* external event source */
+-#define ATMEL_TC_EEVT_TIOB (0 << 10)
+-#define ATMEL_TC_EEVT_XC0 (1 << 10)
+-#define ATMEL_TC_EEVT_XC1 (2 << 10)
+-#define ATMEL_TC_EEVT_XC2 (3 << 10)
+-#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
+-#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
+-#define ATMEL_TC_WAVESEL_UP (0 << 13)
+-#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
+-#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
+-#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
+-#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
+-#define ATMEL_TC_ACPA_NONE (0 << 16)
+-#define ATMEL_TC_ACPA_SET (1 << 16)
+-#define ATMEL_TC_ACPA_CLEAR (2 << 16)
+-#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
+-#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
+-#define ATMEL_TC_ACPC_NONE (0 << 18)
+-#define ATMEL_TC_ACPC_SET (1 << 18)
+-#define ATMEL_TC_ACPC_CLEAR (2 << 18)
+-#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
+-#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
+-#define ATMEL_TC_AEEVT_NONE (0 << 20)
+-#define ATMEL_TC_AEEVT_SET (1 << 20)
+-#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
+-#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
+-#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
+-#define ATMEL_TC_ASWTRG_NONE (0 << 22)
+-#define ATMEL_TC_ASWTRG_SET (1 << 22)
+-#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
+-#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
+-#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
+-#define ATMEL_TC_BCPB_NONE (0 << 24)
+-#define ATMEL_TC_BCPB_SET (1 << 24)
+-#define ATMEL_TC_BCPB_CLEAR (2 << 24)
+-#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
+-#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
+-#define ATMEL_TC_BCPC_NONE (0 << 26)
+-#define ATMEL_TC_BCPC_SET (1 << 26)
+-#define ATMEL_TC_BCPC_CLEAR (2 << 26)
+-#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
+-#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
+-#define ATMEL_TC_BEEVT_NONE (0 << 28)
+-#define ATMEL_TC_BEEVT_SET (1 << 28)
+-#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
+-#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
+-#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
+-#define ATMEL_TC_BSWTRG_NONE (0 << 30)
+-#define ATMEL_TC_BSWTRG_SET (1 << 30)
+-#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
+-#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
+-
+-#define ATMEL_TC_CV 0x10 /* counter Value */
+-#define ATMEL_TC_RA 0x14 /* register A */
+-#define ATMEL_TC_RB 0x18 /* register B */
+-#define ATMEL_TC_RC 0x1c /* register C */
+-
+-#define ATMEL_TC_SR 0x20 /* status (read-only) */
+-/* Status-only flags */
+-#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
+-#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
+-#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
+-
+-#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
+-#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
+-#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
+-
+-/* Status and IRQ flags */
+-#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
+-#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
+-#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
+-#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
+-#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
+-#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
+-#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
+-#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
+-#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
+- ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
+- ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
+- ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
+- /* all IRQs */
+-
+-#endif
+--- /dev/null
++++ b/include/soc/at91/atmel_tcb.h
+@@ -0,0 +1,270 @@
++/*
++ * Timer/Counter Unit (TC) registers.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ */
++
++#ifndef __SOC_ATMEL_TCB_H
++#define __SOC_ATMEL_TCB_H
++
++#include <linux/compiler.h>
++#include <linux/list.h>
++
++/*
++ * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
++ * three general-purpose 16-bit timers. These timers share one register bank.
++ * Depending on the SOC, each timer may have its own clock and IRQ, or those
++ * may be shared by the whole TC block.
++ *
++ * These TC blocks may have up to nine external pins: TCLK0..2 signals for
++ * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
++ * or triggering. Those pins need to be set up for use with the TC block,
++ * else they will be used as GPIOs or for a different controller.
++ *
++ * Although we expect each TC block to have a platform_device node, those
++ * nodes are not what drivers bind to. Instead, they ask for a specific
++ * TC block, by number ... which is a common approach on systems with many
++ * timers. Then they use clk_get() and platform_get_irq() to get clock and
++ * IRQ resources.
++ */
++
++struct clk;
++
++/**
++ * struct atmel_tcb_config - SoC data for a Timer/Counter Block
++ * @counter_width: size in bits of a timer counter register
++ */
++struct atmel_tcb_config {
++ size_t counter_width;
++};
++
++/**
++ * struct atmel_tc - information about a Timer/Counter Block
++ * @pdev: physical device
++ * @regs: mapping through which the I/O registers can be accessed
++ * @id: block id
++ * @tcb_config: configuration data from SoC
++ * @irq: irq for each of the three channels
++ * @clk: internal clock source for each of the three channels
++ * @node: list node, for tclib internal use
++ * @allocated: if already used, for tclib internal use
++ *
++ * On some platforms, each TC channel has its own clocks and IRQs,
++ * while on others, all TC channels share the same clock and IRQ.
++ * Drivers should clk_enable() all the clocks they need even though
++ * all the entries in @clk may point to the same physical clock.
++ * Likewise, drivers should request irqs independently for each
++ * channel, but they must use IRQF_SHARED in case some of the entries
++ * in @irq are actually the same IRQ.
++ */
++struct atmel_tc {
++ struct platform_device *pdev;
++ void __iomem *regs;
++ int id;
++ const struct atmel_tcb_config *tcb_config;
++ int irq[3];
++ struct clk *clk[3];
++ struct clk *slow_clk;
++ struct list_head node;
++ bool allocated;
++};
++
++extern struct atmel_tc *atmel_tc_alloc(unsigned block);
++extern void atmel_tc_free(struct atmel_tc *tc);
++
++/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
++extern const u8 atmel_tc_divisors[5];
++
++
++/*
++ * Two registers have block-wide controls. These are: configuring the three
++ * "external" clocks (or event sources) used by the timer channels; and
++ * synchronizing the timers by resetting them all at once.
++ *
++ * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
++ * signals. Or, it can mean "external to timer", using the TIOA output from
++ * one of the other two timers that's being run in waveform mode.
++ */
++
++#define ATMEL_TC_BCR 0xc0 /* TC Block Control Register */
++#define ATMEL_TC_SYNC (1 << 0) /* synchronize timers */
++
++#define ATMEL_TC_BMR 0xc4 /* TC Block Mode Register */
++#define ATMEL_TC_TC0XC0S (3 << 0) /* external clock 0 source */
++#define ATMEL_TC_TC0XC0S_TCLK0 (0 << 0)
++#define ATMEL_TC_TC0XC0S_NONE (1 << 0)
++#define ATMEL_TC_TC0XC0S_TIOA1 (2 << 0)
++#define ATMEL_TC_TC0XC0S_TIOA2 (3 << 0)
++#define ATMEL_TC_TC1XC1S (3 << 2) /* external clock 1 source */
++#define ATMEL_TC_TC1XC1S_TCLK1 (0 << 2)
++#define ATMEL_TC_TC1XC1S_NONE (1 << 2)
++#define ATMEL_TC_TC1XC1S_TIOA0 (2 << 2)
++#define ATMEL_TC_TC1XC1S_TIOA2 (3 << 2)
++#define ATMEL_TC_TC2XC2S (3 << 4) /* external clock 2 source */
++#define ATMEL_TC_TC2XC2S_TCLK2 (0 << 4)
++#define ATMEL_TC_TC2XC2S_NONE (1 << 4)
++#define ATMEL_TC_TC2XC2S_TIOA0 (2 << 4)
++#define ATMEL_TC_TC2XC2S_TIOA1 (3 << 4)
++
++
++/*
++ * Each TC block has three "channels", each with one counter and controls.
++ *
++ * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
++ * when it's not "external") is silicon-specific. AT91 platforms use one
++ * set of definitions; AVR32 platforms use a different set. Don't hard-wire
++ * such knowledge into your code, use the global "atmel_tc_divisors" ...
++ * where index N is the divisor for clock N+1, else zero to indicate it uses
++ * the 32 KiHz clock.
++ *
++ * The timers can be chained in various ways, and operated in "waveform"
++ * generation mode (including PWM) or "capture" mode (to time events). In
++ * both modes, behavior can be configured in many ways.
++ *
++ * Each timer has two I/O pins, TIOA and TIOB. Waveform mode uses TIOA as a
++ * PWM output, and TIOB as either another PWM or as a trigger. Capture mode
++ * uses them only as inputs.
++ */
++#define ATMEL_TC_CHAN(idx) ((idx)*0x40)
++#define ATMEL_TC_REG(idx, reg) (ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)
++
++#define ATMEL_TC_CCR 0x00 /* Channel Control Register */
++#define ATMEL_TC_CLKEN (1 << 0) /* clock enable */
++#define ATMEL_TC_CLKDIS (1 << 1) /* clock disable */
++#define ATMEL_TC_SWTRG (1 << 2) /* software trigger */
++
++#define ATMEL_TC_CMR 0x04 /* Channel Mode Register */
++
++/* Both modes share some CMR bits */
++#define ATMEL_TC_TCCLKS (7 << 0) /* clock source */
++#define ATMEL_TC_TIMER_CLOCK1 (0 << 0)
++#define ATMEL_TC_TIMER_CLOCK2 (1 << 0)
++#define ATMEL_TC_TIMER_CLOCK3 (2 << 0)
++#define ATMEL_TC_TIMER_CLOCK4 (3 << 0)
++#define ATMEL_TC_TIMER_CLOCK5 (4 << 0)
++#define ATMEL_TC_XC0 (5 << 0)
++#define ATMEL_TC_XC1 (6 << 0)
++#define ATMEL_TC_XC2 (7 << 0)
++#define ATMEL_TC_CLKI (1 << 3) /* clock invert */
++#define ATMEL_TC_BURST (3 << 4) /* clock gating */
++#define ATMEL_TC_GATE_NONE (0 << 4)
++#define ATMEL_TC_GATE_XC0 (1 << 4)
++#define ATMEL_TC_GATE_XC1 (2 << 4)
++#define ATMEL_TC_GATE_XC2 (3 << 4)
++#define ATMEL_TC_WAVE (1 << 15) /* true = Waveform mode */
++
++/* CAPTURE mode CMR bits */
++#define ATMEL_TC_LDBSTOP (1 << 6) /* counter stops on RB load */
++#define ATMEL_TC_LDBDIS (1 << 7) /* counter disable on RB load */
++#define ATMEL_TC_ETRGEDG (3 << 8) /* external trigger edge */
++#define ATMEL_TC_ETRGEDG_NONE (0 << 8)
++#define ATMEL_TC_ETRGEDG_RISING (1 << 8)
++#define ATMEL_TC_ETRGEDG_FALLING (2 << 8)
++#define ATMEL_TC_ETRGEDG_BOTH (3 << 8)
++#define ATMEL_TC_ABETRG (1 << 10) /* external trigger is TIOA? */
++#define ATMEL_TC_CPCTRG (1 << 14) /* RC compare trigger enable */
++#define ATMEL_TC_LDRA (3 << 16) /* RA loading edge (of TIOA) */
++#define ATMEL_TC_LDRA_NONE (0 << 16)
++#define ATMEL_TC_LDRA_RISING (1 << 16)
++#define ATMEL_TC_LDRA_FALLING (2 << 16)
++#define ATMEL_TC_LDRA_BOTH (3 << 16)
++#define ATMEL_TC_LDRB (3 << 18) /* RB loading edge (of TIOA) */
++#define ATMEL_TC_LDRB_NONE (0 << 18)
++#define ATMEL_TC_LDRB_RISING (1 << 18)
++#define ATMEL_TC_LDRB_FALLING (2 << 18)
++#define ATMEL_TC_LDRB_BOTH (3 << 18)
++
++/* WAVEFORM mode CMR bits */
++#define ATMEL_TC_CPCSTOP (1 << 6) /* RC compare stops counter */
++#define ATMEL_TC_CPCDIS (1 << 7) /* RC compare disables counter */
++#define ATMEL_TC_EEVTEDG (3 << 8) /* external event edge */
++#define ATMEL_TC_EEVTEDG_NONE (0 << 8)
++#define ATMEL_TC_EEVTEDG_RISING (1 << 8)
++#define ATMEL_TC_EEVTEDG_FALLING (2 << 8)
++#define ATMEL_TC_EEVTEDG_BOTH (3 << 8)
++#define ATMEL_TC_EEVT (3 << 10) /* external event source */
++#define ATMEL_TC_EEVT_TIOB (0 << 10)
++#define ATMEL_TC_EEVT_XC0 (1 << 10)
++#define ATMEL_TC_EEVT_XC1 (2 << 10)
++#define ATMEL_TC_EEVT_XC2 (3 << 10)
++#define ATMEL_TC_ENETRG (1 << 12) /* external event is trigger */
++#define ATMEL_TC_WAVESEL (3 << 13) /* waveform type */
++#define ATMEL_TC_WAVESEL_UP (0 << 13)
++#define ATMEL_TC_WAVESEL_UPDOWN (1 << 13)
++#define ATMEL_TC_WAVESEL_UP_AUTO (2 << 13)
++#define ATMEL_TC_WAVESEL_UPDOWN_AUTO (3 << 13)
++#define ATMEL_TC_ACPA (3 << 16) /* RA compare changes TIOA */
++#define ATMEL_TC_ACPA_NONE (0 << 16)
++#define ATMEL_TC_ACPA_SET (1 << 16)
++#define ATMEL_TC_ACPA_CLEAR (2 << 16)
++#define ATMEL_TC_ACPA_TOGGLE (3 << 16)
++#define ATMEL_TC_ACPC (3 << 18) /* RC compare changes TIOA */
++#define ATMEL_TC_ACPC_NONE (0 << 18)
++#define ATMEL_TC_ACPC_SET (1 << 18)
++#define ATMEL_TC_ACPC_CLEAR (2 << 18)
++#define ATMEL_TC_ACPC_TOGGLE (3 << 18)
++#define ATMEL_TC_AEEVT (3 << 20) /* external event changes TIOA */
++#define ATMEL_TC_AEEVT_NONE (0 << 20)
++#define ATMEL_TC_AEEVT_SET (1 << 20)
++#define ATMEL_TC_AEEVT_CLEAR (2 << 20)
++#define ATMEL_TC_AEEVT_TOGGLE (3 << 20)
++#define ATMEL_TC_ASWTRG (3 << 22) /* software trigger changes TIOA */
++#define ATMEL_TC_ASWTRG_NONE (0 << 22)
++#define ATMEL_TC_ASWTRG_SET (1 << 22)
++#define ATMEL_TC_ASWTRG_CLEAR (2 << 22)
++#define ATMEL_TC_ASWTRG_TOGGLE (3 << 22)
++#define ATMEL_TC_BCPB (3 << 24) /* RB compare changes TIOB */
++#define ATMEL_TC_BCPB_NONE (0 << 24)
++#define ATMEL_TC_BCPB_SET (1 << 24)
++#define ATMEL_TC_BCPB_CLEAR (2 << 24)
++#define ATMEL_TC_BCPB_TOGGLE (3 << 24)
++#define ATMEL_TC_BCPC (3 << 26) /* RC compare changes TIOB */
++#define ATMEL_TC_BCPC_NONE (0 << 26)
++#define ATMEL_TC_BCPC_SET (1 << 26)
++#define ATMEL_TC_BCPC_CLEAR (2 << 26)
++#define ATMEL_TC_BCPC_TOGGLE (3 << 26)
++#define ATMEL_TC_BEEVT (3 << 28) /* external event changes TIOB */
++#define ATMEL_TC_BEEVT_NONE (0 << 28)
++#define ATMEL_TC_BEEVT_SET (1 << 28)
++#define ATMEL_TC_BEEVT_CLEAR (2 << 28)
++#define ATMEL_TC_BEEVT_TOGGLE (3 << 28)
++#define ATMEL_TC_BSWTRG (3 << 30) /* software trigger changes TIOB */
++#define ATMEL_TC_BSWTRG_NONE (0 << 30)
++#define ATMEL_TC_BSWTRG_SET (1 << 30)
++#define ATMEL_TC_BSWTRG_CLEAR (2 << 30)
++#define ATMEL_TC_BSWTRG_TOGGLE (3 << 30)
++
++#define ATMEL_TC_CV 0x10 /* counter Value */
++#define ATMEL_TC_RA 0x14 /* register A */
++#define ATMEL_TC_RB 0x18 /* register B */
++#define ATMEL_TC_RC 0x1c /* register C */
++
++#define ATMEL_TC_SR 0x20 /* status (read-only) */
++/* Status-only flags */
++#define ATMEL_TC_CLKSTA (1 << 16) /* clock enabled */
++#define ATMEL_TC_MTIOA (1 << 17) /* TIOA mirror */
++#define ATMEL_TC_MTIOB (1 << 18) /* TIOB mirror */
++
++#define ATMEL_TC_IER 0x24 /* interrupt enable (write-only) */
++#define ATMEL_TC_IDR 0x28 /* interrupt disable (write-only) */
++#define ATMEL_TC_IMR 0x2c /* interrupt mask (read-only) */
++
++/* Status and IRQ flags */
++#define ATMEL_TC_COVFS (1 << 0) /* counter overflow */
++#define ATMEL_TC_LOVRS (1 << 1) /* load overrun */
++#define ATMEL_TC_CPAS (1 << 2) /* RA compare */
++#define ATMEL_TC_CPBS (1 << 3) /* RB compare */
++#define ATMEL_TC_CPCS (1 << 4) /* RC compare */
++#define ATMEL_TC_LDRAS (1 << 5) /* RA loading */
++#define ATMEL_TC_LDRBS (1 << 6) /* RB loading */
++#define ATMEL_TC_ETRGS (1 << 7) /* external trigger */
++#define ATMEL_TC_ALL_IRQ (ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
++ ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
++ ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
++ ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
++ /* all IRQs */
++
++#endif
diff --git a/patches/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch b/patches/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
index 325b515a9f8a..1693d1c6ba87 100644
--- a/patches/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
+++ b/patches/0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 16 Oct 2018 11:08:14 +0200
-Subject: [PATCH 01/22] x86/fpu: Remove fpu->initialized usage in
+Subject: [PATCH 01/27] x86/fpu: Remove fpu->initialized usage in
__fpu__restore_sig()
This is a preparation for the removal of the ->initialized member in the
diff --git a/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch b/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
deleted file mode 100644
index db0fa033192e..000000000000
--- a/patches/0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
+++ /dev/null
@@ -1,473 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:19 +0200
-Subject: [PATCH 2/7] clocksource/drivers: Add a new driver for the Atmel ARM
- TC blocks
-
-Add a driver for the Atmel Timer Counter Blocks. This driver provides a
-clocksource and two clockevent devices.
-
-One of the clockevent device is linked to the clocksource counter and so it
-will run at the same frequency. This will be used when there is only on TCB
-channel available for timers.
-
-The other clockevent device runs on a separate TCB channel when available.
-
-This driver uses regmap and syscon to be able to probe early in the boot
-and avoid having to switch on the TCB clocksource later. Using regmap also
-means that unused TCB channels may be used by other drivers (PWM for
-example). read/writel are still used to access channel specific registers
-to avoid the performance impact of regmap (mainly locking).
-
-Tested-by: Alexander Dahl <ada@thorsis.com>
-Tested-by: Andras Szemzo <szemzo.andras@gmail.com>
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/Kconfig | 8
- drivers/clocksource/Makefile | 3
- drivers/clocksource/timer-atmel-tcb.c | 410 ++++++++++++++++++++++++++++++++++
- 3 files changed, 420 insertions(+), 1 deletion(-)
- create mode 100644 drivers/clocksource/timer-atmel-tcb.c
-
---- a/drivers/clocksource/Kconfig
-+++ b/drivers/clocksource/Kconfig
-@@ -399,6 +399,14 @@ config ATMEL_ST
- help
- Support for the Atmel ST timer.
-
-+config ATMEL_ARM_TCB_CLKSRC
-+ bool "Microchip ARM TC Block" if COMPILE_TEST
-+ select REGMAP_MMIO
-+ depends on GENERIC_CLOCKEVENTS
-+ help
-+ This enables build of clocksource and clockevent driver for
-+ the integrated Timer Counter Blocks in Microchip ARM SoCs.
-+
- config CLKSRC_EXYNOS_MCT
- bool "Exynos multi core timer driver" if COMPILE_TEST
- depends on ARM || ARM64
---- a/drivers/clocksource/Makefile
-+++ b/drivers/clocksource/Makefile
-@@ -3,7 +3,8 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
- obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
- obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
- obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
--obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
-+obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
-+obj-$(CONFIG_ATMEL_ARM_TCB_CLKSRC) += timer-atmel-tcb.o
- obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
- obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
- obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
---- /dev/null
-+++ b/drivers/clocksource/timer-atmel-tcb.c
-@@ -0,0 +1,410 @@
-+// SPDX-License-Identifier: GPL-2.0
-+#include <linux/clk.h>
-+#include <linux/clockchips.h>
-+#include <linux/clocksource.h>
-+#include <linux/interrupt.h>
-+#include <linux/kernel.h>
-+#include <linux/mfd/syscon.h>
-+#include <linux/of_address.h>
-+#include <linux/of_irq.h>
-+#include <linux/regmap.h>
-+#include <linux/sched_clock.h>
-+#include <soc/at91/atmel_tcb.h>
-+
-+struct atmel_tcb_clksrc {
-+ struct clocksource clksrc;
-+ struct clock_event_device clkevt;
-+ struct regmap *regmap;
-+ void __iomem *base;
-+ struct clk *clk[2];
-+ char name[20];
-+ int channels[2];
-+ int bits;
-+ int irq;
-+ struct {
-+ u32 cmr;
-+ u32 imr;
-+ u32 rc;
-+ bool clken;
-+ } cache[2];
-+ u32 bmr_cache;
-+ bool registered;
-+ bool clk_enabled;
-+};
-+
-+static struct atmel_tcb_clksrc tc;
-+
-+static struct clk *tcb_clk_get(struct device_node *node, int channel)
-+{
-+ struct clk *clk;
-+ char clk_name[] = "t0_clk";
-+
-+ clk_name[1] += channel;
-+ clk = of_clk_get_by_name(node->parent, clk_name);
-+ if (!IS_ERR(clk))
-+ return clk;
-+
-+ return of_clk_get_by_name(node->parent, "t0_clk");
-+}
-+
-+/*
-+ * Clocksource and clockevent using the same channel(s)
-+ */
-+static u64 tc_get_cycles(struct clocksource *cs)
-+{
-+ u32 lower, upper;
-+
-+ do {
-+ upper = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1]));
-+ lower = readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
-+ } while (upper != readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[1])));
-+
-+ return (upper << 16) | lower;
-+}
-+
-+static u64 tc_get_cycles32(struct clocksource *cs)
-+{
-+ return readl_relaxed(tc.base + ATMEL_TC_CV(tc.channels[0]));
-+}
-+
-+static u64 notrace tc_sched_clock_read(void)
-+{
-+ return tc_get_cycles(&tc.clksrc);
-+}
-+
-+static u64 notrace tc_sched_clock_read32(void)
-+{
-+ return tc_get_cycles32(&tc.clksrc);
-+}
-+
-+static int tcb_clkevt_next_event(unsigned long delta,
-+ struct clock_event_device *d)
-+{
-+ u32 old, next, cur;
-+
-+ old = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
-+ next = old + delta;
-+ writel(next, tc.base + ATMEL_TC_RC(tc.channels[0]));
-+ cur = readl(tc.base + ATMEL_TC_CV(tc.channels[0]));
-+
-+ /* check whether the delta elapsed while setting the register */
-+ if ((next < old && cur < old && cur > next) ||
-+ (next > old && (cur < old || cur > next))) {
-+ /*
-+ * Clear the CPCS bit in the status register to avoid
-+ * generating a spurious interrupt next time a valid
-+ * timer event is configured.
-+ */
-+ old = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
-+ return -ETIME;
-+ }
-+
-+ writel(ATMEL_TC_CPCS, tc.base + ATMEL_TC_IER(tc.channels[0]));
-+
-+ return 0;
-+}
-+
-+static irqreturn_t tc_clkevt_irq(int irq, void *handle)
-+{
-+ unsigned int sr;
-+
-+ sr = readl(tc.base + ATMEL_TC_SR(tc.channels[0]));
-+ if (sr & ATMEL_TC_CPCS) {
-+ tc.clkevt.event_handler(&tc.clkevt);
-+ return IRQ_HANDLED;
-+ }
-+
-+ return IRQ_NONE;
-+}
-+
-+static int tcb_clkevt_oneshot(struct clock_event_device *dev)
-+{
-+ if (clockevent_state_oneshot(dev))
-+ return 0;
-+
-+ /*
-+ * Because both clockevent devices may share the same IRQ, we don't want
-+ * the less likely one to stay requested
-+ */
-+ return request_irq(tc.irq, tc_clkevt_irq, IRQF_TIMER | IRQF_SHARED,
-+ tc.name, &tc);
-+}
-+
-+static int tcb_clkevt_shutdown(struct clock_event_device *dev)
-+{
-+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[0]));
-+ if (tc.bits == 16)
-+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[1]));
-+
-+ if (!clockevent_state_detached(dev))
-+ free_irq(tc.irq, &tc);
-+
-+ return 0;
-+}
-+
-+static void __init tcb_setup_dual_chan(struct atmel_tcb_clksrc *tc,
-+ int mck_divisor_idx)
-+{
-+ /* first channel: waveform mode, input mclk/8, clock TIOA on overflow */
-+ writel(mck_divisor_idx /* likely divide-by-8 */
-+ | ATMEL_TC_CMR_WAVE
-+ | ATMEL_TC_CMR_WAVESEL_UP /* free-run */
-+ | ATMEL_TC_CMR_ACPA(SET) /* TIOA rises at 0 */
-+ | ATMEL_TC_CMR_ACPC(CLEAR), /* (duty cycle 50%) */
-+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
-+ writel(0x0000, tc->base + ATMEL_TC_RA(tc->channels[0]));
-+ writel(0x8000, tc->base + ATMEL_TC_RC(tc->channels[0]));
-+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
-+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
-+
-+ /* second channel: waveform mode, input TIOA */
-+ writel(ATMEL_TC_CMR_XC(tc->channels[1]) /* input: TIOA */
-+ | ATMEL_TC_CMR_WAVE
-+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
-+ tc->base + ATMEL_TC_CMR(tc->channels[1]));
-+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[1])); /* no irqs */
-+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[1]));
-+
-+ /* chain both channel, we assume the previous channel */
-+ regmap_write(tc->regmap, ATMEL_TC_BMR,
-+ ATMEL_TC_BMR_TCXC(1 + tc->channels[1], tc->channels[1]));
-+ /* then reset all the timers */
-+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
-+}
-+
-+static void __init tcb_setup_single_chan(struct atmel_tcb_clksrc *tc,
-+ int mck_divisor_idx)
-+{
-+ /* channel 0: waveform mode, input mclk/8 */
-+ writel(mck_divisor_idx /* likely divide-by-8 */
-+ | ATMEL_TC_CMR_WAVE
-+ | ATMEL_TC_CMR_WAVESEL_UP, /* free-run */
-+ tc->base + ATMEL_TC_CMR(tc->channels[0]));
-+ writel(0xff, tc->base + ATMEL_TC_IDR(tc->channels[0])); /* no irqs */
-+ writel(ATMEL_TC_CCR_CLKEN, tc->base + ATMEL_TC_CCR(tc->channels[0]));
-+
-+ /* then reset all the timers */
-+ regmap_write(tc->regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
-+}
-+
-+static void tc_clksrc_suspend(struct clocksource *cs)
-+{
-+ int i;
-+
-+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
-+ tc.cache[i].cmr = readl(tc.base + ATMEL_TC_CMR(tc.channels[i]));
-+ tc.cache[i].imr = readl(tc.base + ATMEL_TC_IMR(tc.channels[i]));
-+ tc.cache[i].rc = readl(tc.base + ATMEL_TC_RC(tc.channels[i]));
-+ tc.cache[i].clken = !!(readl(tc.base +
-+ ATMEL_TC_SR(tc.channels[i])) &
-+ ATMEL_TC_CLKSTA);
-+ }
-+
-+ if (tc.bits == 16)
-+ regmap_read(tc.regmap, ATMEL_TC_BMR, &tc.bmr_cache);
-+}
-+
-+static void tc_clksrc_resume(struct clocksource *cs)
-+{
-+ int i;
-+
-+ for (i = 0; i < 1 + (tc.bits == 16); i++) {
-+ /* Restore registers for the channel, RA and RB are not used */
-+ writel(tc.cache[i].cmr, tc.base + ATMEL_TC_CMR(tc.channels[i]));
-+ writel(tc.cache[i].rc, tc.base + ATMEL_TC_RC(tc.channels[i]));
-+ writel(0, tc.base + ATMEL_TC_RA(tc.channels[i]));
-+ writel(0, tc.base + ATMEL_TC_RB(tc.channels[i]));
-+ /* Disable all the interrupts */
-+ writel(0xff, tc.base + ATMEL_TC_IDR(tc.channels[i]));
-+ /* Reenable interrupts that were enabled before suspending */
-+ writel(tc.cache[i].imr, tc.base + ATMEL_TC_IER(tc.channels[i]));
-+
-+ /* Start the clock if it was used */
-+ if (tc.cache[i].clken)
-+ writel(ATMEL_TC_CCR_CLKEN, tc.base +
-+ ATMEL_TC_CCR(tc.channels[i]));
-+ }
-+
-+ /* in case of dual channel, chain channels */
-+ if (tc.bits == 16)
-+ regmap_write(tc.regmap, ATMEL_TC_BMR, tc.bmr_cache);
-+ /* Finally, trigger all the channels*/
-+ regmap_write(tc.regmap, ATMEL_TC_BCR, ATMEL_TC_BCR_SYNC);
-+}
-+
-+static int __init tcb_clksrc_register(struct device_node *node,
-+ struct regmap *regmap, void __iomem *base,
-+ int channel, int channel1, int irq,
-+ int bits)
-+{
-+ u32 rate, divided_rate = 0;
-+ int best_divisor_idx = -1;
-+ int i, err = -1;
-+ u64 (*tc_sched_clock)(void);
-+
-+ tc.regmap = regmap;
-+ tc.base = base;
-+ tc.channels[0] = channel;
-+ tc.channels[1] = channel1;
-+ tc.irq = irq;
-+ tc.bits = bits;
-+
-+ tc.clk[0] = tcb_clk_get(node, tc.channels[0]);
-+ if (IS_ERR(tc.clk[0]))
-+ return PTR_ERR(tc.clk[0]);
-+ err = clk_prepare_enable(tc.clk[0]);
-+ if (err) {
-+ pr_debug("can't enable T0 clk\n");
-+ goto err_clk;
-+ }
-+
-+ /* How fast will we be counting? Pick something over 5 MHz. */
-+ rate = (u32)clk_get_rate(tc.clk[0]);
-+ for (i = 0; i < 5; i++) {
-+ unsigned int divisor = atmel_tc_divisors[i];
-+ unsigned int tmp;
-+
-+ if (!divisor)
-+ continue;
-+
-+ tmp = rate / divisor;
-+ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
-+ if (best_divisor_idx > 0) {
-+ if (tmp < 5 * 1000 * 1000)
-+ continue;
-+ }
-+ divided_rate = tmp;
-+ best_divisor_idx = i;
-+ }
-+
-+ if (tc.bits == 32) {
-+ tc.clksrc.read = tc_get_cycles32;
-+ tcb_setup_single_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read32;
-+ snprintf(tc.name, sizeof(tc.name), "%s:%d",
-+ kbasename(node->parent->full_name), tc.channels[0]);
-+ } else {
-+ tc.clk[1] = tcb_clk_get(node, tc.channels[1]);
-+ if (IS_ERR(tc.clk[1]))
-+ goto err_disable_t0;
-+
-+ err = clk_prepare_enable(tc.clk[1]);
-+ if (err) {
-+ pr_debug("can't enable T1 clk\n");
-+ goto err_clk1;
-+ }
-+ tc.clksrc.read = tc_get_cycles,
-+ tcb_setup_dual_chan(&tc, best_divisor_idx);
-+ tc_sched_clock = tc_sched_clock_read;
-+ snprintf(tc.name, sizeof(tc.name), "%s:%d,%d",
-+ kbasename(node->parent->full_name), tc.channels[0],
-+ tc.channels[1]);
-+ }
-+
-+ pr_debug("%s at %d.%03d MHz\n", tc.name,
-+ divided_rate / 1000000,
-+ ((divided_rate + 500000) % 1000000) / 1000);
-+
-+ tc.clksrc.name = tc.name;
-+ tc.clksrc.suspend = tc_clksrc_suspend;
-+ tc.clksrc.resume = tc_clksrc_resume;
-+ tc.clksrc.rating = 200;
-+ tc.clksrc.mask = CLOCKSOURCE_MASK(32);
-+ tc.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-+
-+ err = clocksource_register_hz(&tc.clksrc, divided_rate);
-+ if (err)
-+ goto err_disable_t1;
-+
-+ sched_clock_register(tc_sched_clock, 32, divided_rate);
-+
-+ tc.registered = true;
-+
-+ /* Set up and register clockevents */
-+ tc.clkevt.name = tc.name;
-+ tc.clkevt.cpumask = cpumask_of(0);
-+ tc.clkevt.set_next_event = tcb_clkevt_next_event;
-+ tc.clkevt.set_state_oneshot = tcb_clkevt_oneshot;
-+ tc.clkevt.set_state_shutdown = tcb_clkevt_shutdown;
-+ tc.clkevt.features = CLOCK_EVT_FEAT_ONESHOT;
-+ tc.clkevt.rating = 125;
-+
-+ clockevents_config_and_register(&tc.clkevt, divided_rate, 1,
-+ BIT(tc.bits) - 1);
-+
-+ return 0;
-+
-+err_disable_t1:
-+ if (tc.bits == 16)
-+ clk_disable_unprepare(tc.clk[1]);
-+
-+err_clk1:
-+ if (tc.bits == 16)
-+ clk_put(tc.clk[1]);
-+
-+err_disable_t0:
-+ clk_disable_unprepare(tc.clk[0]);
-+
-+err_clk:
-+ clk_put(tc.clk[0]);
-+
-+ pr_err("%s: unable to register clocksource/clockevent\n",
-+ tc.clksrc.name);
-+
-+ return err;
-+}
-+
-+static int __init tcb_clksrc_init(struct device_node *node)
-+{
-+ const struct of_device_id *match;
-+ struct regmap *regmap;
-+ void __iomem *tcb_base;
-+ u32 channel;
-+ int irq, err, chan1 = -1;
-+ unsigned bits;
-+
-+ if (tc.registered)
-+ return -ENODEV;
-+
-+ /*
-+ * The regmap has to be used to access registers that are shared
-+ * between channels on the same TCB but we keep direct IO access for
-+ * the counters to avoid the impact on performance
-+ */
-+ regmap = syscon_node_to_regmap(node->parent);
-+ if (IS_ERR(regmap))
-+ return PTR_ERR(regmap);
-+
-+ tcb_base = of_iomap(node->parent, 0);
-+ if (!tcb_base) {
-+ pr_err("%s +%d %s\n", __FILE__, __LINE__, __func__);
-+ return -ENXIO;
-+ }
-+
-+ match = of_match_node(atmel_tcb_dt_ids, node->parent);
-+ bits = (uintptr_t)match->data;
-+
-+ err = of_property_read_u32_index(node, "reg", 0, &channel);
-+ if (err)
-+ return err;
-+
-+ irq = of_irq_get(node->parent, channel);
-+ if (irq < 0) {
-+ irq = of_irq_get(node->parent, 0);
-+ if (irq < 0)
-+ return irq;
-+ }
-+
-+ if (bits == 16) {
-+ of_property_read_u32_index(node, "reg", 1, &chan1);
-+ if (chan1 == -1) {
-+ pr_err("%s: clocksource needs two channels\n",
-+ node->parent->full_name);
-+ return -EINVAL;
-+ }
-+ }
-+
-+ return tcb_clksrc_register(node, regmap, tcb_base, channel, chan1, irq,
-+ bits);
-+}
-+TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
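For reference, the TIMER_OF_DECLARE() line that closes the hunk above is what lets the driver probe without tclib: the macro records a compatible string and an init callback in a dedicated linker section, and timer_probe() invokes the callback for every matching device-tree node early during boot, well before initcalls and platform-device binding. A minimal sketch of that pattern, using hypothetical names ("vendor,example-timer", example_timer_init()); it is not taken from the patch queue:

/*
 * Sketch only (not part of the patch queue): the generic TIMER_OF_DECLARE()
 * early-probe pattern; "vendor,example-timer" and example_timer_init() are
 * hypothetical.
 */
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int __init example_timer_init(struct device_node *node)
{
	void __iomem *base = of_iomap(node, 0);	/* map the timer registers */

	if (!base)
		return -ENXIO;

	/* ...program the hardware and register clocksource/clockevent here... */
	return 0;
}

/* timer_probe() calls this for every matching DT node, long before initcalls. */
TIMER_OF_DECLARE(example_timer, "vendor,example-timer", example_timer_init);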
diff --git a/patches/0002-misc-atmel_tclib-drop-AVR32-support.patch b/patches/0002-misc-atmel_tclib-drop-AVR32-support.patch
new file mode 100644
index 000000000000..7958b43bd715
--- /dev/null
+++ b/patches/0002-misc-atmel_tclib-drop-AVR32-support.patch
@@ -0,0 +1,33 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:10 +0200
+Subject: [PATCH 02/12] misc: atmel_tclib: drop AVR32 support
+
+AVR32 is gone from the kernel, remove its support from tclib.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/atmel_tclib.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+--- a/drivers/misc/atmel_tclib.c
++++ b/drivers/misc/atmel_tclib.c
+@@ -17,18 +17,10 @@
+ * share individual timers between different drivers.
+ */
+
+-#if defined(CONFIG_AVR32)
+-/* AVR32 has these divide PBB */
+-const u8 atmel_tc_divisors[5] = { 0, 4, 8, 16, 32, };
+-EXPORT_SYMBOL(atmel_tc_divisors);
+-
+-#elif defined(CONFIG_ARCH_AT91)
+ /* AT91 has these divide MCK */
+ const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
+ EXPORT_SYMBOL(atmel_tc_divisors);
+
+-#endif
+-
+ static DEFINE_SPINLOCK(tc_list_lock);
+ static LIST_HEAD(tc_list);
+
diff --git a/patches/0002-x86-fpu-Remove-fpu__restore.patch b/patches/0002-x86-fpu-Remove-fpu__restore.patch
index c4a5d21b81c9..220bfeded59b 100644
--- a/patches/0002-x86-fpu-Remove-fpu__restore.patch
+++ b/patches/0002-x86-fpu-Remove-fpu__restore.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 17 Oct 2018 16:10:45 +0200
-Subject: [PATCH 02/22] x86/fpu: Remove fpu__restore()
+Subject: [PATCH 02/27] x86/fpu: Remove fpu__restore()
There are no users of fpu__restore() so it is time to remove it.
The comment regarding fpu__restore() and TS bit is stale since commit
diff --git a/patches/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch b/patches/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch
deleted file mode 100644
index 698988cbd0ba..000000000000
--- a/patches/0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch
+++ /dev/null
@@ -1,264 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:20 +0200
-Subject: [PATCH 3/7] clocksource/drivers: timer-atmel-tcb: add clockevent
- device on separate channel
-
-Add an other clockevent device that uses a separate TCB channel when
-available.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- drivers/clocksource/timer-atmel-tcb.c | 217 +++++++++++++++++++++++++++++++++-
- 1 file changed, 212 insertions(+), 5 deletions(-)
-
---- a/drivers/clocksource/timer-atmel-tcb.c
-+++ b/drivers/clocksource/timer-atmel-tcb.c
-@@ -32,7 +32,7 @@ struct atmel_tcb_clksrc {
- bool clk_enabled;
- };
-
--static struct atmel_tcb_clksrc tc;
-+static struct atmel_tcb_clksrc tc, tce;
-
- static struct clk *tcb_clk_get(struct device_node *node, int channel)
- {
-@@ -48,6 +48,203 @@ static struct clk *tcb_clk_get(struct de
- }
-
- /*
-+ * Clockevent device using its own channel
-+ */
-+
-+static void tc_clkevt2_clk_disable(struct clock_event_device *d)
-+{
-+ clk_disable(tce.clk[0]);
-+ tce.clk_enabled = false;
-+}
-+
-+static void tc_clkevt2_clk_enable(struct clock_event_device *d)
-+{
-+ if (tce.clk_enabled)
-+ return;
-+ clk_enable(tce.clk[0]);
-+ tce.clk_enabled = true;
-+}
-+
-+static int tc_clkevt2_stop(struct clock_event_device *d)
-+{
-+ writel(0xff, tce.base + ATMEL_TC_IDR(tce.channels[0]));
-+ writel(ATMEL_TC_CCR_CLKDIS, tce.base + ATMEL_TC_CCR(tce.channels[0]));
-+
-+ return 0;
-+}
-+
-+static int tc_clkevt2_shutdown(struct clock_event_device *d)
-+{
-+ tc_clkevt2_stop(d);
-+ if (!clockevent_state_detached(d))
-+ tc_clkevt2_clk_disable(d);
-+
-+ return 0;
-+}
-+
-+/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
-+ * because using one of the divided clocks would usually mean the
-+ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
-+ *
-+ * A divided clock could be good for high resolution timers, since
-+ * 30.5 usec resolution can seem "low".
-+ */
-+static int tc_clkevt2_set_oneshot(struct clock_event_device *d)
-+{
-+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-+ tc_clkevt2_stop(d);
-+
-+ tc_clkevt2_clk_enable(d);
-+
-+ /* slow clock, count up to RC, then irq and stop */
-+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_CPCSTOP |
-+ ATMEL_TC_CMR_WAVE | ATMEL_TC_CMR_WAVESEL_UPRC,
-+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
-+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
-+
-+ return 0;
-+}
-+
-+static int tc_clkevt2_set_periodic(struct clock_event_device *d)
-+{
-+ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
-+ tc_clkevt2_stop(d);
-+
-+ /* By not making the gentime core emulate periodic mode on top
-+ * of oneshot, we get lower overhead and improved accuracy.
-+ */
-+ tc_clkevt2_clk_enable(d);
-+
-+ /* slow clock, count up to RC, then irq and restart */
-+ writel(ATMEL_TC_CMR_TCLK(4) | ATMEL_TC_CMR_WAVE |
-+ ATMEL_TC_CMR_WAVESEL_UPRC,
-+ tce.base + ATMEL_TC_CMR(tce.channels[0]));
-+ writel((32768 + HZ / 2) / HZ, tce.base + ATMEL_TC_RC(tce.channels[0]));
-+
-+ /* Enable clock and interrupts on RC compare */
-+ writel(ATMEL_TC_CPCS, tce.base + ATMEL_TC_IER(tce.channels[0]));
-+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
-+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
-+
-+ return 0;
-+}
-+
-+static int tc_clkevt2_next_event(unsigned long delta,
-+ struct clock_event_device *d)
-+{
-+ writel(delta, tce.base + ATMEL_TC_RC(tce.channels[0]));
-+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
-+ tce.base + ATMEL_TC_CCR(tce.channels[0]));
-+
-+ return 0;
-+}
-+
-+static irqreturn_t tc_clkevt2_irq(int irq, void *handle)
-+{
-+ unsigned int sr;
-+
-+ sr = readl(tce.base + ATMEL_TC_SR(tce.channels[0]));
-+ if (sr & ATMEL_TC_CPCS) {
-+ tce.clkevt.event_handler(&tce.clkevt);
-+ return IRQ_HANDLED;
-+ }
-+
-+ return IRQ_NONE;
-+}
-+
-+static void tc_clkevt2_suspend(struct clock_event_device *d)
-+{
-+ tce.cache[0].cmr = readl(tce.base + ATMEL_TC_CMR(tce.channels[0]));
-+ tce.cache[0].imr = readl(tce.base + ATMEL_TC_IMR(tce.channels[0]));
-+ tce.cache[0].rc = readl(tce.base + ATMEL_TC_RC(tce.channels[0]));
-+ tce.cache[0].clken = !!(readl(tce.base + ATMEL_TC_SR(tce.channels[0])) &
-+ ATMEL_TC_CLKSTA);
-+}
-+
-+static void tc_clkevt2_resume(struct clock_event_device *d)
-+{
-+ /* Restore registers for the channel, RA and RB are not used */
-+ writel(tce.cache[0].cmr, tc.base + ATMEL_TC_CMR(tce.channels[0]));
-+ writel(tce.cache[0].rc, tc.base + ATMEL_TC_RC(tce.channels[0]));
-+ writel(0, tc.base + ATMEL_TC_RA(tce.channels[0]));
-+ writel(0, tc.base + ATMEL_TC_RB(tce.channels[0]));
-+ /* Disable all the interrupts */
-+ writel(0xff, tc.base + ATMEL_TC_IDR(tce.channels[0]));
-+ /* Reenable interrupts that were enabled before suspending */
-+ writel(tce.cache[0].imr, tc.base + ATMEL_TC_IER(tce.channels[0]));
-+
-+ /* Start the clock if it was used */
-+ if (tce.cache[0].clken)
-+ writel(ATMEL_TC_CCR_CLKEN | ATMEL_TC_CCR_SWTRG,
-+ tc.base + ATMEL_TC_CCR(tce.channels[0]));
-+}
-+
-+static int __init tc_clkevt_register(struct device_node *node,
-+ struct regmap *regmap, void __iomem *base,
-+ int channel, int irq, int bits)
-+{
-+ int ret;
-+ struct clk *slow_clk;
-+
-+ tce.regmap = regmap;
-+ tce.base = base;
-+ tce.channels[0] = channel;
-+ tce.irq = irq;
-+
-+ slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
-+ if (IS_ERR(slow_clk))
-+ return PTR_ERR(slow_clk);
-+
-+ ret = clk_prepare_enable(slow_clk);
-+ if (ret)
-+ return ret;
-+
-+ tce.clk[0] = tcb_clk_get(node, tce.channels[0]);
-+ if (IS_ERR(tce.clk[0])) {
-+ ret = PTR_ERR(tce.clk[0]);
-+ goto err_slow;
-+ }
-+
-+ snprintf(tce.name, sizeof(tce.name), "%s:%d",
-+ kbasename(node->parent->full_name), channel);
-+ tce.clkevt.cpumask = cpumask_of(0);
-+ tce.clkevt.name = tce.name;
-+ tce.clkevt.set_next_event = tc_clkevt2_next_event,
-+ tce.clkevt.set_state_shutdown = tc_clkevt2_shutdown,
-+ tce.clkevt.set_state_periodic = tc_clkevt2_set_periodic,
-+ tce.clkevt.set_state_oneshot = tc_clkevt2_set_oneshot,
-+ tce.clkevt.suspend = tc_clkevt2_suspend,
-+ tce.clkevt.resume = tc_clkevt2_resume,
-+ tce.clkevt.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
-+ tce.clkevt.rating = 140;
-+
-+ /* try to enable clk to avoid future errors in mode change */
-+ ret = clk_prepare_enable(tce.clk[0]);
-+ if (ret)
-+ goto err_slow;
-+ clk_disable(tce.clk[0]);
-+
-+ clockevents_config_and_register(&tce.clkevt, 32768, 1,
-+ CLOCKSOURCE_MASK(bits));
-+
-+ ret = request_irq(tce.irq, tc_clkevt2_irq, IRQF_TIMER | IRQF_SHARED,
-+ tce.clkevt.name, &tce);
-+ if (ret)
-+ goto err_clk;
-+
-+ tce.registered = true;
-+
-+ return 0;
-+
-+err_clk:
-+ clk_unprepare(tce.clk[0]);
-+err_slow:
-+ clk_disable_unprepare(slow_clk);
-+
-+ return ret;
-+}
-+
-+/*
- * Clocksource and clockevent using the same channel(s)
- */
- static u64 tc_get_cycles(struct clocksource *cs)
-@@ -363,7 +560,7 @@ static int __init tcb_clksrc_init(struct
- int irq, err, chan1 = -1;
- unsigned bits;
-
-- if (tc.registered)
-+ if (tc.registered && tce.registered)
- return -ENODEV;
-
- /*
-@@ -395,12 +592,22 @@ static int __init tcb_clksrc_init(struct
- return irq;
- }
-
-+ if (tc.registered)
-+ return tc_clkevt_register(node, regmap, tcb_base, channel, irq,
-+ bits);
-+
- if (bits == 16) {
- of_property_read_u32_index(node, "reg", 1, &chan1);
- if (chan1 == -1) {
-- pr_err("%s: clocksource needs two channels\n",
-- node->parent->full_name);
-- return -EINVAL;
-+ if (tce.registered) {
-+ pr_err("%s: clocksource needs two channels\n",
-+ node->parent->full_name);
-+ return -EINVAL;
-+ } else {
-+ return tc_clkevt_register(node, regmap,
-+ tcb_base, channel,
-+ irq, bits);
-+ }
- }
- }
-
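The separate-channel clockevent in the patch deleted above follows the usual oneshot clock_event_device recipe: fill in the state callbacks, pick a cpumask and a rating, then hand the base clock rate and counter limits to clockevents_config_and_register(). A stripped-down sketch of that recipe under hypothetical example_* names, not code from the patch:

/*
 * Sketch of the oneshot clock_event_device pattern; all example_* names are
 * hypothetical and the hardware accesses are left as comments.
 */
#include <linux/clockchips.h>
#include <linux/cpumask.h>
#include <linux/init.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *d)
{
	/* program the comparator to fire after 'delta' ticks */
	return 0;
}

static int example_set_oneshot(struct clock_event_device *d)
{
	/* ungate the channel clock and unmask the compare interrupt */
	return 0;
}

static int example_shutdown(struct clock_event_device *d)
{
	/* mask the interrupt and gate the channel clock */
	return 0;
}

static struct clock_event_device example_clkevt = {
	.name			= "example",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 140,
	.set_next_event		= example_set_next_event,
	.set_state_oneshot	= example_set_oneshot,
	.set_state_shutdown	= example_shutdown,
};

static void __init example_clkevt_register(void)
{
	example_clkevt.cpumask = cpumask_of(0);
	/* 32.768 kHz base clock, minimum delta 1 tick, 16-bit counter limit */
	clockevents_config_and_register(&example_clkevt, 32768, 1, 0xffff);
}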
diff --git a/patches/0003-misc-atmel_tclib-move-definitions-to-header-file.patch b/patches/0003-misc-atmel_tclib-move-definitions-to-header-file.patch
new file mode 100644
index 000000000000..b999c1147004
--- /dev/null
+++ b/patches/0003-misc-atmel_tclib-move-definitions-to-header-file.patch
@@ -0,0 +1,85 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:11 +0200
+Subject: [PATCH 03/12] misc: atmel_tclib: move definitions to header file
+
+Move atmel_tc_divisors and atmel_tcb_dt_ids definitions to the header file
+so they can be used without relying on tclib.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/atmel_tclib.c | 24 ------------------------
+ include/soc/at91/atmel_tcb.h | 21 ++++++++++++++++++++-
+ 2 files changed, 20 insertions(+), 25 deletions(-)
+
+--- a/drivers/misc/atmel_tclib.c
++++ b/drivers/misc/atmel_tclib.c
+@@ -17,10 +17,6 @@
+ * share individual timers between different drivers.
+ */
+
+-/* AT91 has these divide MCK */
+-const u8 atmel_tc_divisors[5] = { 2, 8, 32, 128, 0, };
+-EXPORT_SYMBOL(atmel_tc_divisors);
+-
+ static DEFINE_SPINLOCK(tc_list_lock);
+ static LIST_HEAD(tc_list);
+
+@@ -72,26 +68,6 @@ void atmel_tc_free(struct atmel_tc *tc)
+ EXPORT_SYMBOL_GPL(atmel_tc_free);
+
+ #if defined(CONFIG_OF)
+-static struct atmel_tcb_config tcb_rm9200_config = {
+- .counter_width = 16,
+-};
+-
+-static struct atmel_tcb_config tcb_sam9x5_config = {
+- .counter_width = 32,
+-};
+-
+-static const struct of_device_id atmel_tcb_dt_ids[] = {
+- {
+- .compatible = "atmel,at91rm9200-tcb",
+- .data = &tcb_rm9200_config,
+- }, {
+- .compatible = "atmel,at91sam9x5-tcb",
+- .data = &tcb_sam9x5_config,
+- }, {
+- /* sentinel */
+- }
+-};
+-
+ MODULE_DEVICE_TABLE(of, atmel_tcb_dt_ids);
+ #endif
+
+--- a/include/soc/at91/atmel_tcb.h
++++ b/include/soc/at91/atmel_tcb.h
+@@ -76,8 +76,27 @@ extern struct atmel_tc *atmel_tc_alloc(u
+ extern void atmel_tc_free(struct atmel_tc *tc);
+
+ /* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
+-extern const u8 atmel_tc_divisors[5];
++static const u8 atmel_tc_divisors[] = { 2, 8, 32, 128, 0, };
+
++static const struct atmel_tcb_config tcb_rm9200_config = {
++ .counter_width = 16,
++};
++
++static const struct atmel_tcb_config tcb_sam9x5_config = {
++ .counter_width = 32,
++};
++
++static const struct of_device_id atmel_tcb_dt_ids[] = {
++ {
++ .compatible = "atmel,at91rm9200-tcb",
++ .data = &tcb_rm9200_config,
++ }, {
++ .compatible = "atmel,at91sam9x5-tcb",
++ .data = &tcb_sam9x5_config,
++ }, {
++ /* sentinel */
++ }
++};
+
+ /*
+ * Two registers have block-wide controls. These are: configuring the three
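Exporting atmel_tcb_dt_ids and the two atmel_tcb_config entries from the header is what lets the timer driver discover the per-SoC counter width on its own. A short sketch of that lookup; the table and struct names come from the header above, while the wrapper function is hypothetical:

/* Hypothetical helper showing how of_device_id.data carries the counter width. */
#include <linux/of.h>
#include <soc/at91/atmel_tcb.h>

static unsigned int example_tcb_counter_width(struct device_node *tcb_node)
{
	const struct of_device_id *match;
	const struct atmel_tcb_config *config;

	match = of_match_node(atmel_tcb_dt_ids, tcb_node);
	if (!match)
		return 0;

	config = match->data;		/* tcb_rm9200_config or tcb_sam9x5_config */
	return config->counter_width;	/* 16 on rm9200, 32 on sam9x5 */
}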
diff --git a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
index 7cce9a754217..2579c1b52362 100644
--- a/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
+++ b/patches/0003-mm-SLxB-change-list_lock-to-raw_spinlock_t.patch
@@ -39,7 +39,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -717,7 +717,7 @@ static void __drain_alien_cache(struct k
+@@ -718,7 +718,7 @@ static void __drain_alien_cache(struct k
struct kmem_cache_node *n = get_node(cachep, node);
if (ac->avail) {
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Stuff objects into the remote nodes shared array first.
* That way we could avoid the overhead of putting the objects
-@@ -728,7 +728,7 @@ static void __drain_alien_cache(struct k
+@@ -729,7 +729,7 @@ static void __drain_alien_cache(struct k
free_block(cachep, ac->entry, ac->avail, node, list);
ac->avail = 0;
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -801,9 +801,9 @@ static int __cache_free_alien(struct kme
+@@ -802,9 +802,9 @@ static int __cache_free_alien(struct kme
slabs_destroy(cachep, &list);
} else {
n = get_node(cachep, page_node);
@@ -69,7 +69,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
return 1;
-@@ -844,10 +844,10 @@ static int init_cache_node(struct kmem_c
+@@ -845,10 +845,10 @@ static int init_cache_node(struct kmem_c
*/
n = get_node(cachep, node);
if (n) {
@@ -82,7 +82,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -926,7 +926,7 @@ static int setup_kmem_cache_node(struct
+@@ -927,7 +927,7 @@ static int setup_kmem_cache_node(struct
goto fail;
n = get_node(cachep, node);
@@ -91,7 +91,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared && force_change) {
free_block(cachep, n->shared->entry,
n->shared->avail, node, &list);
-@@ -944,7 +944,7 @@ static int setup_kmem_cache_node(struct
+@@ -945,7 +945,7 @@ static int setup_kmem_cache_node(struct
new_alien = NULL;
}
@@ -100,7 +100,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
/*
-@@ -983,7 +983,7 @@ static void cpuup_canceled(long cpu)
+@@ -984,7 +984,7 @@ static void cpuup_canceled(long cpu)
if (!n)
continue;
@@ -109,7 +109,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Free limit for this kmem_cache_node */
n->free_limit -= cachep->batchcount;
-@@ -996,7 +996,7 @@ static void cpuup_canceled(long cpu)
+@@ -997,7 +997,7 @@ static void cpuup_canceled(long cpu)
}
if (!cpumask_empty(mask)) {
@@ -118,7 +118,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto free_slab;
}
-@@ -1010,7 +1010,7 @@ static void cpuup_canceled(long cpu)
+@@ -1011,7 +1011,7 @@ static void cpuup_canceled(long cpu)
alien = n->alien;
n->alien = NULL;
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
kfree(shared);
if (alien) {
-@@ -1194,7 +1194,7 @@ static void __init init_list(struct kmem
+@@ -1195,7 +1195,7 @@ static void __init init_list(struct kmem
/*
* Do not assume that spinlocks can be initialized via memcpy:
*/
@@ -136,7 +136,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
MAKE_ALL_LISTS(cachep, ptr, nodeid);
cachep->node[nodeid] = ptr;
-@@ -1365,11 +1365,11 @@ slab_out_of_memory(struct kmem_cache *ca
+@@ -1366,11 +1366,11 @@ slab_out_of_memory(struct kmem_cache *ca
for_each_kmem_cache_node(cachep, node, n) {
unsigned long total_slabs, free_slabs, free_objs;
@@ -150,7 +150,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
pr_warn(" node %d: slabs: %ld/%ld, objs: %ld/%ld\n",
node, total_slabs - free_slabs, total_slabs,
-@@ -2162,7 +2162,7 @@ static void check_spinlock_acquired(stru
+@@ -2165,7 +2165,7 @@ static void check_spinlock_acquired(stru
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -159,7 +159,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2170,7 +2170,7 @@ static void check_spinlock_acquired_node
+@@ -2173,7 +2173,7 @@ static void check_spinlock_acquired_node
{
#ifdef CONFIG_SMP
check_irq_off();
@@ -168,7 +168,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
}
-@@ -2210,9 +2210,9 @@ static void do_drain(void *arg)
+@@ -2213,9 +2213,9 @@ static void do_drain(void *arg)
check_irq_off();
ac = cpu_cache_get(cachep);
n = get_node(cachep, node);
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail = 0;
}
-@@ -2230,9 +2230,9 @@ static void drain_cpu_caches(struct kmem
+@@ -2233,9 +2233,9 @@ static void drain_cpu_caches(struct kmem
drain_alien_cache(cachep, n->alien);
for_each_kmem_cache_node(cachep, node, n) {
@@ -192,7 +192,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -2254,10 +2254,10 @@ static int drain_freelist(struct kmem_ca
+@@ -2257,10 +2257,10 @@ static int drain_freelist(struct kmem_ca
nr_freed = 0;
while (nr_freed < tofree && !list_empty(&n->slabs_free)) {
@@ -205,7 +205,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
}
-@@ -2270,7 +2270,7 @@ static int drain_freelist(struct kmem_ca
+@@ -2273,7 +2273,7 @@ static int drain_freelist(struct kmem_ca
* to the cache.
*/
n->free_objects -= cache->num;
@@ -214,7 +214,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_destroy(cache, page);
nr_freed++;
}
-@@ -2725,7 +2725,7 @@ static void cache_grow_end(struct kmem_c
+@@ -2728,7 +2728,7 @@ static void cache_grow_end(struct kmem_c
INIT_LIST_HEAD(&page->lru);
n = get_node(cachep, page_to_nid(page));
@@ -223,7 +223,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
n->total_slabs++;
if (!page->active) {
list_add_tail(&page->lru, &(n->slabs_free));
-@@ -2735,7 +2735,7 @@ static void cache_grow_end(struct kmem_c
+@@ -2738,7 +2738,7 @@ static void cache_grow_end(struct kmem_c
STATS_INC_GROWN(cachep);
n->free_objects += cachep->num - page->active;
@@ -232,7 +232,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
}
-@@ -2903,7 +2903,7 @@ static struct page *get_first_slab(struc
+@@ -2906,7 +2906,7 @@ static struct page *get_first_slab(struc
{
struct page *page;
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
if (!page) {
n->free_touched = 1;
-@@ -2929,10 +2929,10 @@ static noinline void *cache_alloc_pfmema
+@@ -2932,10 +2932,10 @@ static noinline void *cache_alloc_pfmema
if (!gfp_pfmemalloc_allowed(flags))
return NULL;
@@ -254,7 +254,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return NULL;
}
-@@ -2941,7 +2941,7 @@ static noinline void *cache_alloc_pfmema
+@@ -2944,7 +2944,7 @@ static noinline void *cache_alloc_pfmema
fixup_slab_list(cachep, n, page, &list);
@@ -263,7 +263,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
return obj;
-@@ -3000,7 +3000,7 @@ static void *cache_alloc_refill(struct k
+@@ -3003,7 +3003,7 @@ static void *cache_alloc_refill(struct k
if (!n->free_objects && (!shared || !shared->avail))
goto direct_grow;
@@ -272,7 +272,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
shared = READ_ONCE(n->shared);
/* See if we can refill from the shared array */
-@@ -3024,7 +3024,7 @@ static void *cache_alloc_refill(struct k
+@@ -3027,7 +3027,7 @@ static void *cache_alloc_refill(struct k
must_grow:
n->free_objects -= ac->avail;
alloc_done:
@@ -281,7 +281,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
fixup_objfreelist_debug(cachep, &list);
direct_grow:
-@@ -3249,7 +3249,7 @@ static void *____cache_alloc_node(struct
+@@ -3252,7 +3252,7 @@ static void *____cache_alloc_node(struct
BUG_ON(!n);
check_irq_off();
@@ -290,7 +290,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = get_first_slab(n, false);
if (!page)
goto must_grow;
-@@ -3267,12 +3267,12 @@ static void *____cache_alloc_node(struct
+@@ -3270,12 +3270,12 @@ static void *____cache_alloc_node(struct
fixup_slab_list(cachep, n, page, &list);
@@ -305,7 +305,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
page = cache_grow_begin(cachep, gfp_exact_node(flags), nodeid);
if (page) {
/* This slab isn't counted yet so don't update free_objects */
-@@ -3448,7 +3448,7 @@ static void cache_flusharray(struct kmem
+@@ -3451,7 +3451,7 @@ static void cache_flusharray(struct kmem
check_irq_off();
n = get_node(cachep, node);
@@ -314,7 +314,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (n->shared) {
struct array_cache *shared_array = n->shared;
int max = shared_array->limit - shared_array->avail;
-@@ -3477,7 +3477,7 @@ static void cache_flusharray(struct kmem
+@@ -3480,7 +3480,7 @@ static void cache_flusharray(struct kmem
STATS_SET_FREEABLE(cachep, i);
}
#endif
@@ -323,7 +323,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
ac->avail -= batchcount;
memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
-@@ -3885,9 +3885,9 @@ static int __do_tune_cpucache(struct kme
+@@ -3888,9 +3888,9 @@ static int __do_tune_cpucache(struct kme
node = cpu_to_mem(cpu);
n = get_node(cachep, node);
@@ -335,7 +335,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
free_percpu(prev);
-@@ -4012,9 +4012,9 @@ static void drain_array(struct kmem_cach
+@@ -4015,9 +4015,9 @@ static void drain_array(struct kmem_cach
return;
}
@@ -347,7 +347,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slabs_destroy(cachep, &list);
}
-@@ -4098,7 +4098,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4101,7 +4101,7 @@ void get_slabinfo(struct kmem_cache *cac
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -356,7 +356,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
total_slabs += n->total_slabs;
free_slabs += n->free_slabs;
-@@ -4107,7 +4107,7 @@ void get_slabinfo(struct kmem_cache *cac
+@@ -4110,7 +4110,7 @@ void get_slabinfo(struct kmem_cache *cac
if (n->shared)
shared_avail += n->shared->avail;
@@ -365,7 +365,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
num_objs = total_slabs * cachep->num;
active_slabs = total_slabs - free_slabs;
-@@ -4322,13 +4322,13 @@ static int leaks_show(struct seq_file *m
+@@ -4325,13 +4325,13 @@ static int leaks_show(struct seq_file *m
for_each_kmem_cache_node(cachep, node, n) {
check_irq_on();
@@ -383,7 +383,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slab.h
+++ b/mm/slab.h
-@@ -452,7 +452,7 @@ static inline void slab_post_alloc_hook(
+@@ -453,7 +453,7 @@ static inline void slab_post_alloc_hook(
* The slab lists for all objects.
*/
struct kmem_cache_node {
@@ -537,7 +537,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
atomic_long_set(&n->nr_slabs, 0);
-@@ -3703,7 +3703,7 @@ static void free_partial(struct kmem_cac
+@@ -3706,7 +3706,7 @@ static void free_partial(struct kmem_cac
struct page *page, *h;
BUG_ON(irqs_disabled());
@@ -546,7 +546,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
remove_partial(n, page);
-@@ -3713,7 +3713,7 @@ static void free_partial(struct kmem_cac
+@@ -3716,7 +3716,7 @@ static void free_partial(struct kmem_cac
"Objects remaining in %s on __kmem_cache_shutdown()");
}
}
@@ -555,7 +555,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry_safe(page, h, &discard, lru)
discard_slab(s, page);
-@@ -3987,7 +3987,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -3990,7 +3990,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
INIT_LIST_HEAD(promote + i);
@@ -564,7 +564,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Build lists of slabs to discard or promote.
-@@ -4018,7 +4018,7 @@ int __kmem_cache_shrink(struct kmem_cach
+@@ -4021,7 +4021,7 @@ int __kmem_cache_shrink(struct kmem_cach
for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
list_splice(promote + i, &n->partial);
@@ -573,7 +573,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Release empty slabs */
list_for_each_entry_safe(page, t, &discard, lru)
-@@ -4432,7 +4432,7 @@ static int validate_slab_node(struct kme
+@@ -4435,7 +4435,7 @@ static int validate_slab_node(struct kme
struct page *page;
unsigned long flags;
@@ -582,7 +582,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
list_for_each_entry(page, &n->partial, lru) {
validate_slab_slab(s, page, map);
-@@ -4454,7 +4454,7 @@ static int validate_slab_node(struct kme
+@@ -4457,7 +4457,7 @@ static int validate_slab_node(struct kme
s->name, count, atomic_long_read(&n->nr_slabs));
out:
@@ -591,7 +591,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return count;
}
-@@ -4640,12 +4640,12 @@ static int list_locations(struct kmem_ca
+@@ -4643,12 +4643,12 @@ static int list_locations(struct kmem_ca
if (!atomic_long_read(&n->nr_slabs))
continue;
diff --git a/patches/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch b/patches/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
index 825381c053b5..5eada5b78908 100644
--- a/patches/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
+++ b/patches/0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 17 Oct 2018 14:58:28 +0200
-Subject: [PATCH 03/22] x86/fpu: Remove preempt_disable() in fpu__clear()
+Subject: [PATCH 03/27] x86/fpu: Remove preempt_disable() in fpu__clear()
The preempt_disable() section was introduced in commit
diff --git a/patches/0004-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch b/patches/0004-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
new file mode 100644
index 000000000000..e1db641b3b57
--- /dev/null
+++ b/patches/0004-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
@@ -0,0 +1,241 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:12 +0200
+Subject: [PATCH 04/12] clocksource/drivers/tcb_clksrc: stop depending on
+ atmel_tclib
+
+atmel_tclib is probed too late in the boot process to be able to use the
+TCB as the boot clocksource. This is an issue for SoCs without the PIT
+(sams70, samv70 and samv71 families), as they currently can't boot at all.
+
+Get rid of the atmel_tclib dependency and probe everything on our own using
+the correct device tree binding.
+
+This also allows getting rid of ATMEL_TCB_CLKSRC_BLOCK and makes the driver
+a bit more flexible, as the TCB is no longer hardcoded in the kernel.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/tcb_clksrc.c | 103 ++++++++++++++++++++++++---------------
+ drivers/misc/Kconfig | 14 -----
+ 2 files changed, 66 insertions(+), 51 deletions(-)
+
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -9,7 +9,8 @@
+ #include <linux/err.h>
+ #include <linux/ioport.h>
+ #include <linux/io.h>
+-#include <linux/platform_device.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
+ #include <linux/syscore_ops.h>
+ #include <soc/at91/atmel_tcb.h>
+
+@@ -28,13 +29,6 @@
+ * source, used in either periodic or oneshot mode. This runs
+ * at 32 KiHZ, and can handle delays of up to two seconds.
+ *
+- * A boot clocksource and clockevent source are also currently needed,
+- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
+- * this code can be used when init_timers() is called, well before most
+- * devices are set up. (Some low end AT91 parts, which can run uClinux,
+- * have only the timers in one TC block... they currently don't support
+- * the tclib code, because of that initialization issue.)
+- *
+ * REVISIT behavior during system suspend states... we should disable
+ * all clocks and save the power. Easily done for clockevent devices,
+ * but clocksources won't necessarily get the needed notifications.
+@@ -112,7 +106,6 @@ void tc_clksrc_resume(struct clocksource
+ }
+
+ static struct clocksource clksrc = {
+- .name = "tcb_clksrc",
+ .rating = 200,
+ .read = tc_get_cycles,
+ .mask = CLOCKSOURCE_MASK(32),
+@@ -214,7 +207,6 @@ static int tc_next_event(unsigned long d
+
+ static struct tc_clkevt_device clkevt = {
+ .clkevt = {
+- .name = "tc_clkevt",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ /* Should be lower than at91rm9200's system timer */
+@@ -330,33 +322,64 @@ static void __init tcb_setup_single_chan
+ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+ }
+
+-static int __init tcb_clksrc_init(void)
++static int __init tcb_clksrc_init(struct device_node *node)
+ {
+- static char bootinfo[] __initdata
+- = KERN_DEBUG "%s: tc%d at %d.%03d MHz\n";
+-
+- struct platform_device *pdev;
+- struct atmel_tc *tc;
++ struct atmel_tc tc;
+ struct clk *t0_clk;
++ const struct of_device_id *match;
++ int irq;
+ u32 rate, divided_rate = 0;
+ int best_divisor_idx = -1;
+ int clk32k_divisor_idx = -1;
+ int i;
+ int ret;
+
+- tc = atmel_tc_alloc(CONFIG_ATMEL_TCB_CLKSRC_BLOCK);
+- if (!tc) {
+- pr_debug("can't alloc TC for clocksource\n");
+- return -ENODEV;
+- }
+- tcaddr = tc->regs;
+- pdev = tc->pdev;
++ /* Protect against multiple calls */
++ if (tcaddr)
++ return 0;
++
++ tc.regs = of_iomap(node->parent, 0);
++ if (!tc.regs)
++ return -ENXIO;
++
++ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
++ if (IS_ERR(t0_clk))
++ return PTR_ERR(t0_clk);
++
++ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
++ if (IS_ERR(tc.slow_clk))
++ return PTR_ERR(tc.slow_clk);
++
++ irq = of_irq_get(node->parent, 0);
++ if (irq <= 0)
++ return -EINVAL;
++
++ tc.clk[0] = t0_clk;
++ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
++ if (IS_ERR(tc.clk[1]))
++ tc.clk[1] = t0_clk;
++ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
++ if (IS_ERR(tc.clk[2]))
++ tc.clk[2] = t0_clk;
++
++ tc.irq[0] = irq;
++ tc.irq[1] = of_irq_get(node->parent, 1);
++ if (tc.irq[1] <= 0)
++ tc.irq[1] = irq;
++ tc.irq[2] = of_irq_get(node->parent, 2);
++ if (tc.irq[2] <= 0)
++ tc.irq[2] = irq;
++
++ match = of_match_node(atmel_tcb_dt_ids, node->parent);
++ tc.tcb_config = match->data;
++
++ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
++ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+
+- t0_clk = tc->clk[0];
+ ret = clk_prepare_enable(t0_clk);
+ if (ret) {
+ pr_debug("can't enable T0 clk\n");
+- goto err_free_tc;
++ return ret;
+ }
+
+ /* How fast will we be counting? Pick something over 5 MHz. */
+@@ -381,27 +404,29 @@ static int __init tcb_clksrc_init(void)
+ best_divisor_idx = i;
+ }
+
+-
+- printk(bootinfo, clksrc.name, CONFIG_ATMEL_TCB_CLKSRC_BLOCK,
+- divided_rate / 1000000,
++ clksrc.name = kbasename(node->parent->full_name);
++ clkevt.clkevt.name = kbasename(node->parent->full_name);
++ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
+ ((divided_rate % 1000000) + 500) / 1000);
+
+- if (tc->tcb_config && tc->tcb_config->counter_width == 32) {
++ tcaddr = tc.regs;
++
++ if (tc.tcb_config->counter_width == 32) {
+ /* use apropriate function to read 32 bit counter */
+ clksrc.read = tc_get_cycles32;
+ /* setup ony channel 0 */
+- tcb_setup_single_chan(tc, best_divisor_idx);
++ tcb_setup_single_chan(&tc, best_divisor_idx);
+ } else {
+- /* tclib will give us three clocks no matter what the
++ /* we have three clocks no matter what the
+ * underlying platform supports.
+ */
+- ret = clk_prepare_enable(tc->clk[1]);
++ ret = clk_prepare_enable(tc.clk[1]);
+ if (ret) {
+ pr_debug("can't enable T1 clk\n");
+ goto err_disable_t0;
+ }
+ /* setup both channel 0 & 1 */
+- tcb_setup_dual_chan(tc, best_divisor_idx);
++ tcb_setup_dual_chan(&tc, best_divisor_idx);
+ }
+
+ /* and away we go! */
+@@ -410,7 +435,7 @@ static int __init tcb_clksrc_init(void)
+ goto err_disable_t1;
+
+ /* channel 2: periodic and oneshot timer support */
+- ret = setup_clkevents(tc, clk32k_divisor_idx);
++ ret = setup_clkevents(&tc, clk32k_divisor_idx);
+ if (ret)
+ goto err_unregister_clksrc;
+
+@@ -420,14 +445,14 @@ static int __init tcb_clksrc_init(void)
+ clocksource_unregister(&clksrc);
+
+ err_disable_t1:
+- if (!tc->tcb_config || tc->tcb_config->counter_width != 32)
+- clk_disable_unprepare(tc->clk[1]);
++ if (tc.tcb_config->counter_width != 32)
++ clk_disable_unprepare(tc.clk[1]);
+
+ err_disable_t0:
+ clk_disable_unprepare(t0_clk);
+
+-err_free_tc:
+- atmel_tc_free(tc);
++ tcaddr = NULL;
++
+ return ret;
+ }
+-arch_initcall(tcb_clksrc_init);
++TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -61,7 +61,8 @@ config ATMEL_TCLIB
+
+ config ATMEL_TCB_CLKSRC
+ bool "TC Block Clocksource"
+- depends on ATMEL_TCLIB
++ depends on ARCH_AT91
++ select TIMER_OF if OF
+ default y
+ help
+ Select this to get a high precision clocksource based on a
+@@ -72,17 +73,6 @@ config ATMEL_TCB_CLKSRC
+ may be used as a clock event device supporting oneshot mode
+ (delays of up to two seconds) based on the 32 KiHz clock.
+
+-config ATMEL_TCB_CLKSRC_BLOCK
+- int
+- depends on ATMEL_TCB_CLKSRC
+- default 0
+- range 0 1
+- help
+- Some chips provide more than one TC block, so you have the
+- choice of which one to use for the clock framework. The other
+- TC can be used for other purposes, such as PWM generation and
+- interval timing.
+-
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
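As a quick illustration of the divisor selection kept in the hunk above ("Pick something over 5 MHz"): the loop walks atmel_tc_divisors = { 2, 8, 32, 128, 0 }, skips the 0 entry (the 32 kHz slow-clock slot) and keeps the largest divisor that still yields at least 5 MHz, with the first entries accepted unconditionally as a fallback. With a hypothetical 132 MHz peripheral clock (the real rate comes from clk_get_rate() on t0_clk):

	132 MHz / 2   = 66 MHz       kept
	132 MHz / 8   = 16.5 MHz     kept (final choice)
	132 MHz / 32  = 4.125 MHz    below 5 MHz, skipped
	132 MHz / 128 = ~1.03 MHz    below 5 MHz, skipped

so the clocksource ends up counting at 16.5 MHz (divided_rate).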
diff --git a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
index 760cd91c67d8..00903cfd5664 100644
--- a/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
+++ b/patches/0004-mm-SLUB-delay-giving-back-empty-slubs-to-IRQ-enabled.patch
@@ -201,7 +201,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
slab_post_alloc_hook(s, flags, i, p);
__kmem_cache_free_bulk(s, i, p);
return 0;
-@@ -4231,6 +4289,12 @@ void __init kmem_cache_init(void)
+@@ -4234,6 +4292,12 @@ void __init kmem_cache_init(void)
{
static __initdata struct kmem_cache boot_kmem_cache,
boot_kmem_cache_node;
diff --git a/patches/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch b/patches/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
index 55af66919ee5..85a39a8dc8ca 100644
--- a/patches/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
+++ b/patches/0004-x86-fpu-Always-init-the-state-in-fpu__clear.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 17 Oct 2018 15:27:34 +0200
-Subject: [PATCH 04/22] x86/fpu: Always init the `state' in fpu__clear()
+Subject: [PATCH 04/27] x86/fpu: Always init the `state' in fpu__clear()
fpu__clear() only initializes the `state' if the FPU is present. This
initialisation is also required for the FPU-less system and takes place
diff --git a/patches/0005-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch b/patches/0005-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
new file mode 100644
index 000000000000..358553c15312
--- /dev/null
+++ b/patches/0005-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
@@ -0,0 +1,73 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:13 +0200
+Subject: [PATCH 05/12] clocksource/drivers/tcb_clksrc: Use tcb as sched_clock
+
+Now that the driver is registered early enough, use the TCB as the
+sched_clock, which is much more accurate than the jiffies-based implementation.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/tcb_clksrc.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/clocksource/tcb_clksrc.c
++++ b/drivers/clocksource/tcb_clksrc.c
+@@ -11,6 +11,7 @@
+ #include <linux/io.h>
+ #include <linux/of_address.h>
+ #include <linux/of_irq.h>
++#include <linux/sched_clock.h>
+ #include <linux/syscore_ops.h>
+ #include <soc/at91/atmel_tcb.h>
+
+@@ -114,6 +115,16 @@ static struct clocksource clksrc = {
+ .resume = tc_clksrc_resume,
+ };
+
++static u64 notrace tc_sched_clock_read(void)
++{
++ return tc_get_cycles(&clksrc);
++}
++
++static u64 notrace tc_sched_clock_read32(void)
++{
++ return tc_get_cycles32(&clksrc);
++}
++
+ #ifdef CONFIG_GENERIC_CLOCKEVENTS
+
+ struct tc_clkevt_device {
+@@ -327,6 +338,7 @@ static int __init tcb_clksrc_init(struct
+ struct atmel_tc tc;
+ struct clk *t0_clk;
+ const struct of_device_id *match;
++ u64 (*tc_sched_clock)(void);
+ int irq;
+ u32 rate, divided_rate = 0;
+ int best_divisor_idx = -1;
+@@ -416,6 +428,7 @@ static int __init tcb_clksrc_init(struct
+ clksrc.read = tc_get_cycles32;
+ /* setup ony channel 0 */
+ tcb_setup_single_chan(&tc, best_divisor_idx);
++ tc_sched_clock = tc_sched_clock_read32;
+ } else {
+ /* we have three clocks no matter what the
+ * underlying platform supports.
+@@ -427,6 +440,7 @@ static int __init tcb_clksrc_init(struct
+ }
+ /* setup both channel 0 & 1 */
+ tcb_setup_dual_chan(&tc, best_divisor_idx);
++ tc_sched_clock = tc_sched_clock_read;
+ }
+
+ /* and away we go! */
+@@ -439,6 +453,8 @@ static int __init tcb_clksrc_init(struct
+ if (ret)
+ goto err_unregister_clksrc;
+
++ sched_clock_register(tc_sched_clock, 32, divided_rate);
++
+ return 0;
+
+ err_unregister_clksrc:
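The sched_clock hookup added above follows the standard pattern: a notrace, lock-free 64-bit read callback is registered together with the number of valid counter bits and the counting rate, and the core then extends and scales the value to nanoseconds. A minimal sketch under hypothetical names (example_counter, example_sched_clock_read); the driver itself uses the tc_sched_clock_read*() helpers shown in the hunk above:

/* Minimal sched_clock hookup sketch; all example_* names are hypothetical. */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>
#include <linux/types.h>

static void __iomem *example_counter;	/* mapped free-running counter register */

static u64 notrace example_sched_clock_read(void)
{
	/* must stay cheap and notrace: called from scheduler and tracing paths */
	return readl_relaxed(example_counter);
}

static void __init example_sched_clock_setup(unsigned long rate_hz)
{
	/* 32 valid counter bits, counting at rate_hz */
	sched_clock_register(example_sched_clock_read, 32, rate_hz);
}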
diff --git a/patches/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch b/patches/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
index 9b0fb60b04c1..e73c2efc9922 100644
--- a/patches/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
+++ b/patches/0005-x86-fpu-Remove-fpu-initialized-usage-in-copy_fpstate.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 Oct 2018 16:57:14 +0200
-Subject: [PATCH 05/22] x86/fpu: Remove fpu->initialized usage in
+Subject: [PATCH 05/27] x86/fpu: Remove fpu->initialized usage in
copy_fpstate_to_sigframe()
With lazy-FPU support the (now named variable) ->initialized was set to true if
diff --git a/patches/0005-ARM-at91-Implement-clocksource-selection.patch b/patches/0006-ARM-at91-Implement-clocksource-selection.patch
index b044504c9ffb..710bd9122336 100644
--- a/patches/0005-ARM-at91-Implement-clocksource-selection.patch
+++ b/patches/0006-ARM-at91-Implement-clocksource-selection.patch
@@ -1,20 +1,19 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:22 +0200
-Subject: [PATCH 5/7] ARM: at91: Implement clocksource selection
+Date: Wed, 3 Apr 2019 16:11:14 +0200
+Subject: [PATCH 06/12] ARM: at91: Implement clocksource selection
Allow selecting and unselecting the PIT clocksource driver so it doesn't
-have to be compile when unused.
+have to be compiled when unused.
-Tested-by: Alexander Dahl <ada@thorsis.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/arm/mach-at91/Kconfig | 25 +++++++++++++++++++++++++
- 1 file changed, 25 insertions(+)
+ arch/arm/mach-at91/Kconfig | 23 +++++++++++++++++++++++
+ 1 file changed, 23 insertions(+)
--- a/arch/arm/mach-at91/Kconfig
+++ b/arch/arm/mach-at91/Kconfig
-@@ -107,6 +107,31 @@ config SOC_AT91SAM9
+@@ -107,6 +107,29 @@ config SOC_AT91SAM9
AT91SAM9X35
AT91SAM9XE
@@ -32,10 +31,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+config ATMEL_CLOCKSOURCE_TCB
+ bool "Timer Counter Blocks (TCB) support"
-+ depends on SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5 || COMPILE_TEST
+ default SOC_AT91RM9200 || SOC_AT91SAM9 || SOC_SAMA5
-+ depends on !ATMEL_TCLIB
-+ select ATMEL_ARM_TCB_CLKSRC
++ select ATMEL_TCB_CLKSRC
+ help
+ Select this to get a high precision clocksource based on a
+ TC block with a 5+ MHz base clock rate.
diff --git a/patches/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch b/patches/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch
deleted file mode 100644
index aaeac81386ad..000000000000
--- a/patches/0006-ARM-configs-at91-use-new-TCB-timer-driver.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:23 +0200
-Subject: [PATCH 6/7] ARM: configs: at91: use new TCB timer driver
-
-Unselecting ATMEL_TCLIB switches the TCB timer driver from tcb_clksrc to
-timer-atmel-tcb.
-
-Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm/configs/at91_dt_defconfig | 1 -
- arch/arm/configs/sama5_defconfig | 1 -
- 2 files changed, 2 deletions(-)
-
---- a/arch/arm/configs/at91_dt_defconfig
-+++ b/arch/arm/configs/at91_dt_defconfig
-@@ -64,7 +64,6 @@ CONFIG_BLK_DEV_LOOP=y
- CONFIG_BLK_DEV_RAM=y
- CONFIG_BLK_DEV_RAM_COUNT=4
- CONFIG_BLK_DEV_RAM_SIZE=8192
--CONFIG_ATMEL_TCLIB=y
- CONFIG_ATMEL_SSC=y
- CONFIG_SCSI=y
- CONFIG_BLK_DEV_SD=y
---- a/arch/arm/configs/sama5_defconfig
-+++ b/arch/arm/configs/sama5_defconfig
-@@ -75,7 +75,6 @@ CONFIG_BLK_DEV_LOOP=y
- CONFIG_BLK_DEV_RAM=y
- CONFIG_BLK_DEV_RAM_COUNT=4
- CONFIG_BLK_DEV_RAM_SIZE=8192
--CONFIG_ATMEL_TCLIB=y
- CONFIG_ATMEL_SSC=y
- CONFIG_EEPROM_AT24=y
- CONFIG_SCSI=y
diff --git a/patches/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch b/patches/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
index e401adfb11f1..f4b13a706c75 100644
--- a/patches/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
+++ b/patches/0006-x86-fpu-Don-t-save-fxregs-for-ia32-frames-in-copy_fp.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Nov 2018 13:08:50 +0100
-Subject: [PATCH 06/22] x86/fpu: Don't save fxregs for ia32 frames in
+Subject: [PATCH 06/27] x86/fpu: Don't save fxregs for ia32 frames in
copy_fpstate_to_sigframe()
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
diff --git a/patches/0007-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch b/patches/0007-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
new file mode 100644
index 000000000000..883fc957af99
--- /dev/null
+++ b/patches/0007-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
@@ -0,0 +1,53 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:15 +0200
+Subject: [PATCH 07/12] clocksource/drivers/tcb_clksrc: move Kconfig option
+
+Move the ATMEL_TCB_CLKSRC option to drivers/clocksource and make it silent
+if COMPILE_TEST is not selected.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/Kconfig | 7 +++++++
+ drivers/misc/Kconfig | 14 --------------
+ 2 files changed, 7 insertions(+), 14 deletions(-)
+
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -409,6 +409,13 @@ config ATMEL_ST
+ help
+ Support for the Atmel ST timer.
+
++config ATMEL_TCB_CLKSRC
++ bool "Atmel TC Block timer driver" if COMPILE_TEST
++ depends on HAS_IOMEM
++ select TIMER_OF if OF
++ help
++ Support for Timer Counter Blocks on Atmel SoCs.
++
+ config CLKSRC_EXYNOS_MCT
+ bool "Exynos multi core timer driver" if COMPILE_TEST
+ depends on ARM || ARM64
+--- a/drivers/misc/Kconfig
++++ b/drivers/misc/Kconfig
+@@ -59,20 +59,6 @@ config ATMEL_TCLIB
+ blocks found on many Atmel processors. This facilitates using
+ these blocks by different drivers despite processor differences.
+
+-config ATMEL_TCB_CLKSRC
+- bool "TC Block Clocksource"
+- depends on ARCH_AT91
+- select TIMER_OF if OF
+- default y
+- help
+- Select this to get a high precision clocksource based on a
+- TC block with a 5+ MHz base clock rate. Two timer channels
+- are combined to make a single 32-bit timer.
+-
+- When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
+-
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
diff --git a/patches/0007-x86-fpu-Remove-fpu-initialized.patch b/patches/0007-x86-fpu-Remove-fpu-initialized.patch
index c56b5d1c017f..cb7124478b38 100644
--- a/patches/0007-x86-fpu-Remove-fpu-initialized.patch
+++ b/patches/0007-x86-fpu-Remove-fpu-initialized.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 17 Oct 2018 18:08:35 +0200
-Subject: [PATCH 07/22] x86/fpu: Remove fpu->initialized
+Subject: [PATCH 07/27] x86/fpu: Remove fpu->initialized
The `initialized' member of the fpu struct is always set to one for user
tasks and zero for kernel tasks. This avoids saving/restoring the FPU
diff --git a/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch b/patches/0008-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
index 7293785122f9..65212342d6dd 100644
--- a/patches/0004-clocksource-drivers-atmel-pit-make-option-silent.patch
+++ b/patches/0008-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
@@ -1,11 +1,11 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:21 +0200
-Subject: [PATCH 4/7] clocksource/drivers: atmel-pit: make option silent
+Date: Wed, 3 Apr 2019 16:11:16 +0200
+Subject: [PATCH 08/12] clocksource/drivers/timer-atmel-pit: rework Kconfig
+ option
-To conform with the other option, make the ATMEL_PIT option silent so it
-can be selected from the platform
+Allow building the PIT driver when COMPILE_TEST is enabled. Also remove its
+default value so it can be disabled.
-Tested-by: Alexander Dahl <ada@thorsis.com>
Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
@@ -14,16 +14,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
-@@ -388,8 +388,11 @@ config ARMV7M_SYSTICK
+@@ -398,8 +398,11 @@ config ARMV7M_SYSTICK
This options enables support for the ARMv7M system timer unit
config ATMEL_PIT
-+ bool "Microchip ARM Periodic Interval Timer (PIT)" if COMPILE_TEST
++ bool "Atmel PIT support" if COMPILE_TEST
++ depends on HAS_IOMEM
select TIMER_OF if OF
- def_bool SOC_AT91SAM9 || SOC_SAMA5
+ help
-+ This enables build of clocksource and clockevent driver for
-+ the integrated PIT in Microchip ARM SoCs.
++ Support for the Periodic Interval Timer found on Atmel SoCs.
config ATMEL_ST
bool "Atmel ST timer support" if COMPILE_TEST
diff --git a/patches/0008-x86-fpu-Remove-user_fpu_begin.patch b/patches/0008-x86-fpu-Remove-user_fpu_begin.patch
index a9dc3e074e1f..ba4b924a37a5 100644
--- a/patches/0008-x86-fpu-Remove-user_fpu_begin.patch
+++ b/patches/0008-x86-fpu-Remove-user_fpu_begin.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 18 Oct 2018 18:34:11 +0200
-Subject: [PATCH 08/22] x86/fpu: Remove user_fpu_begin()
+Subject: [PATCH 08/27] x86/fpu: Remove user_fpu_begin()
user_fpu_begin() sets fpu_fpregs_owner_ctx to task's fpu struct. This is
always the case since there is no lazy FPU anymore.
diff --git a/patches/0009-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch b/patches/0009-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
new file mode 100644
index 000000000000..3cd0c377e7fd
--- /dev/null
+++ b/patches/0009-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
@@ -0,0 +1,984 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:17 +0200
+Subject: [PATCH 09/12] clocksource/drivers/tcb_clksrc: Rename the file for
+ consistency
+
+For the sake of consistency, let's rename the file to a name similar
+to other file names in this directory.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/Makefile | 2 +-
+ drivers/clocksource/{tcb_clksrc.c => timer-atmel-tcb.c} | 0
+ drivers/clocksource/Makefile | 2
+ drivers/clocksource/tcb_clksrc.c | 474 ----------------------------------
+ drivers/clocksource/timer-atmel-tcb.c | 474 ++++++++++++++++++++++++++++++++++
+ 3 files changed, 475 insertions(+), 475 deletions(-)
+ rename drivers/clocksource/{tcb_clksrc.c => timer-atmel-tcb.c} (100%)
+
+--- a/drivers/clocksource/Makefile
++++ b/drivers/clocksource/Makefile
+@@ -3,7 +3,7 @@ obj-$(CONFIG_TIMER_OF) += timer-of.o
+ obj-$(CONFIG_TIMER_PROBE) += timer-probe.o
+ obj-$(CONFIG_ATMEL_PIT) += timer-atmel-pit.o
+ obj-$(CONFIG_ATMEL_ST) += timer-atmel-st.o
+-obj-$(CONFIG_ATMEL_TCB_CLKSRC) += tcb_clksrc.o
++obj-$(CONFIG_ATMEL_TCB_CLKSRC) += timer-atmel-tcb.o
+ obj-$(CONFIG_X86_PM_TIMER) += acpi_pm.o
+ obj-$(CONFIG_SCx200HR_TIMER) += scx200_hrt.o
+ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
+--- a/drivers/clocksource/tcb_clksrc.c
++++ /dev/null
+@@ -1,474 +0,0 @@
+-// SPDX-License-Identifier: GPL-2.0
+-#include <linux/init.h>
+-#include <linux/clocksource.h>
+-#include <linux/clockchips.h>
+-#include <linux/interrupt.h>
+-#include <linux/irq.h>
+-
+-#include <linux/clk.h>
+-#include <linux/err.h>
+-#include <linux/ioport.h>
+-#include <linux/io.h>
+-#include <linux/of_address.h>
+-#include <linux/of_irq.h>
+-#include <linux/sched_clock.h>
+-#include <linux/syscore_ops.h>
+-#include <soc/at91/atmel_tcb.h>
+-
+-
+-/*
+- * We're configured to use a specific TC block, one that's not hooked
+- * up to external hardware, to provide a time solution:
+- *
+- * - Two channels combine to create a free-running 32 bit counter
+- * with a base rate of 5+ MHz, packaged as a clocksource (with
+- * resolution better than 200 nsec).
+- * - Some chips support 32 bit counter. A single channel is used for
+- * this 32 bit free-running counter. the second channel is not used.
+- *
+- * - The third channel may be used to provide a 16-bit clockevent
+- * source, used in either periodic or oneshot mode. This runs
+- * at 32 KiHZ, and can handle delays of up to two seconds.
+- *
+- * REVISIT behavior during system suspend states... we should disable
+- * all clocks and save the power. Easily done for clockevent devices,
+- * but clocksources won't necessarily get the needed notifications.
+- * For deeper system sleep states, this will be mandatory...
+- */
+-
+-static void __iomem *tcaddr;
+-static struct
+-{
+- u32 cmr;
+- u32 imr;
+- u32 rc;
+- bool clken;
+-} tcb_cache[3];
+-static u32 bmr_cache;
+-
+-static u64 tc_get_cycles(struct clocksource *cs)
+-{
+- unsigned long flags;
+- u32 lower, upper;
+-
+- raw_local_irq_save(flags);
+- do {
+- upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
+- lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+- } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
+-
+- raw_local_irq_restore(flags);
+- return (upper << 16) | lower;
+-}
+-
+-static u64 tc_get_cycles32(struct clocksource *cs)
+-{
+- return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
+-}
+-
+-void tc_clksrc_suspend(struct clocksource *cs)
+-{
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+- tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
+- tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
+- tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
+- tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
+- ATMEL_TC_CLKSTA);
+- }
+-
+- bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
+-}
+-
+-void tc_clksrc_resume(struct clocksource *cs)
+-{
+- int i;
+-
+- for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
+- /* Restore registers for the channel, RA and RB are not used */
+- writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
+- writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
+- writel(0, tcaddr + ATMEL_TC_REG(i, RA));
+- writel(0, tcaddr + ATMEL_TC_REG(i, RB));
+- /* Disable all the interrupts */
+- writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
+- /* Reenable interrupts that were enabled before suspending */
+- writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
+- /* Start the clock if it was used */
+- if (tcb_cache[i].clken)
+- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
+- }
+-
+- /* Dual channel, chain channels */
+- writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
+- /* Finally, trigger all the channels*/
+- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+-}
+-
+-static struct clocksource clksrc = {
+- .rating = 200,
+- .read = tc_get_cycles,
+- .mask = CLOCKSOURCE_MASK(32),
+- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+- .suspend = tc_clksrc_suspend,
+- .resume = tc_clksrc_resume,
+-};
+-
+-static u64 notrace tc_sched_clock_read(void)
+-{
+- return tc_get_cycles(&clksrc);
+-}
+-
+-static u64 notrace tc_sched_clock_read32(void)
+-{
+- return tc_get_cycles32(&clksrc);
+-}
+-
+-#ifdef CONFIG_GENERIC_CLOCKEVENTS
+-
+-struct tc_clkevt_device {
+- struct clock_event_device clkevt;
+- struct clk *clk;
+- void __iomem *regs;
+-};
+-
+-static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
+-{
+- return container_of(clkevt, struct tc_clkevt_device, clkevt);
+-}
+-
+-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
+- * because using one of the divided clocks would usually mean the
+- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
+- *
+- * A divided clock could be good for high resolution timers, since
+- * 30.5 usec resolution can seem "low".
+- */
+-static u32 timer_clock;
+-
+-static int tc_shutdown(struct clock_event_device *d)
+-{
+- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+- void __iomem *regs = tcd->regs;
+-
+- writel(0xff, regs + ATMEL_TC_REG(2, IDR));
+- writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
+- if (!clockevent_state_detached(d))
+- clk_disable(tcd->clk);
+-
+- return 0;
+-}
+-
+-static int tc_set_oneshot(struct clock_event_device *d)
+-{
+- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+- void __iomem *regs = tcd->regs;
+-
+- if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+- tc_shutdown(d);
+-
+- clk_enable(tcd->clk);
+-
+- /* slow clock, count up to RC, then irq and stop */
+- writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
+- ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
+- writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+-
+- /* set_next_event() configures and starts the timer */
+- return 0;
+-}
+-
+-static int tc_set_periodic(struct clock_event_device *d)
+-{
+- struct tc_clkevt_device *tcd = to_tc_clkevt(d);
+- void __iomem *regs = tcd->regs;
+-
+- if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
+- tc_shutdown(d);
+-
+- /* By not making the gentime core emulate periodic mode on top
+- * of oneshot, we get lower overhead and improved accuracy.
+- */
+- clk_enable(tcd->clk);
+-
+- /* slow clock, count up to RC, then irq and restart */
+- writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
+- regs + ATMEL_TC_REG(2, CMR));
+- writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+-
+- /* Enable clock and interrupts on RC compare */
+- writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
+-
+- /* go go gadget! */
+- writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
+- ATMEL_TC_REG(2, CCR));
+- return 0;
+-}
+-
+-static int tc_next_event(unsigned long delta, struct clock_event_device *d)
+-{
+- writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
+-
+- /* go go gadget! */
+- writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
+- tcaddr + ATMEL_TC_REG(2, CCR));
+- return 0;
+-}
+-
+-static struct tc_clkevt_device clkevt = {
+- .clkevt = {
+- .features = CLOCK_EVT_FEAT_PERIODIC |
+- CLOCK_EVT_FEAT_ONESHOT,
+- /* Should be lower than at91rm9200's system timer */
+- .rating = 125,
+- .set_next_event = tc_next_event,
+- .set_state_shutdown = tc_shutdown,
+- .set_state_periodic = tc_set_periodic,
+- .set_state_oneshot = tc_set_oneshot,
+- },
+-};
+-
+-static irqreturn_t ch2_irq(int irq, void *handle)
+-{
+- struct tc_clkevt_device *dev = handle;
+- unsigned int sr;
+-
+- sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
+- if (sr & ATMEL_TC_CPCS) {
+- dev->clkevt.event_handler(&dev->clkevt);
+- return IRQ_HANDLED;
+- }
+-
+- return IRQ_NONE;
+-}
+-
+-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+-{
+- int ret;
+- struct clk *t2_clk = tc->clk[2];
+- int irq = tc->irq[2];
+-
+- ret = clk_prepare_enable(tc->slow_clk);
+- if (ret)
+- return ret;
+-
+- /* try to enable t2 clk to avoid future errors in mode change */
+- ret = clk_prepare_enable(t2_clk);
+- if (ret) {
+- clk_disable_unprepare(tc->slow_clk);
+- return ret;
+- }
+-
+- clk_disable(t2_clk);
+-
+- clkevt.regs = tc->regs;
+- clkevt.clk = t2_clk;
+-
+- timer_clock = clk32k_divisor_idx;
+-
+- clkevt.clkevt.cpumask = cpumask_of(0);
+-
+- ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
+- if (ret) {
+- clk_unprepare(t2_clk);
+- clk_disable_unprepare(tc->slow_clk);
+- return ret;
+- }
+-
+- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+-
+- return ret;
+-}
+-
+-#else /* !CONFIG_GENERIC_CLOCKEVENTS */
+-
+-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+-{
+- /* NOTHING */
+- return 0;
+-}
+-
+-#endif
+-
+-static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
+-{
+- /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
+- writel(mck_divisor_idx /* likely divide-by-8 */
+- | ATMEL_TC_WAVE
+- | ATMEL_TC_WAVESEL_UP /* free-run */
+- | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
+- | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
+- tcaddr + ATMEL_TC_REG(0, CMR));
+- writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
+- writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
+- writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
+- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+-
+- /* channel 1: waveform mode, input TIOA0 */
+- writel(ATMEL_TC_XC1 /* input: TIOA0 */
+- | ATMEL_TC_WAVE
+- | ATMEL_TC_WAVESEL_UP, /* free-run */
+- tcaddr + ATMEL_TC_REG(1, CMR));
+- writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
+- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
+-
+- /* chain channel 0 to channel 1*/
+- writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
+- /* then reset all the timers */
+- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+-}
+-
+-static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
+-{
+- /* channel 0: waveform mode, input mclk/8 */
+- writel(mck_divisor_idx /* likely divide-by-8 */
+- | ATMEL_TC_WAVE
+- | ATMEL_TC_WAVESEL_UP, /* free-run */
+- tcaddr + ATMEL_TC_REG(0, CMR));
+- writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
+- writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
+-
+- /* then reset all the timers */
+- writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
+-}
+-
+-static int __init tcb_clksrc_init(struct device_node *node)
+-{
+- struct atmel_tc tc;
+- struct clk *t0_clk;
+- const struct of_device_id *match;
+- u64 (*tc_sched_clock)(void);
+- int irq;
+- u32 rate, divided_rate = 0;
+- int best_divisor_idx = -1;
+- int clk32k_divisor_idx = -1;
+- int i;
+- int ret;
+-
+- /* Protect against multiple calls */
+- if (tcaddr)
+- return 0;
+-
+- tc.regs = of_iomap(node->parent, 0);
+- if (!tc.regs)
+- return -ENXIO;
+-
+- t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
+- if (IS_ERR(t0_clk))
+- return PTR_ERR(t0_clk);
+-
+- tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
+- if (IS_ERR(tc.slow_clk))
+- return PTR_ERR(tc.slow_clk);
+-
+- irq = of_irq_get(node->parent, 0);
+- if (irq <= 0)
+- return -EINVAL;
+-
+- tc.clk[0] = t0_clk;
+- tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
+- if (IS_ERR(tc.clk[1]))
+- tc.clk[1] = t0_clk;
+- tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
+- if (IS_ERR(tc.clk[2]))
+- tc.clk[2] = t0_clk;
+-
+- tc.irq[0] = irq;
+- tc.irq[1] = of_irq_get(node->parent, 1);
+- if (tc.irq[1] <= 0)
+- tc.irq[1] = irq;
+- tc.irq[2] = of_irq_get(node->parent, 2);
+- if (tc.irq[2] <= 0)
+- tc.irq[2] = irq;
+-
+- match = of_match_node(atmel_tcb_dt_ids, node->parent);
+- tc.tcb_config = match->data;
+-
+- for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
+- writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
+-
+- ret = clk_prepare_enable(t0_clk);
+- if (ret) {
+- pr_debug("can't enable T0 clk\n");
+- return ret;
+- }
+-
+- /* How fast will we be counting? Pick something over 5 MHz. */
+- rate = (u32) clk_get_rate(t0_clk);
+- for (i = 0; i < 5; i++) {
+- unsigned divisor = atmel_tc_divisors[i];
+- unsigned tmp;
+-
+- /* remember 32 KiHz clock for later */
+- if (!divisor) {
+- clk32k_divisor_idx = i;
+- continue;
+- }
+-
+- tmp = rate / divisor;
+- pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
+- if (best_divisor_idx > 0) {
+- if (tmp < 5 * 1000 * 1000)
+- continue;
+- }
+- divided_rate = tmp;
+- best_divisor_idx = i;
+- }
+-
+- clksrc.name = kbasename(node->parent->full_name);
+- clkevt.clkevt.name = kbasename(node->parent->full_name);
+- pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
+- ((divided_rate % 1000000) + 500) / 1000);
+-
+- tcaddr = tc.regs;
+-
+- if (tc.tcb_config->counter_width == 32) {
+- /* use apropriate function to read 32 bit counter */
+- clksrc.read = tc_get_cycles32;
+- /* setup ony channel 0 */
+- tcb_setup_single_chan(&tc, best_divisor_idx);
+- tc_sched_clock = tc_sched_clock_read32;
+- } else {
+- /* we have three clocks no matter what the
+- * underlying platform supports.
+- */
+- ret = clk_prepare_enable(tc.clk[1]);
+- if (ret) {
+- pr_debug("can't enable T1 clk\n");
+- goto err_disable_t0;
+- }
+- /* setup both channel 0 & 1 */
+- tcb_setup_dual_chan(&tc, best_divisor_idx);
+- tc_sched_clock = tc_sched_clock_read;
+- }
+-
+- /* and away we go! */
+- ret = clocksource_register_hz(&clksrc, divided_rate);
+- if (ret)
+- goto err_disable_t1;
+-
+- /* channel 2: periodic and oneshot timer support */
+- ret = setup_clkevents(&tc, clk32k_divisor_idx);
+- if (ret)
+- goto err_unregister_clksrc;
+-
+- sched_clock_register(tc_sched_clock, 32, divided_rate);
+-
+- return 0;
+-
+-err_unregister_clksrc:
+- clocksource_unregister(&clksrc);
+-
+-err_disable_t1:
+- if (tc.tcb_config->counter_width != 32)
+- clk_disable_unprepare(tc.clk[1]);
+-
+-err_disable_t0:
+- clk_disable_unprepare(t0_clk);
+-
+- tcaddr = NULL;
+-
+- return ret;
+-}
+-TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
+--- /dev/null
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -0,0 +1,474 @@
++// SPDX-License-Identifier: GPL-2.0
++#include <linux/init.h>
++#include <linux/clocksource.h>
++#include <linux/clockchips.h>
++#include <linux/interrupt.h>
++#include <linux/irq.h>
++
++#include <linux/clk.h>
++#include <linux/err.h>
++#include <linux/ioport.h>
++#include <linux/io.h>
++#include <linux/of_address.h>
++#include <linux/of_irq.h>
++#include <linux/sched_clock.h>
++#include <linux/syscore_ops.h>
++#include <soc/at91/atmel_tcb.h>
++
++
++/*
++ * We're configured to use a specific TC block, one that's not hooked
++ * up to external hardware, to provide a time solution:
++ *
++ * - Two channels combine to create a free-running 32 bit counter
++ * with a base rate of 5+ MHz, packaged as a clocksource (with
++ * resolution better than 200 nsec).
++ * - Some chips support a 32 bit counter. A single channel is used for
++ *   this 32 bit free-running counter; the second channel is not used.
++ *
++ * - The third channel may be used to provide a 16-bit clockevent
++ * source, used in either periodic or oneshot mode. This runs
++ * at 32 KiHz, and can handle delays of up to two seconds.
++ *
++ * REVISIT behavior during system suspend states... we should disable
++ * all clocks and save the power. Easily done for clockevent devices,
++ * but clocksources won't necessarily get the needed notifications.
++ * For deeper system sleep states, this will be mandatory...
++ */
++
++static void __iomem *tcaddr;
++static struct
++{
++ u32 cmr;
++ u32 imr;
++ u32 rc;
++ bool clken;
++} tcb_cache[3];
++static u32 bmr_cache;
++
++static u64 tc_get_cycles(struct clocksource *cs)
++{
++ unsigned long flags;
++ u32 lower, upper;
++
++ raw_local_irq_save(flags);
++ do {
++ upper = readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV));
++ lower = readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
++ } while (upper != readl_relaxed(tcaddr + ATMEL_TC_REG(1, CV)));
++
++ raw_local_irq_restore(flags);
++ return (upper << 16) | lower;
++}
++
++static u64 tc_get_cycles32(struct clocksource *cs)
++{
++ return readl_relaxed(tcaddr + ATMEL_TC_REG(0, CV));
++}
++
++void tc_clksrc_suspend(struct clocksource *cs)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
++ tcb_cache[i].cmr = readl(tcaddr + ATMEL_TC_REG(i, CMR));
++ tcb_cache[i].imr = readl(tcaddr + ATMEL_TC_REG(i, IMR));
++ tcb_cache[i].rc = readl(tcaddr + ATMEL_TC_REG(i, RC));
++ tcb_cache[i].clken = !!(readl(tcaddr + ATMEL_TC_REG(i, SR)) &
++ ATMEL_TC_CLKSTA);
++ }
++
++ bmr_cache = readl(tcaddr + ATMEL_TC_BMR);
++}
++
++void tc_clksrc_resume(struct clocksource *cs)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(tcb_cache); i++) {
++ /* Restore registers for the channel, RA and RB are not used */
++ writel(tcb_cache[i].cmr, tcaddr + ATMEL_TC_REG(i, CMR));
++ writel(tcb_cache[i].rc, tcaddr + ATMEL_TC_REG(i, RC));
++ writel(0, tcaddr + ATMEL_TC_REG(i, RA));
++ writel(0, tcaddr + ATMEL_TC_REG(i, RB));
++ /* Disable all the interrupts */
++ writel(0xff, tcaddr + ATMEL_TC_REG(i, IDR));
++ /* Reenable interrupts that were enabled before suspending */
++ writel(tcb_cache[i].imr, tcaddr + ATMEL_TC_REG(i, IER));
++ /* Start the clock if it was used */
++ if (tcb_cache[i].clken)
++ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(i, CCR));
++ }
++
++ /* Dual channel, chain channels */
++ writel(bmr_cache, tcaddr + ATMEL_TC_BMR);
++ /* Finally, trigger all the channels*/
++ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
++}
++
++static struct clocksource clksrc = {
++ .rating = 200,
++ .read = tc_get_cycles,
++ .mask = CLOCKSOURCE_MASK(32),
++ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
++ .suspend = tc_clksrc_suspend,
++ .resume = tc_clksrc_resume,
++};
++
++static u64 notrace tc_sched_clock_read(void)
++{
++ return tc_get_cycles(&clksrc);
++}
++
++static u64 notrace tc_sched_clock_read32(void)
++{
++ return tc_get_cycles32(&clksrc);
++}
++
++#ifdef CONFIG_GENERIC_CLOCKEVENTS
++
++struct tc_clkevt_device {
++ struct clock_event_device clkevt;
++ struct clk *clk;
++ void __iomem *regs;
++};
++
++static struct tc_clkevt_device *to_tc_clkevt(struct clock_event_device *clkevt)
++{
++ return container_of(clkevt, struct tc_clkevt_device, clkevt);
++}
++
++/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
++ * because using one of the divided clocks would usually mean the
++ * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
++ *
++ * A divided clock could be good for high resolution timers, since
++ * 30.5 usec resolution can seem "low".
++ */
++static u32 timer_clock;
++
++static int tc_shutdown(struct clock_event_device *d)
++{
++ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
++ void __iomem *regs = tcd->regs;
++
++ writel(0xff, regs + ATMEL_TC_REG(2, IDR));
++ writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
++ if (!clockevent_state_detached(d))
++ clk_disable(tcd->clk);
++
++ return 0;
++}
++
++static int tc_set_oneshot(struct clock_event_device *d)
++{
++ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
++ void __iomem *regs = tcd->regs;
++
++ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
++ tc_shutdown(d);
++
++ clk_enable(tcd->clk);
++
++ /* slow clock, count up to RC, then irq and stop */
++ writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
++ ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
++ writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
++
++ /* set_next_event() configures and starts the timer */
++ return 0;
++}
++
++static int tc_set_periodic(struct clock_event_device *d)
++{
++ struct tc_clkevt_device *tcd = to_tc_clkevt(d);
++ void __iomem *regs = tcd->regs;
++
++ if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
++ tc_shutdown(d);
++
++ /* By not making the gentime core emulate periodic mode on top
++ * of oneshot, we get lower overhead and improved accuracy.
++ */
++ clk_enable(tcd->clk);
++
++ /* slow clock, count up to RC, then irq and restart */
++ writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
++ regs + ATMEL_TC_REG(2, CMR));
++ writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
++
++ /* Enable clock and interrupts on RC compare */
++ writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
++
++ /* go go gadget! */
++ writel(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG, regs +
++ ATMEL_TC_REG(2, CCR));
++ return 0;
++}
++
++static int tc_next_event(unsigned long delta, struct clock_event_device *d)
++{
++ writel_relaxed(delta, tcaddr + ATMEL_TC_REG(2, RC));
++
++ /* go go gadget! */
++ writel_relaxed(ATMEL_TC_CLKEN | ATMEL_TC_SWTRG,
++ tcaddr + ATMEL_TC_REG(2, CCR));
++ return 0;
++}
++
++static struct tc_clkevt_device clkevt = {
++ .clkevt = {
++ .features = CLOCK_EVT_FEAT_PERIODIC |
++ CLOCK_EVT_FEAT_ONESHOT,
++ /* Should be lower than at91rm9200's system timer */
++ .rating = 125,
++ .set_next_event = tc_next_event,
++ .set_state_shutdown = tc_shutdown,
++ .set_state_periodic = tc_set_periodic,
++ .set_state_oneshot = tc_set_oneshot,
++ },
++};
++
++static irqreturn_t ch2_irq(int irq, void *handle)
++{
++ struct tc_clkevt_device *dev = handle;
++ unsigned int sr;
++
++ sr = readl_relaxed(dev->regs + ATMEL_TC_REG(2, SR));
++ if (sr & ATMEL_TC_CPCS) {
++ dev->clkevt.event_handler(&dev->clkevt);
++ return IRQ_HANDLED;
++ }
++
++ return IRQ_NONE;
++}
++
++static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++{
++ int ret;
++ struct clk *t2_clk = tc->clk[2];
++ int irq = tc->irq[2];
++
++ ret = clk_prepare_enable(tc->slow_clk);
++ if (ret)
++ return ret;
++
++ /* try to enable t2 clk to avoid future errors in mode change */
++ ret = clk_prepare_enable(t2_clk);
++ if (ret) {
++ clk_disable_unprepare(tc->slow_clk);
++ return ret;
++ }
++
++ clk_disable(t2_clk);
++
++ clkevt.regs = tc->regs;
++ clkevt.clk = t2_clk;
++
++ timer_clock = clk32k_divisor_idx;
++
++ clkevt.clkevt.cpumask = cpumask_of(0);
++
++ ret = request_irq(irq, ch2_irq, IRQF_TIMER, "tc_clkevt", &clkevt);
++ if (ret) {
++ clk_unprepare(t2_clk);
++ clk_disable_unprepare(tc->slow_clk);
++ return ret;
++ }
++
++ clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
++
++ return ret;
++}
++
++#else /* !CONFIG_GENERIC_CLOCKEVENTS */
++
++static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
++{
++ /* NOTHING */
++ return 0;
++}
++
++#endif
++
++static void __init tcb_setup_dual_chan(struct atmel_tc *tc, int mck_divisor_idx)
++{
++ /* channel 0: waveform mode, input mclk/8, clock TIOA0 on overflow */
++ writel(mck_divisor_idx /* likely divide-by-8 */
++ | ATMEL_TC_WAVE
++ | ATMEL_TC_WAVESEL_UP /* free-run */
++ | ATMEL_TC_ACPA_SET /* TIOA0 rises at 0 */
++ | ATMEL_TC_ACPC_CLEAR, /* (duty cycle 50%) */
++ tcaddr + ATMEL_TC_REG(0, CMR));
++ writel(0x0000, tcaddr + ATMEL_TC_REG(0, RA));
++ writel(0x8000, tcaddr + ATMEL_TC_REG(0, RC));
++ writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
++ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
++
++ /* channel 1: waveform mode, input TIOA0 */
++ writel(ATMEL_TC_XC1 /* input: TIOA0 */
++ | ATMEL_TC_WAVE
++ | ATMEL_TC_WAVESEL_UP, /* free-run */
++ tcaddr + ATMEL_TC_REG(1, CMR));
++ writel(0xff, tcaddr + ATMEL_TC_REG(1, IDR)); /* no irqs */
++ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(1, CCR));
++
++ /* chain channel 0 to channel 1*/
++ writel(ATMEL_TC_TC1XC1S_TIOA0, tcaddr + ATMEL_TC_BMR);
++ /* then reset all the timers */
++ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
++}
++
++static void __init tcb_setup_single_chan(struct atmel_tc *tc, int mck_divisor_idx)
++{
++ /* channel 0: waveform mode, input mclk/8 */
++ writel(mck_divisor_idx /* likely divide-by-8 */
++ | ATMEL_TC_WAVE
++ | ATMEL_TC_WAVESEL_UP, /* free-run */
++ tcaddr + ATMEL_TC_REG(0, CMR));
++ writel(0xff, tcaddr + ATMEL_TC_REG(0, IDR)); /* no irqs */
++ writel(ATMEL_TC_CLKEN, tcaddr + ATMEL_TC_REG(0, CCR));
++
++ /* then reset all the timers */
++ writel(ATMEL_TC_SYNC, tcaddr + ATMEL_TC_BCR);
++}
++
++static int __init tcb_clksrc_init(struct device_node *node)
++{
++ struct atmel_tc tc;
++ struct clk *t0_clk;
++ const struct of_device_id *match;
++ u64 (*tc_sched_clock)(void);
++ int irq;
++ u32 rate, divided_rate = 0;
++ int best_divisor_idx = -1;
++ int clk32k_divisor_idx = -1;
++ int i;
++ int ret;
++
++ /* Protect against multiple calls */
++ if (tcaddr)
++ return 0;
++
++ tc.regs = of_iomap(node->parent, 0);
++ if (!tc.regs)
++ return -ENXIO;
++
++ t0_clk = of_clk_get_by_name(node->parent, "t0_clk");
++ if (IS_ERR(t0_clk))
++ return PTR_ERR(t0_clk);
++
++ tc.slow_clk = of_clk_get_by_name(node->parent, "slow_clk");
++ if (IS_ERR(tc.slow_clk))
++ return PTR_ERR(tc.slow_clk);
++
++ irq = of_irq_get(node->parent, 0);
++ if (irq <= 0)
++ return -EINVAL;
++
++ tc.clk[0] = t0_clk;
++ tc.clk[1] = of_clk_get_by_name(node->parent, "t1_clk");
++ if (IS_ERR(tc.clk[1]))
++ tc.clk[1] = t0_clk;
++ tc.clk[2] = of_clk_get_by_name(node->parent, "t2_clk");
++ if (IS_ERR(tc.clk[2]))
++ tc.clk[2] = t0_clk;
++
++ tc.irq[0] = irq;
++ tc.irq[1] = of_irq_get(node->parent, 1);
++ if (tc.irq[1] <= 0)
++ tc.irq[1] = irq;
++ tc.irq[2] = of_irq_get(node->parent, 2);
++ if (tc.irq[2] <= 0)
++ tc.irq[2] = irq;
++
++ match = of_match_node(atmel_tcb_dt_ids, node->parent);
++ tc.tcb_config = match->data;
++
++ for (i = 0; i < ARRAY_SIZE(tc.irq); i++)
++ writel(ATMEL_TC_ALL_IRQ, tc.regs + ATMEL_TC_REG(i, IDR));
++
++ ret = clk_prepare_enable(t0_clk);
++ if (ret) {
++ pr_debug("can't enable T0 clk\n");
++ return ret;
++ }
++
++ /* How fast will we be counting? Pick something over 5 MHz. */
++ rate = (u32) clk_get_rate(t0_clk);
++ for (i = 0; i < 5; i++) {
++ unsigned divisor = atmel_tc_divisors[i];
++ unsigned tmp;
++
++ /* remember 32 KiHz clock for later */
++ if (!divisor) {
++ clk32k_divisor_idx = i;
++ continue;
++ }
++
++ tmp = rate / divisor;
++ pr_debug("TC: %u / %-3u [%d] --> %u\n", rate, divisor, i, tmp);
++ if (best_divisor_idx > 0) {
++ if (tmp < 5 * 1000 * 1000)
++ continue;
++ }
++ divided_rate = tmp;
++ best_divisor_idx = i;
++ }
++
++ clksrc.name = kbasename(node->parent->full_name);
++ clkevt.clkevt.name = kbasename(node->parent->full_name);
++ pr_debug("%s at %d.%03d MHz\n", clksrc.name, divided_rate / 1000000,
++ ((divided_rate % 1000000) + 500) / 1000);
++
++ tcaddr = tc.regs;
++
++ if (tc.tcb_config->counter_width == 32) {
++ /* use appropriate function to read 32 bit counter */
++ clksrc.read = tc_get_cycles32;
++ /* setup only channel 0 */
++ tcb_setup_single_chan(&tc, best_divisor_idx);
++ tc_sched_clock = tc_sched_clock_read32;
++ } else {
++ /* we have three clocks no matter what the
++ * underlying platform supports.
++ */
++ ret = clk_prepare_enable(tc.clk[1]);
++ if (ret) {
++ pr_debug("can't enable T1 clk\n");
++ goto err_disable_t0;
++ }
++ /* setup both channel 0 & 1 */
++ tcb_setup_dual_chan(&tc, best_divisor_idx);
++ tc_sched_clock = tc_sched_clock_read;
++ }
++
++ /* and away we go! */
++ ret = clocksource_register_hz(&clksrc, divided_rate);
++ if (ret)
++ goto err_disable_t1;
++
++ /* channel 2: periodic and oneshot timer support */
++ ret = setup_clkevents(&tc, clk32k_divisor_idx);
++ if (ret)
++ goto err_unregister_clksrc;
++
++ sched_clock_register(tc_sched_clock, 32, divided_rate);
++
++ return 0;
++
++err_unregister_clksrc:
++ clocksource_unregister(&clksrc);
++
++err_disable_t1:
++ if (tc.tcb_config->counter_width != 32)
++ clk_disable_unprepare(tc.clk[1]);
++
++err_disable_t0:
++ clk_disable_unprepare(t0_clk);
++
++ tcaddr = NULL;
++
++ return ret;
++}
++TIMER_OF_DECLARE(atmel_tcb_clksrc, "atmel,tcb-timer", tcb_clksrc_init);
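A quick note on the clockevent numbers quoted in the comments above: with the 32768 Hz slow clock one cycle is roughly 30.5 usec, and the 16 bit RC register limits one-shot delays to about two seconds, which is also why clockevents_config_and_register() is called with a max delta of 0xffff. The following standalone sketch (plain user-space C, not kernel code; HZ=100 is only an assumed example tick rate) works out those figures and the rounded periodic RC value:

    #include <stdio.h>

    #define SLOW_CLK_HZ 32768u
    #define HZ          100u   /* assumed tick rate, for illustration only */

    int main(void)
    {
        /* one slow-clock cycle in nanoseconds: ~30518 ns (~30.5 usec) */
        unsigned long long res_ns = 1000000000ull / SLOW_CLK_HZ;

        /* largest one-shot delay with a 16 bit RC: 0xffff cycles, ~2 s */
        unsigned long long max_ms = 0xffffull * 1000 / SLOW_CLK_HZ;

        /* periodic reload, rounded to nearest: (32768 + HZ/2) / HZ */
        unsigned int rc = (SLOW_CLK_HZ + HZ / 2) / HZ;

        printf("resolution %llu ns, max delta %llu ms, periodic RC %u\n",
               res_ns, max_ms, rc);
        return 0;
    }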
diff --git a/patches/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch b/patches/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
index 269d8bdcfa5e..dcf1d0b767fc 100644
--- a/patches/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
+++ b/patches/0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
@@ -1,6 +1,6 @@
From: Rik van Riel <riel@surriel.com>
Date: Sun, 9 Sep 2018 18:30:45 +0200
-Subject: [PATCH 09/22] x86/fpu: Add (__)make_fpregs_active helpers
+Subject: [PATCH 09/27] x86/fpu: Add (__)make_fpregs_active helpers
Add helper function that ensures the floating point registers for
the current task are active. Use with preemption disabled.
diff --git a/patches/0007-ARM-configs-at91-unselect-PIT.patch b/patches/0010-ARM-configs-at91-unselect-PIT.patch
index f5694ce09cb6..625ed09b3677 100644
--- a/patches/0007-ARM-configs-at91-unselect-PIT.patch
+++ b/patches/0010-ARM-configs-at91-unselect-PIT.patch
@@ -1,6 +1,6 @@
From: Alexandre Belloni <alexandre.belloni@bootlin.com>
-Date: Thu, 13 Sep 2018 13:30:24 +0200
-Subject: [PATCH 7/7] ARM: configs: at91: unselect PIT
+Date: Wed, 3 Apr 2019 16:11:18 +0200
+Subject: [PATCH 10/12] ARM: configs: at91: unselect PIT
The PIT is not required anymore to successfully boot and may actually harm
in case preempt-rt is used because the PIT interrupt is shared.
diff --git a/patches/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch b/patches/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
index 15cae6f84ba4..02d68b99b270 100644
--- a/patches/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
+++ b/patches/0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 2 Oct 2018 10:28:15 +0200
-Subject: [PATCH 10/22] x86/fpu: Make __raw_xsave_addr() use feature number
+Subject: [PATCH 10/27] x86/fpu: Make __raw_xsave_addr() use feature number
instead of mask
Most users of __raw_xsave_addr() use a feature number, shift it to a
diff --git a/patches/0011-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch b/patches/0011-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
new file mode 100644
index 000000000000..ba907067bbc7
--- /dev/null
+++ b/patches/0011-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
@@ -0,0 +1,25 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:19 +0200
+Subject: [PATCH 11/12] misc: atmel_tclib: do not probe already used TCBs
+
+The TCBs that have children are using the new (proper) DT bindings and
+don't need to be handled by tclib.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/misc/atmel_tclib.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/misc/atmel_tclib.c
++++ b/drivers/misc/atmel_tclib.c
+@@ -79,6 +79,9 @@ static int __init tc_probe(struct platfo
+ struct resource *r;
+ unsigned int i;
+
++ if (of_get_child_count(pdev->dev.of_node))
++ return 0;
++
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -EINVAL;
diff --git a/patches/0011-printk_safe-remove-printk-safe-code.patch b/patches/0011-printk_safe-remove-printk-safe-code.patch
index cfc39420685f..390bbd607f91 100644
--- a/patches/0011-printk_safe-remove-printk-safe-code.patch
+++ b/patches/0011-printk_safe-remove-printk-safe-code.patch
@@ -677,7 +677,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-}
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
-@@ -8364,7 +8364,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8366,7 +8366,6 @@ void ftrace_dump(enum ftrace_dump_mode o
tracing_off();
local_irq_save(flags);
@@ -685,7 +685,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Simulate the iterator */
trace_init_global_iter(&iter);
-@@ -8445,7 +8444,6 @@ void ftrace_dump(enum ftrace_dump_mode o
+@@ -8447,7 +8446,6 @@ void ftrace_dump(enum ftrace_dump_mode o
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
}
atomic_dec(&dump_running);
diff --git a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
index 1d0ee41c9ca3..0fc44f71910d 100644
--- a/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
+++ b/patches/0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Thu, 18 Oct 2018 12:58:06 +0200
-Subject: [PATCH 11/22] x86/fpu: Make get_xsave_field_ptr() and
+Subject: [PATCH 11/27] x86/fpu: Make get_xsave_field_ptr() and
get_xsave_addr() use feature number instead of mask
After changing the argument of __raw_xsave_addr() from a mask to number
@@ -127,7 +127,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -3662,15 +3662,15 @@ static void fill_xsave(u8 *dest, struct
+@@ -3673,15 +3673,15 @@ static void fill_xsave(u8 *dest, struct
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
@@ -148,7 +148,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
memcpy(dest + offset, &vcpu->arch.pkru,
sizeof(vcpu->arch.pkru));
else
-@@ -3678,7 +3678,7 @@ static void fill_xsave(u8 *dest, struct
+@@ -3689,7 +3689,7 @@ static void fill_xsave(u8 *dest, struct
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -3705,22 +3705,22 @@ static void load_xsave(struct kvm_vcpu *
+@@ -3716,22 +3716,22 @@ static void load_xsave(struct kvm_vcpu *
*/
valid = xstate_bv & ~XFEATURE_MASK_FPSSE;
while (valid) {
@@ -186,7 +186,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -8813,11 +8813,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
+@@ -8841,11 +8841,11 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcp
if (init_event)
kvm_put_guest_fpu(vcpu);
mpx_state_buffer = get_xsave_addr(&vcpu->arch.guest_fpu->state.xsave,
diff --git a/patches/0012-clocksource-drivers-timer-atmel-tcb-Use-ARRAY_SIZE-i.patch b/patches/0012-clocksource-drivers-timer-atmel-tcb-Use-ARRAY_SIZE-i.patch
new file mode 100644
index 000000000000..474fe589120a
--- /dev/null
+++ b/patches/0012-clocksource-drivers-timer-atmel-tcb-Use-ARRAY_SIZE-i.patch
@@ -0,0 +1,25 @@
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Date: Wed, 3 Apr 2019 16:11:20 +0200
+Subject: [PATCH 12/12] clocksource/drivers/timer-atmel-tcb: Use ARRAY_SIZE
+ instead of hardcoded size
+
+Use ARRAY_SIZE to replace the hardcoded size so we will never have a
+mismatch.
+
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/clocksource/timer-atmel-tcb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -396,7 +396,7 @@ static int __init tcb_clksrc_init(struct
+
+ /* How fast will we be counting? Pick something over 5 MHz. */
+ rate = (u32) clk_get_rate(t0_clk);
+- for (i = 0; i < 5; i++) {
++ for (i = 0; i < ARRAY_SIZE(atmel_tc_divisors); i++) {
+ unsigned divisor = atmel_tc_divisors[i];
+ unsigned tmp;
+
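For reference, the replacement relies on the usual C idiom for deriving an array's element count. A minimal standalone sketch is below; it is a simplified form (the in-kernel ARRAY_SIZE() additionally rejects pointers at compile time), and the divisor values are quoted only for illustration, with 0 standing for the 32 KiHz slow clock:

    #include <stdio.h>

    /* simplified; the in-kernel macro also has a __must_be_array() check */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    static const unsigned int atmel_tc_divisors[] = { 2, 8, 32, 128, 0 };

    int main(void)
    {
        unsigned int i;

        /* the loop bound now follows the table, no hardcoded "5" to forget */
        for (i = 0; i < ARRAY_SIZE(atmel_tc_divisors); i++)
            printf("divisor[%u] = %u\n", i, atmel_tc_divisors[i]);
        return 0;
    }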
diff --git a/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch b/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
new file mode 100644
index 000000000000..adc38e1c89c8
--- /dev/null
+++ b/patches/0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
@@ -0,0 +1,77 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Mar 2019 13:15:32 +0100
+Subject: [PATCH 12/27] x86/pkru: Provide .*_pkru_ins() functions
+
+Dave Hansen has asked for __read_pkru() and __write_pkru() to be symmetrical.
+As part of the series __write_pkru() will read back the value and only write it
+if it is different.
+In order to make both functions symmetrical, move the function containing only
+the opcode into a function with the _ins() suffix. __write_pkru() will just
+invoke __write_pkru_ins() but in a followup patch will also read back the value.
+
+Suggested-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/include/asm/pgtable.h | 2 +-
+ arch/x86/include/asm/special_insns.h | 12 +++++++++---
+ arch/x86/kvm/vmx/vmx.c | 2 +-
+ 3 files changed, 11 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -127,7 +127,7 @@ static inline int pte_dirty(pte_t pte)
+ static inline u32 read_pkru(void)
+ {
+ if (boot_cpu_has(X86_FEATURE_OSPKE))
+- return __read_pkru();
++ return __read_pkru_ins();
+ return 0;
+ }
+
+--- a/arch/x86/include/asm/special_insns.h
++++ b/arch/x86/include/asm/special_insns.h
+@@ -92,7 +92,7 @@ static inline void native_write_cr8(unsi
+ #endif
+
+ #ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
+-static inline u32 __read_pkru(void)
++static inline u32 __read_pkru_ins(void)
+ {
+ u32 ecx = 0;
+ u32 edx, pkru;
+@@ -107,7 +107,7 @@ static inline u32 __read_pkru(void)
+ return pkru;
+ }
+
+-static inline void __write_pkru(u32 pkru)
++static inline void __write_pkru_ins(u32 pkru)
+ {
+ u32 ecx = 0, edx = 0;
+
+@@ -118,8 +118,14 @@ static inline void __write_pkru(u32 pkru
+ asm volatile(".byte 0x0f,0x01,0xef\n\t"
+ : : "a" (pkru), "c"(ecx), "d"(edx));
+ }
++
++static inline void __write_pkru(u32 pkru)
++{
++ __write_pkru_ins(pkru);
++}
++
+ #else
+-static inline u32 __read_pkru(void)
++static inline u32 __read_pkru_ins(void)
+ {
+ return 0;
+ }
+--- a/arch/x86/kvm/vmx/vmx.c
++++ b/arch/x86/kvm/vmx/vmx.c
+@@ -6630,7 +6630,7 @@ static void vmx_vcpu_run(struct kvm_vcpu
+ */
+ if (static_cpu_has(X86_FEATURE_PKU) &&
+ kvm_read_cr4_bits(vcpu, X86_CR4_PKE)) {
+- vcpu->arch.pkru = __read_pkru();
++ vcpu->arch.pkru = __read_pkru_ins();
+ if (vcpu->arch.pkru != vmx->host_pkru)
+ __write_pkru(vmx->host_pkru);
+ }
diff --git a/patches/0012-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch b/patches/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
index 9180d2a9a49c..e31c83d3c69d 100644
--- a/patches/0012-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
+++ b/patches/0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 Oct 2018 12:46:53 +0200
-Subject: [PATCH 12/22] x86/fpu: Only write PKRU if it is different from
+Subject: [PATCH 13/27] x86/fpu: Only write PKRU if it is different from
current
Dave Hansen says that the `wrpkru' is more expensive than `rdpkru'. It
@@ -17,16 +17,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
-@@ -112,6 +112,12 @@ static inline void __write_pkru(u32 pkru
- u32 ecx = 0, edx = 0;
+@@ -121,6 +121,12 @@ static inline void __write_pkru_ins(u32
- /*
+ static inline void __write_pkru(u32 pkru)
+ {
++ /*
+ * WRPKRU is relatively expensive compared to RDPKRU.
+ * Avoid WRPKRU when it would not change the value.
+ */
-+ if (pkru == __read_pkru())
++ if (pkru == __read_pkru_ins())
+ return;
-+ /*
- * "wrpkru" instruction. Loads contents in EAX to PKRU,
- * requires that ecx = edx = 0.
- */
+ __write_pkru_ins(pkru);
+ }
+
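Put together, once the 0012 and 0013 patches above are both applied, the __write_pkru() wrapper ends up looking like this (assembled from the two hunks for readability, nothing new added):

    static inline void __write_pkru(u32 pkru)
    {
        /*
         * WRPKRU is relatively expensive compared to RDPKRU.
         * Avoid WRPKRU when it would not change the value.
         */
        if (pkru == __read_pkru_ins())
            return;
        __write_pkru_ins(pkru);
    }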
diff --git a/patches/0013-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch b/patches/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
index cb15ca1b6cc8..f65a581d8b5d 100644
--- a/patches/0013-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
+++ b/patches/0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 Oct 2018 13:59:26 +0200
-Subject: [PATCH 13/22] x86/pkeys: Don't check if PKRU is zero before writting
+Subject: [PATCH 14/27] x86/pkeys: Don't check if PKRU is zero before writting
it
write_pkru() checks if the current value is the same as the expected
diff --git a/patches/0014-x86-fpu-Eager-switch-PKRU-state.patch b/patches/0015-x86-fpu-Eager-switch-PKRU-state.patch
index 4f76333470bf..85764fd14c41 100644
--- a/patches/0014-x86-fpu-Eager-switch-PKRU-state.patch
+++ b/patches/0015-x86-fpu-Eager-switch-PKRU-state.patch
@@ -1,6 +1,6 @@
From: Rik van Riel <riel@surriel.com>
Date: Sun, 9 Sep 2018 18:30:47 +0200
-Subject: [PATCH 14/22] x86/fpu: Eager switch PKRU state
+Subject: [PATCH 15/27] x86/fpu: Eager switch PKRU state
While most of a task's FPU state is only needed in user space, the
protection keys need to be in place immediately after a context switch.
@@ -19,28 +19,38 @@ accessed from pgtable.h and fpu/internal.h.
For user tasks we should always get the PKRU from the xsave area and it
should not change anything because the PKRU value was loaded as part of
FPU restore.
-For kernel kernel threads we now will have the default "allow
-everything" written. Before this commit the kernel thread would end up
-with a random value which it inherited from the previous user task.
+For kernel threads we now will have the default "init_pkru_value"
+written. Before this commit the kernel thread would end up with a
+random value which it inherited from the previous user task.
Signed-off-by: Rik van Riel <riel@surriel.com>
[bigeasy: save pkru to xstate, no cache, don't use __raw_xsave_addr()]
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/include/asm/fpu/internal.h | 20 ++++++++++++++++++--
+ arch/x86/include/asm/fpu/internal.h | 24 ++++++++++++++++++++++--
arch/x86/include/asm/fpu/xstate.h | 1 +
- 2 files changed, 19 insertions(+), 2 deletions(-)
+ arch/x86/include/asm/pgtable.h | 6 ++++++
+ arch/x86/mm/pkeys.c | 1 -
+ 4 files changed, 29 insertions(+), 3 deletions(-)
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -562,8 +562,24 @@ switch_fpu_prepare(struct fpu *old_fpu,
+@@ -14,6 +14,7 @@
+ #include <linux/compat.h>
+ #include <linux/sched.h>
+ #include <linux/slab.h>
++#include <linux/mm.h>
+
+ #include <asm/user.h>
+ #include <asm/fpu/api.h>
+@@ -562,8 +563,27 @@ switch_fpu_prepare(struct fpu *old_fpu,
*/
static inline void switch_fpu_finish(struct fpu *new_fpu, int cpu)
{
- if (static_cpu_has(X86_FEATURE_FPU))
- __fpregs_load_activate(new_fpu, cpu);
+ struct pkru_state *pk;
-+ u32 pkru_val = 0;
++ u32 pkru_val = init_pkru_value;
+
+ if (!static_cpu_has(X86_FEATURE_FPU))
+ return;
@@ -50,9 +60,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
+ return;
+
++ /*
++ * PKRU state is switched eagerly because it needs to be valid before we
++ * return to userland e.g. for a copy_to_user() operation.
++ */
+ if (current->mm) {
+ pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
-+ WARN_ON_ONCE(!pk);
+ if (pk)
+ pkru_val = pk->pkru;
+ }
@@ -70,3 +83,28 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Bit 63 of XCR0 is reserved for future expansion */
#define XFEATURE_MASK_EXTEND (~(XFEATURE_MASK_FPSSE | (1ULL << 63)))
+--- a/arch/x86/include/asm/pgtable.h
++++ b/arch/x86/include/asm/pgtable.h
+@@ -1355,6 +1355,12 @@ static inline pmd_t pmd_swp_clear_soft_d
+ #define PKRU_WD_BIT 0x2
+ #define PKRU_BITS_PER_PKEY 2
+
++#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
++extern u32 init_pkru_value;
++#else
++#define init_pkru_value 0
++#endif
++
+ static inline bool __pkru_allows_read(u32 pkru, u16 pkey)
+ {
+ int pkru_pkey_bits = pkey * PKRU_BITS_PER_PKEY;
+--- a/arch/x86/mm/pkeys.c
++++ b/arch/x86/mm/pkeys.c
+@@ -126,7 +126,6 @@ int __arch_override_mprotect_pkey(struct
+ * in the process's lifetime will not accidentally get access
+ * to data which is pkey-protected later on.
+ */
+-static
+ u32 init_pkru_value = PKRU_AD_KEY( 1) | PKRU_AD_KEY( 2) | PKRU_AD_KEY( 3) |
+ PKRU_AD_KEY( 4) | PKRU_AD_KEY( 5) | PKRU_AD_KEY( 6) |
+ PKRU_AD_KEY( 7) | PKRU_AD_KEY( 8) | PKRU_AD_KEY( 9) |
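Since the switch_fpu_finish() changes above are spread over two hunks, here is the PKRU-selection flow they add, gathered in one place. Only the lines visible in the hunks are shown; the FPU register activation in between and the final write-back of pkru_val sit in parts of the function that the diff context does not quote:

    struct pkru_state *pk;
    u32 pkru_val = init_pkru_value;

    if (!static_cpu_has(X86_FEATURE_FPU))
        return;
    /* ... FPU register activation, elided in the hunk ... */
    if (!cpu_feature_enabled(X86_FEATURE_OSPKE))
        return;

    /*
     * PKRU state is switched eagerly because it needs to be valid before we
     * return to userland e.g. for a copy_to_user() operation.
     */
    if (current->mm) {
        pk = get_xsave_addr(&new_fpu->state.xsave, XFEATURE_PKRU);
        if (pk)
            pkru_val = pk->pkru;
    }
    /* ... pkru_val is then written back, outside the quoted context ... */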
diff --git a/patches/0015-x86-entry-Add-TIF_NEED_FPU_LOAD.patch b/patches/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
index 3cfb1d42d8c5..f6b05495a134 100644
--- a/patches/0015-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
+++ b/patches/0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Wed, 5 Sep 2018 18:34:47 +0200
-Subject: [PATCH 15/22] x86/entry: Add TIF_NEED_FPU_LOAD
+Subject: [PATCH 16/27] x86/entry: Add TIF_NEED_FPU_LOAD
Add TIF_NEED_FPU_LOAD. This is reserved for loading the FPU registers
before returning to userland. This flag must not be set for systems
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -537,6 +537,12 @@ static inline void __fpregs_load_activat
+@@ -538,6 +538,12 @@ static inline void __fpregs_load_activat
*
* The FPU context is only stored/restore for user task and ->mm is used to
* distinguish between kernel and user threads.
diff --git a/patches/0016-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch b/patches/0016-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
deleted file mode 100644
index 232ab73d2cf9..000000000000
--- a/patches/0016-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
+++ /dev/null
@@ -1,154 +0,0 @@
-From: Rik van Riel <riel@surriel.com>
-Date: Sun, 9 Sep 2018 18:30:50 +0200
-Subject: [PATCH 16/22] x86/fpu: Always store the registers in
- copy_fpstate_to_sigframe()
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-copy_fpstate_to_sigframe() stores the registers directly to user space.
-This is okay because the FPU register are valid and saving it directly
-avoids saving it into kernel memory and making a copy.
-However… We can't keep doing this if we are going to restore the FPU
-registers on the return to userland. It is possible that the FPU
-registers will be invalidated in the middle of the save operation and
-this should be done with disabled preemption / BH.
-
-Save the FPU registers to task's FPU struct and copy them to the user
-memory later on.
-
-This code is extracted from an earlier version of the patchset while
-there still was lazy-FPU on x86.
-
-Signed-off-by: Rik van Riel <riel@surriel.com>
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/include/asm/fpu/internal.h | 45 ------------------------------------
- arch/x86/kernel/fpu/signal.c | 34 ++++++++++-----------------
- 2 files changed, 13 insertions(+), 66 deletions(-)
-
---- a/arch/x86/include/asm/fpu/internal.h
-+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -126,22 +126,6 @@ extern void fpstate_sanitize_xstate(stru
- _ASM_EXTABLE_HANDLE(1b, 2b, ex_handler_fprestore) \
- : output : input)
-
--static inline int copy_fregs_to_user(struct fregs_state __user *fx)
--{
-- return user_insn(fnsave %[fx]; fwait, [fx] "=m" (*fx), "m" (*fx));
--}
--
--static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
--{
-- if (IS_ENABLED(CONFIG_X86_32))
-- return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
-- else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
-- return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
--
-- /* See comment in copy_fxregs_to_kernel() below. */
-- return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
--}
--
- static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
- {
- if (IS_ENABLED(CONFIG_X86_32)) {
-@@ -353,35 +337,6 @@ static inline void copy_kernel_to_xregs(
- }
-
- /*
-- * Save xstate to user space xsave area.
-- *
-- * We don't use modified optimization because xrstor/xrstors might track
-- * a different application.
-- *
-- * We don't use compacted format xsave area for
-- * backward compatibility for old applications which don't understand
-- * compacted format of xsave area.
-- */
--static inline int copy_xregs_to_user(struct xregs_state __user *buf)
--{
-- int err;
--
-- /*
-- * Clear the xsave header first, so that reserved fields are
-- * initialized to zero.
-- */
-- err = __clear_user(&buf->header, sizeof(buf->header));
-- if (unlikely(err))
-- return -EFAULT;
--
-- stac();
-- XSTATE_OP(XSAVE, buf, -1, -1, err);
-- clac();
--
-- return err;
--}
--
--/*
- * Restore xstate from user space xsave area.
- */
- static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -118,22 +118,6 @@ static inline int save_xstate_epilog(voi
- return err;
- }
-
--static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
--{
-- int err;
--
-- if (use_xsave())
-- err = copy_xregs_to_user(buf);
-- else if (use_fxsr())
-- err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
-- else
-- err = copy_fregs_to_user((struct fregs_state __user *) buf);
--
-- if (unlikely(err) && __clear_user(buf, fpu_user_xstate_size))
-- err = -EFAULT;
-- return err;
--}
--
- /*
- * Save the fpu, extended register state to the user signal frame.
- *
-@@ -144,8 +128,8 @@ static inline int copy_fpregs_to_sigfram
- * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- * buf != buf_fx for 32-bit frames with fxstate.
- *
-- * Save the state directly to the user frame pointed by the aligned pointer
-- * 'buf_fx'.
-+ * Save the state to task's fpu->state and then copy it to the user frame
-+ * pointed by the aligned pointer 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
-@@ -155,6 +139,8 @@ static inline int copy_fpregs_to_sigfram
- */
- int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
- {
-+ struct fpu *fpu = &current->thread.fpu;
-+ struct xregs_state *xsave = &fpu->state.xsave;
- struct task_struct *tsk = current;
- int ia32_fxstate = (buf != buf_fx);
-
-@@ -169,9 +155,15 @@ int copy_fpstate_to_sigframe(void __user
- sizeof(struct user_i387_ia32_struct), NULL,
- (struct _fpstate_32 __user *) buf) ? -1 : 1;
-
-- /* Save the live register state to the user directly. */
-- if (copy_fpregs_to_sigframe(buf_fx))
-- return -1;
-+ copy_fpregs_to_fpstate(fpu);
-+
-+ if (using_compacted_format()) {
-+ copy_xstate_to_user(buf_fx, xsave, 0, size);
-+ } else {
-+ fpstate_sanitize_xstate(fpu);
-+ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
-+ return -1;
-+ }
-
- /* Save the fsave header for the 32-bit frames. */
- if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
diff --git a/patches/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch b/patches/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
new file mode 100644
index 000000000000..f44bec586a02
--- /dev/null
+++ b/patches/0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
@@ -0,0 +1,70 @@
+From: Rik van Riel <riel@surriel.com>
+Date: Sun, 9 Sep 2018 18:30:50 +0200
+Subject: [PATCH 17/27] x86/fpu: Always store the registers in
+ copy_fpstate_to_sigframe()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+copy_fpstate_to_sigframe() stores the registers directly to user space.
+This is okay because the FPU register are valid and saving it directly
+avoids saving it into kernel memory and making a copy.
+However… We can't keep doing this if we are going to restore the FPU
+registers on the return to userland. It is possible that the FPU
+registers will be invalidated in the middle of the save operation and
+this should be done with disabled preemption / BH.
+
+Save the FPU registers to task's FPU struct and copy them to the user
+memory later on.
+
+This code is extracted from an earlier version of the patchset while
+there still was lazy-FPU on x86.
+
+Signed-off-by: Rik van Riel <riel@surriel.com>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -144,8 +144,8 @@ static inline int copy_fpregs_to_sigfram
+ * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
+ * buf != buf_fx for 32-bit frames with fxstate.
+ *
+- * Save the state directly to the user frame pointed by the aligned pointer
+- * 'buf_fx'.
++ * Save the state to task's fpu->state and then copy it to the user frame
++ * pointed by the aligned pointer 'buf_fx'.
+ *
+ * If this is a 32-bit frame with fxstate, put a fsave header before
+ * the aligned state at 'buf_fx'.
+@@ -155,6 +155,8 @@ static inline int copy_fpregs_to_sigfram
+ */
+ int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+ {
++ struct fpu *fpu = &current->thread.fpu;
++ struct xregs_state *xsave = &fpu->state.xsave;
+ struct task_struct *tsk = current;
+ int ia32_fxstate = (buf != buf_fx);
+
+@@ -169,9 +171,16 @@ int copy_fpstate_to_sigframe(void __user
+ sizeof(struct user_i387_ia32_struct), NULL,
+ (struct _fpstate_32 __user *) buf) ? -1 : 1;
+
+- /* Save the live register state to the user directly. */
+- if (copy_fpregs_to_sigframe(buf_fx))
+- return -1;
++ copy_fpregs_to_fpstate(fpu);
++
++ if (using_compacted_format()) {
++ if (copy_xstate_to_user(buf_fx, xsave, 0, size))
++ return -1;
++ } else {
++ fpstate_sanitize_xstate(fpu);
++ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
++ return -1;
++ }
+
+ /* Save the fsave header for the 32-bit frames. */
+ if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
diff --git a/patches/0017-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch b/patches/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
index bf2d4840d248..4b07e024cf31 100644
--- a/patches/0017-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
+++ b/patches/0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
@@ -1,6 +1,6 @@
From: Rik van Riel <riel@surriel.com>
Date: Sun, 9 Sep 2018 18:30:51 +0200
-Subject: [PATCH 17/22] x86/fpu: Prepare copy_fpstate_to_sigframe() for
+Subject: [PATCH 18/27] x86/fpu: Prepare copy_fpstate_to_sigframe() for
TIF_NEED_FPU_LOAD
The FPU registers need only to be saved if TIF_NEED_FPU_LOAD is not set.
@@ -9,12 +9,12 @@ Otherwise this has been already done and can be skipped.
Signed-off-by: Rik van Riel <riel@surriel.com>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/kernel/fpu/signal.c | 11 ++++++++++-
- 1 file changed, 10 insertions(+), 1 deletion(-)
+ arch/x86/kernel/fpu/signal.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
-@@ -155,7 +155,16 @@ int copy_fpstate_to_sigframe(void __user
+@@ -171,7 +171,17 @@ int copy_fpstate_to_sigframe(void __user
sizeof(struct user_i387_ia32_struct), NULL,
(struct _fpstate_32 __user *) buf) ? -1 : 1;
@@ -25,10 +25,11 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ * then the CPU has the current state and we need to save it. Otherwise
+ * it is already done and we can skip it.
+ */
-+ if (!test_thread_flag(TIF_NEED_FPU_LOAD))
++ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+ copy_fpregs_to_fpstate(fpu);
-+
++ set_thread_flag(TIF_NEED_FPU_LOAD);
++ }
+ fpregs_unlock();
if (using_compacted_format()) {
- copy_xstate_to_user(buf_fx, xsave, 0, size);
+ if (copy_xstate_to_user(buf_fx, xsave, 0, size))
diff --git a/patches/0019-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch b/patches/0019-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
deleted file mode 100644
index 70dd1df25b6c..000000000000
--- a/patches/0019-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Wed, 7 Nov 2018 15:06:06 +0100
-Subject: [PATCH 19/22] x86/fpu: Inline copy_user_to_fpregs_zeroing()
-
-Start refactoring __fpu__restore_sig() by inlining
-copy_user_to_fpregs_zeroing().
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/x86/kernel/fpu/signal.c | 42 +++++++++++++++++++-----------------------
- 1 file changed, 19 insertions(+), 23 deletions(-)
-
---- a/arch/x86/kernel/fpu/signal.c
-+++ b/arch/x86/kernel/fpu/signal.c
-@@ -220,28 +220,6 @@ sanitize_restored_xstate(union fpregs_st
- }
- }
-
--/*
-- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
-- */
--static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
--{
-- if (use_xsave()) {
-- if ((unsigned long)buf % 64 || fx_only) {
-- u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- return copy_user_to_fxregs(buf);
-- } else {
-- u64 init_bv = xfeatures_mask & ~xbv;
-- if (unlikely(init_bv))
-- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- return copy_user_to_xregs(buf, xbv);
-- }
-- } else if (use_fxsr()) {
-- return copy_user_to_fxregs(buf);
-- } else
-- return copy_user_to_fregs(buf);
--}
--
- static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
- {
- int ia32_fxstate = (buf != buf_fx);
-@@ -319,11 +297,29 @@ static int __fpu__restore_sig(void __use
- kfree(tmp);
- return err;
- } else {
-+ int ret;
-+
- /*
- * For 64-bit frames and 32-bit fsave frames, restore the user
- * state to the registers directly (with exceptions handled).
- */
-- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
-+ if (use_xsave()) {
-+ if ((unsigned long)buf_fx % 64 || fx_only) {
-+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_user_to_fxregs(buf_fx);
-+ } else {
-+ u64 init_bv = xfeatures_mask & ~xfeatures;
-+ if (unlikely(init_bv))
-+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_user_to_xregs(buf_fx, xfeatures);
-+ }
-+ } else if (use_fxsr()) {
-+ ret = copy_user_to_fxregs(buf_fx);
-+ } else
-+ ret = copy_user_to_fregs(buf_fx);
-+
-+ if (ret) {
- fpu__clear(fpu);
- return -1;
- }
diff --git a/patches/0018-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch b/patches/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
index 9273878f497c..1a68f892cb10 100644
--- a/patches/0018-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
+++ b/patches/0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Fri, 19 Oct 2018 23:50:27 +0200
-Subject: [PATCH 18/22] x86/fpu: Update xstate's PKRU value on write_pkru()
+Subject: [PATCH 19/27] x86/fpu: Update xstate's PKRU value on write_pkru()
During the context switch the xstate is loaded which also includes the
PKRU value.
diff --git a/patches/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch b/patches/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
new file mode 100644
index 000000000000..8d61f7e24052
--- /dev/null
+++ b/patches/0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
@@ -0,0 +1,46 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 7 Nov 2018 15:06:06 +0100
+Subject: [PATCH 20/27] x86/fpu: Inline copy_user_to_fpregs_zeroing()
+
+Start refactoring __fpu__restore_sig() by inlining
+copy_user_to_fpregs_zeroing(). The original function remains and will be
+used to restore from userland memory if possible.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -337,11 +337,29 @@ static int __fpu__restore_sig(void __use
+ kfree(tmp);
+ return err;
+ } else {
++ int ret;
++
+ /*
+ * For 64-bit frames and 32-bit fsave frames, restore the user
+ * state to the registers directly (with exceptions handled).
+ */
+- if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
++ if (use_xsave()) {
++ if ((unsigned long)buf_fx % 64 || fx_only) {
++ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
++ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
++ ret = copy_user_to_fxregs(buf_fx);
++ } else {
++ u64 init_bv = xfeatures_mask & ~xfeatures;
++ if (unlikely(init_bv))
++ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
++ ret = copy_user_to_xregs(buf_fx, xfeatures);
++ }
++ } else if (use_fxsr()) {
++ ret = copy_user_to_fxregs(buf_fx);
++ } else
++ ret = copy_user_to_fregs(buf_fx);
++
++ if (ret) {
+ fpu__clear(fpu);
+ return -1;
+ }
diff --git a/patches/0020-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch b/patches/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
index a2dc0c57fb0c..d1400fd99432 100644
--- a/patches/0020-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
+++ b/patches/0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Nov 2018 17:48:32 +0100
-Subject: [PATCH 20/22] x86/fpu: Let __fpu__restore_sig() restore the
+Subject: [PATCH 21/27] x86/fpu: Let __fpu__restore_sig() restore the
!32bit+fxsr frame from kernel memory
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
@@ -23,19 +23,22 @@ it and then load it. The copy_users_…() helper are basically the old
helper except that they operate on kernel memory and the fault handler
just sets the error value and the caller handles it.
+copy_user_to_fpregs_zeroing() and its helpers remain and will be used
+later for a fastpath optimisation.
+
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/x86/include/asm/fpu/internal.h | 35 +++++++++++++-------
+ arch/x86/include/asm/fpu/internal.h | 43 ++++++++++++++++++++++++
arch/x86/kernel/fpu/signal.c | 62 ++++++++++++++++++++++++++++--------
- 2 files changed, 73 insertions(+), 24 deletions(-)
+ 2 files changed, 92 insertions(+), 13 deletions(-)
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -120,6 +120,21 @@ extern void fpstate_sanitize_xstate(stru
+@@ -121,6 +121,21 @@ extern void fpstate_sanitize_xstate(stru
err; \
})
-+#define kernel_insn_norestore(insn, output, input...) \
++#define kernel_insn_err(insn, output, input...) \
+({ \
+ int err; \
+ asm volatile("1:" #insn "\n\t" \
@@ -53,63 +56,58 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#define kernel_insn(insn, output, input...) \
asm volatile("1:" #insn "\n\t" \
"2:\n" \
-@@ -140,15 +155,15 @@ static inline void copy_kernel_to_fxregs
+@@ -157,6 +172,14 @@ static inline void copy_kernel_to_fxregs
}
}
--static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
-+static inline int copy_users_to_fxregs(struct fxregs_state *fx)
++static inline int copy_kernel_to_fxregs_err(struct fxregs_state *fx)
++{
++ if (IS_ENABLED(CONFIG_X86_32))
++ return kernel_insn_err(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
++ else
++ return kernel_insn_err(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
++}
++
+ static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
{
if (IS_ENABLED(CONFIG_X86_32))
-- return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-+ return kernel_insn_norestore(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
- else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
-- return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-+ return kernel_insn_norestore(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-
- /* See comment in copy_fxregs_to_kernel() below. */
-- return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
-+ return kernel_insn_norestore(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
- "m" (*fx));
- }
-
-@@ -157,9 +172,9 @@ static inline void copy_kernel_to_fregs(
+@@ -174,6 +197,11 @@ static inline void copy_kernel_to_fregs(
kernel_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
}
--static inline int copy_user_to_fregs(struct fregs_state __user *fx)
-+static inline int copy_users_to_fregs(struct fregs_state *fx)
++static inline int copy_kernel_to_fregs_err(struct fregs_state *fx)
++{
++ return kernel_insn_err(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
++}
++
+ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
{
-- return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-+ return kernel_insn_norestore(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
- }
+ return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+@@ -398,6 +426,21 @@ static inline int copy_user_to_xregs(str
- static inline void copy_fxregs_to_kernel(struct fpu *fpu)
-@@ -337,18 +352,16 @@ static inline void copy_kernel_to_xregs(
+ return err;
}
-
- /*
-- * Restore xstate from user space xsave area.
++
++/*
+ * Restore xstate from kernel space xsave area, return an error code instead an
+ * exception.
- */
--static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
-+static inline int copy_users_to_xregs(struct xregs_state *xstate, u64 mask)
- {
-- struct xregs_state *xstate = ((__force struct xregs_state *)buf);
- u32 lmask = mask;
- u32 hmask = mask >> 32;
- int err;
-
-- stac();
- XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
-- clac();
++ */
++static inline int copy_kernel_to_xregs_err(struct xregs_state *xstate, u64 mask)
++{
++ u32 lmask = mask;
++ u32 hmask = mask >> 32;
++ int err;
++
++ XSTATE_OP(XRSTOR, xstate, lmask, hmask, err);
++
++ return err;
++}
- return err;
- }
+ /*
+ * These must be called with preempt disabled. Returns
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
-@@ -216,7 +216,8 @@ sanitize_restored_xstate(union fpregs_st
+@@ -234,7 +234,8 @@ sanitize_restored_xstate(union fpregs_st
*/
xsave->i387.mxcsr &= mxcsr_feature_mask;
@@ -119,7 +117,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
}
-@@ -297,28 +298,63 @@ static int __fpu__restore_sig(void __use
+@@ -337,28 +338,63 @@ static int __fpu__restore_sig(void __use
kfree(tmp);
return err;
} else {
@@ -166,7 +164,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+ if (unlikely(init_bv))
+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_users_to_xregs(&state->xsave, xfeatures);
++ ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
+
} else if (use_fxsr()) {
- ret = copy_user_to_fxregs(buf_fx);
@@ -182,12 +180,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ }
+ state->fxsave.mxcsr &= mxcsr_feature_mask;
+
-+ ret = copy_users_to_fxregs(&state->fxsave);
++ ret = copy_kernel_to_fxregs_err(&state->fxsave);
+ } else {
+ ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+ if (ret)
+ goto err_out;
-+ ret = copy_users_to_fregs(buf_fx);
++ ret = copy_kernel_to_fregs_err(&state->fsave);
+ }
+err_out:
diff --git a/patches/0021-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch b/patches/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
index 275491feb3de..736f93284ccb 100644
--- a/patches/0021-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
+++ b/patches/0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
@@ -1,6 +1,6 @@
From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Date: Tue, 27 Nov 2018 21:08:01 +0100
-Subject: [PATCH 21/22] x86/fpu: Merge the two code paths in
+Subject: [PATCH 22/27] x86/fpu: Merge the two code paths in
__fpu__restore_sig()
The ia32_fxstate case (32bit with fxsr) and the other (64bit, 32bit
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
-@@ -223,12 +223,17 @@ sanitize_restored_xstate(union fpregs_st
+@@ -263,12 +263,17 @@ static inline int copy_user_to_fpregs_ze
static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
{
@@ -36,7 +36,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -263,105 +268,68 @@ static int __fpu__restore_sig(void __use
+@@ -303,105 +308,68 @@ static int __fpu__restore_sig(void __use
}
}
@@ -135,7 +135,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-
- if (unlikely(init_bv))
- copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_users_to_xregs(&state->xsave, xfeatures);
+- ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
-
- } else if (use_fxsr()) {
- ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
@@ -148,15 +148,15 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- }
- state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-- ret = copy_users_to_fxregs(&state->fxsave);
+- ret = copy_kernel_to_fxregs_err(&state->fxsave);
- } else {
- ret = __copy_from_user(&state->fsave, buf_fx, state_size);
- if (ret)
- goto err_out;
-- ret = copy_users_to_fregs(buf_fx);
+- ret = copy_kernel_to_fregs_err(&state->fsave);
+ if (unlikely(init_bv))
+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-+ ret = copy_users_to_xregs(&state->xsave, xfeatures);
++ ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
+
+ } else if (use_fxsr()) {
+ ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
@@ -175,12 +175,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
- fpu__clear(fpu);
- return -1;
- }
-+ ret = copy_users_to_fxregs(&state->fxsave);
++ ret = copy_kernel_to_fxregs_err(&state->fxsave);
+ } else {
+ ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+ if (ret)
+ goto err_out;
-+ ret = copy_users_to_fregs(buf_fx);
++ ret = copy_kernel_to_fregs_err(&state->fsave);
}
- return 0;
diff --git a/patches/0022-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
index 506aaf4dc81d..233b8fd6df23 100644
--- a/patches/0022-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+++ b/patches/0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
@@ -1,6 +1,6 @@
From: Rik van Riel <riel@surriel.com>
Date: Sun, 9 Sep 2018 18:30:53 +0200
-Subject: [PATCH 22/22] x86/fpu: Defer FPU state load until return to userspace
+Subject: [PATCH 23/27] x86/fpu: Defer FPU state load until return to userspace
Defer loading of FPU state until return to userspace. This gives
the kernel the potential to skip loading FPU state for tasks that
@@ -32,12 +32,12 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
arch/x86/include/asm/fpu/internal.h | 27 +++++----
arch/x86/include/asm/trace/fpu.h | 5 +
arch/x86/kernel/fpu/core.c | 105 +++++++++++++++++++++++++++---------
- arch/x86/kernel/fpu/signal.c | 46 +++++++++------
+ arch/x86/kernel/fpu/signal.c | 48 +++++++++-------
arch/x86/kernel/process.c | 2
arch/x86/kernel/process_32.c | 5 +
arch/x86/kernel/process_64.c | 5 +
arch/x86/kvm/x86.c | 20 +++++-
- 10 files changed, 180 insertions(+), 65 deletions(-)
+ 10 files changed, 181 insertions(+), 66 deletions(-)
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -114,7 +114,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
-@@ -29,7 +29,7 @@ extern void fpu__prepare_write(struct fp
+@@ -30,7 +30,7 @@ extern void fpu__prepare_write(struct fp
extern void fpu__save(struct fpu *fpu);
extern int fpu__restore_sig(void __user *buf, int ia32_frame);
extern void fpu__drop(struct fpu *fpu);
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
extern void fpu__clear(struct fpu *fpu);
extern int fpu__exception_code(struct fpu *fpu, int trap_nr);
extern int dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
-@@ -483,13 +483,20 @@ static inline void fpregs_activate(struc
+@@ -559,13 +559,20 @@ static inline void fpregs_activate(struc
trace_x86_fpu_regs_activated(fpu);
}
@@ -147,7 +147,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -500,8 +507,8 @@ static inline void __fpregs_load_activat
+@@ -576,8 +583,8 @@ static inline void __fpregs_load_activat
* - switch_fpu_prepare() saves the old state.
* This is done within the context of the old process.
*
@@ -158,7 +158,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* The FPU context is only stored/restore for user task and ->mm is used to
* distinguish between kernel and user threads.
-@@ -531,10 +538,10 @@ switch_fpu_prepare(struct fpu *old_fpu,
+@@ -607,10 +614,10 @@ switch_fpu_prepare(struct fpu *old_fpu,
*/
/*
@@ -171,8 +171,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+static inline void switch_fpu_finish(struct fpu *new_fpu)
{
struct pkru_state *pk;
- u32 pkru_val = 0;
-@@ -542,7 +549,7 @@ static inline void switch_fpu_finish(str
+ u32 pkru_val = init_pkru_value;
+@@ -618,7 +625,7 @@ static inline void switch_fpu_finish(str
if (!static_cpu_has(X86_FEATURE_FPU))
return;
@@ -387,7 +387,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
-@@ -229,11 +229,9 @@ static int __fpu__restore_sig(void __use
+@@ -269,11 +269,9 @@ static int __fpu__restore_sig(void __use
struct fpu *fpu = &tsk->thread.fpu;
int state_size = fpu_kernel_xstate_size;
struct user_i387_ia32_struct env;
@@ -399,7 +399,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
IS_ENABLED(CONFIG_IA32_EMULATION));
-@@ -268,14 +266,18 @@ static int __fpu__restore_sig(void __use
+@@ -308,14 +306,18 @@ static int __fpu__restore_sig(void __use
}
}
@@ -423,7 +423,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* For 32-bit frames with fxstate, copy the fxstate so it can be
* reconstructed later.
-@@ -290,43 +292,51 @@ static int __fpu__restore_sig(void __use
+@@ -330,43 +332,51 @@ static int __fpu__restore_sig(void __use
u64 init_bv = xfeatures_mask & ~xfeatures;
if (using_compacted_format()) {
@@ -446,8 +446,8 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ fpregs_lock();
if (unlikely(init_bv))
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
-- ret = copy_users_to_xregs(&state->xsave, xfeatures);
-+ ret = copy_users_to_xregs(&fpu->state.xsave, xfeatures);
+- ret = copy_kernel_to_xregs_err(&state->xsave, xfeatures);
++ ret = copy_kernel_to_xregs_err(&fpu->state.xsave, xfeatures);
} else if (use_fxsr()) {
- ret = __copy_from_user(&state->fxsave, buf_fx, state_size);
@@ -467,15 +467,16 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
}
-- ret = copy_users_to_fxregs(&state->fxsave);
-+ ret = copy_users_to_fxregs(&fpu->state.fxsave);
+- ret = copy_kernel_to_fxregs_err(&state->fxsave);
++ ret = copy_kernel_to_fxregs_err(&fpu->state.fxsave);
} else {
- ret = __copy_from_user(&state->fsave, buf_fx, state_size);
+ ret = __copy_from_user(&fpu->state.fsave, buf_fx, state_size);
if (ret)
goto err_out;
+- ret = copy_kernel_to_fregs_err(&state->fsave);
+ fpregs_lock();
- ret = copy_users_to_fregs(buf_fx);
++ ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
}
+ if (!ret)
+ fpregs_mark_activate();
@@ -541,7 +542,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
update_task_stack(next_p);
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -7841,6 +7841,10 @@ static int vcpu_enter_guest(struct kvm_v
+@@ -7868,6 +7868,10 @@ static int vcpu_enter_guest(struct kvm_v
wait_lapic_expire(vcpu);
guest_enter_irqoff();
@@ -552,7 +553,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (unlikely(vcpu->arch.switch_db_regs)) {
set_debugreg(0, 7);
set_debugreg(vcpu->arch.eff_db[0], 0);
-@@ -8101,22 +8105,30 @@ static int complete_emulated_mmio(struct
+@@ -8128,22 +8132,30 @@ static int complete_emulated_mmio(struct
/* Swap (qemu) user FPU context for the guest FPU context. */
static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
{
diff --git a/patches/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch b/patches/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
new file mode 100644
index 000000000000..08266fabf806
--- /dev/null
+++ b/patches/0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
@@ -0,0 +1,52 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 2 Apr 2019 13:02:25 +0200
+Subject: [PATCH 24/27] x86/fpu: Add a fastpath to __fpu__restore_sig()
+
+The previous commits refactor the restoration of the FPU registers so
+that they can be loaded from in-kernel memory. This overhead can be
+avoided if the load can be performed without a pagefault.
+
+Attempt to restore the FPU registers by invoking
+copy_user_to_fpregs_zeroing(). If it fails, fall back to the slowpath, which
+can handle pagefaults.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -242,10 +242,10 @@ sanitize_restored_xstate(union fpregs_st
+ /*
+ * Restore the extended state if present. Otherwise, restore the FP/SSE state.
+ */
+-static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
++static int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+ {
+ if (use_xsave()) {
+- if ((unsigned long)buf % 64 || fx_only) {
++ if (fx_only) {
+ u64 init_bv = xfeatures_mask & ~XFEATURE_MASK_FPSSE;
+ copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+ return copy_user_to_fxregs(buf);
+@@ -327,7 +327,19 @@ static int __fpu__restore_sig(void __use
+ if (ret)
+ goto err_out;
+ envp = &env;
++ } else {
++ fpregs_lock();
++ pagefault_disable();
++ ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
++ pagefault_enable();
++ if (!ret) {
++ fpregs_mark_activate();
++ fpregs_unlock();
++ return 0;
++ }
++ fpregs_unlock();
+ }
++
+ if (use_xsave() && !fx_only) {
+ u64 init_bv = xfeatures_mask & ~xfeatures;
+
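In plain C, the fastpath added above amounts to roughly the following. This is a condensed sketch of the hunk, not verbatim kernel code; the ia32_fxstate branch and the xsave/fxsr slowpath that follow it in __fpu__restore_sig() are omitted.

	/* Fast path: load the sigframe straight into the registers. */
	fpregs_lock();
	pagefault_disable();
	ret = copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only);
	pagefault_enable();
	if (!ret) {
		/* Registers now hold the task's state; nothing left to do. */
		fpregs_mark_activate();
		fpregs_unlock();
		return 0;
	}
	/* A fault occurred: unlock and fall through to the slowpath. */
	fpregs_unlock();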
diff --git a/patches/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch b/patches/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
new file mode 100644
index 000000000000..6b7917569bdb
--- /dev/null
+++ b/patches/0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
@@ -0,0 +1,78 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 3 Apr 2019 15:59:12 +0200
+Subject: [PATCH 25/27] x86/fpu: Add a fastpath to copy_fpstate_to_sigframe()
+
+If the CPU holds the FPU registers for the current task then we can try to save
+them directly to the userland stack frame. This has to be done with the
+pagefault handler disabled because we can't fault (while the FPU registers are
+locked) and therefore the operation might fail.
+If it fails, try the slowpath, which can handle faults.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 34 ++++++++++++++++++++++------------
+ 1 file changed, 22 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -144,8 +144,10 @@ static inline int copy_fpregs_to_sigfram
+ * buf == buf_fx for 64-bit frames and 32-bit fsave frame.
+ * buf != buf_fx for 32-bit frames with fxstate.
+ *
+- * Save the state to task's fpu->state and then copy it to the user frame
+- * pointed by the aligned pointer 'buf_fx'.
++ * Try to save it directly to the user frame with disabled page fault handler.
++ * If this fails then do the slow path where the FPU state is first saved to
++ * task's fpu->state and then copy it to the user frame pointed by the aligned
++ * pointer 'buf_fx'.
+ *
+ * If this is a 32-bit frame with fxstate, put a fsave header before
+ * the aligned state at 'buf_fx'.
+@@ -159,6 +161,7 @@ int copy_fpstate_to_sigframe(void __user
+ struct xregs_state *xsave = &fpu->state.xsave;
+ struct task_struct *tsk = current;
+ int ia32_fxstate = (buf != buf_fx);
++ int ret = -EFAULT;
+
+ ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
+ IS_ENABLED(CONFIG_IA32_EMULATION));
+@@ -174,22 +177,29 @@ int copy_fpstate_to_sigframe(void __user
+ fpregs_lock();
+ /*
+ * If we do not need to load the FPU registers at return to userspace
+- * then the CPU has the current state and we need to save it. Otherwise
+- * it is already done and we can skip it.
++ * then the CPU has the current state. Try to save it directly to
++ * userland's stack frame if it does not cause a pagefault. If it does,
++ * try the slowpath.
+ */
+ if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+- copy_fpregs_to_fpstate(fpu);
++ pagefault_disable();
++ ret = copy_fpregs_to_sigframe(buf_fx);
++ pagefault_enable();
++ if (ret)
++ copy_fpregs_to_fpstate(fpu);
+ set_thread_flag(TIF_NEED_FPU_LOAD);
+ }
+ fpregs_unlock();
+
+- if (using_compacted_format()) {
+- if (copy_xstate_to_user(buf_fx, xsave, 0, size))
+- return -1;
+- } else {
+- fpstate_sanitize_xstate(fpu);
+- if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
+- return -1;
++ if (ret) {
++ if (using_compacted_format()) {
++ if (copy_xstate_to_user(buf_fx, xsave, 0, size))
++ return -1;
++ } else {
++ fpstate_sanitize_xstate(fpu);
++ if (__copy_to_user(buf_fx, xsave, fpu_user_xstate_size))
++ return -1;
++ }
+ }
+
+ /* Save the fsave header for the 32-bit frames. */
diff --git a/patches/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch b/patches/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
new file mode 100644
index 000000000000..6615e8ae6c74
--- /dev/null
+++ b/patches/0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
@@ -0,0 +1,67 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 3 Apr 2019 15:59:13 +0200
+Subject: [PATCH 26/27] x86/fpu: Restore FPU register in
+ copy_fpstate_to_sigframe() in order to use the fastpath
+
+If a task is scheduled out and receives a signal then it won't be able to take
+the fastpath because the registers aren't available. The slowpath is more
+expensive than xrstor + xsave, which usually succeeds.
+
+Some clock_gettime() numbers from a bigger box with AVX512 during bootup:
+- __fpregs_load_activate() takes 140ns - 350ns. If it was the most recent FPU
+ context on the CPU then the optimisation in __fpregs_load_activate() will
+ skip the load (which was disabled during the test).
+
+- copy_fpregs_to_sigframe() takes 200ns - 450ns if it succeeds. On a
+ pagefault it is 1.8us - 3us usually in the 2.6us area.
+
+- The slowpath takes 1.5us - 6us, usually in the 2.6us area.
+
+My testcases (including lat_sig) take the fastpath without
+__fpregs_load_activate(). I expect this to be the majority.
+
+Since the slowpath is in the >1us area, it makes sense to load the
+registers and attempt to save them directly. The direct save may fail,
+but that should only happen on the first invocation or after fork() while
+the page is RO.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/fpu/signal.c | 25 +++++++++++++------------
+ 1 file changed, 13 insertions(+), 12 deletions(-)
+
+--- a/arch/x86/kernel/fpu/signal.c
++++ b/arch/x86/kernel/fpu/signal.c
+@@ -176,19 +176,20 @@ int copy_fpstate_to_sigframe(void __user
+
+ fpregs_lock();
+ /*
+- * If we do not need to load the FPU registers at return to userspace
+- * then the CPU has the current state. Try to save it directly to
+- * userland's stack frame if it does not cause a pagefault. If it does,
+- * try the slowpath.
++ * Load the FPU register if they are not valid for the current task.
++ * With a valid FPU state we can attempt to save the state directly to
++ * userland's stack frame which will likely succeed. If it does not, do
++ * the slowpath.
+ */
+- if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
+- pagefault_disable();
+- ret = copy_fpregs_to_sigframe(buf_fx);
+- pagefault_enable();
+- if (ret)
+- copy_fpregs_to_fpstate(fpu);
+- set_thread_flag(TIF_NEED_FPU_LOAD);
+- }
++ if (test_thread_flag(TIF_NEED_FPU_LOAD))
++ __fpregs_load_activate();
++
++ pagefault_disable();
++ ret = copy_fpregs_to_sigframe(buf_fx);
++ pagefault_enable();
++ if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
++ copy_fpregs_to_fpstate(fpu);
++ set_thread_flag(TIF_NEED_FPU_LOAD);
+ fpregs_unlock();
+
+ if (ret) {
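Combined with the previous patch, the save side of the signal path now reads roughly as follows. This is a sketch condensed from the two hunks; the compacted-format/__copy_to_user() fallback inside the final if (ret) block and the 32-bit fsave header handling are not shown.

	fpregs_lock();
	/* Make sure the CPU registers hold this task's FPU state. */
	if (test_thread_flag(TIF_NEED_FPU_LOAD))
		__fpregs_load_activate();

	/* Try to write the registers straight into the user sigframe. */
	pagefault_disable();
	ret = copy_fpregs_to_sigframe(buf_fx);
	pagefault_enable();
	if (ret && !test_thread_flag(TIF_NEED_FPU_LOAD))
		copy_fpregs_to_fpstate(fpu);	/* keep a kernel copy for the slowpath */
	set_thread_flag(TIF_NEED_FPU_LOAD);
	fpregs_unlock();

	if (ret) {
		/* The direct save faulted: copy fpu->state to the frame instead. */
	}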
diff --git a/patches/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch b/patches/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
new file mode 100644
index 000000000000..5f57e977fd04
--- /dev/null
+++ b/patches/0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
@@ -0,0 +1,73 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 21 Mar 2019 17:24:27 +0100
+Subject: [PATCH 27/27] x86/pkeys: add PKRU value to init_fpstate
+
+The task's initial PKRU value is set as part of fpu__clear()/
+copy_init_pkru_to_fpregs(). It is not part of init_fpstate.xsave and
+instead it is set explicitly.
+If the user removes the PKRU state from XSAVE in the signal handler then
+__fpu__restore_sig() will restore the missing bits from `init_fpstate'
+and initialize the PKRU value to 0.
+
+Add the `init_pkru_value' to `init_fpstate' so it is set to the init
+value in such a case.
+
+In theory we could drop copy_init_pkru_to_fpregs() because restoring the
+PKRU at return-to-userland should be enough.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/cpu/common.c | 5 +++++
+ arch/x86/mm/pkeys.c | 6 ++++++
+ 2 files changed, 11 insertions(+)
+
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -372,6 +372,8 @@ static bool pku_disabled;
+
+ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
+ {
++ struct pkru_state *pk;
++
+ /* check the boot processor, plus compile options for PKU: */
+ if (!cpu_feature_enabled(X86_FEATURE_PKU))
+ return;
+@@ -382,6 +384,9 @@ static __always_inline void setup_pku(st
+ return;
+
+ cr4_set_bits(X86_CR4_PKE);
++ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
++ if (pk)
++ pk->pkru = init_pkru_value;
+ /*
+ * Seting X86_CR4_PKE will cause the X86_FEATURE_OSPKE
+ * cpuid bit to be set. We need to ensure that we
+--- a/arch/x86/mm/pkeys.c
++++ b/arch/x86/mm/pkeys.c
+@@ -18,6 +18,7 @@
+
+ #include <asm/cpufeature.h> /* boot_cpu_has, ... */
+ #include <asm/mmu_context.h> /* vma_pkey() */
++#include <asm/fpu/internal.h> /* init_fpstate */
+
+ int __execute_only_pkey(struct mm_struct *mm)
+ {
+@@ -161,6 +162,7 @@ static ssize_t init_pkru_read_file(struc
+ static ssize_t init_pkru_write_file(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+ {
++ struct pkru_state *pk;
+ char buf[32];
+ ssize_t len;
+ u32 new_init_pkru;
+@@ -183,6 +185,10 @@ static ssize_t init_pkru_write_file(stru
+ return -EINVAL;
+
+ WRITE_ONCE(init_pkru_value, new_init_pkru);
++ pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
++ if (!pk)
++ return -EINVAL;
++ pk->pkru = new_init_pkru;
+ return count;
+ }
+
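The mechanism is the same in both hunks and can be summarised in a few lines (sketch only; get_xsave_addr() returns NULL when PKRU is not present in the xsave buffer, hence the checks in both call sites):

	struct pkru_state *pk;

	/* Point init_fpstate's PKRU slot at the default in setup_pku() ... */
	pk = get_xsave_addr(&init_fpstate.xsave, XFEATURE_PKRU);
	if (pk)
		pk->pkru = init_pkru_value;
	/* ... and keep it in sync when init_pkru_write_file() changes the value. */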
diff --git a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
index 2fca3afb9a1d..884910ded31a 100644
--- a/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
+++ b/patches/NFSv4-replace-seqcount_t-with-a-seqlock_t.patch
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
-@@ -2888,7 +2888,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2892,7 +2892,7 @@ static int _nfs4_open_and_get_state(stru
unsigned int seq;
int ret;
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
-@@ -2929,7 +2929,7 @@ static int _nfs4_open_and_get_state(stru
+@@ -2933,7 +2933,7 @@ static int _nfs4_open_and_get_state(stru
if (d_inode(dentry) == state->inode) {
nfs_inode_attach_open_context(ctx);
diff --git a/patches/add_migrate_disable.patch b/patches/add_migrate_disable.patch
index 108fff1ee93f..d09bdb2f9c70 100644
--- a/patches/add_migrate_disable.patch
+++ b/patches/add_migrate_disable.patch
@@ -79,7 +79,7 @@ Subject: kernel/sched/core: add migrate_disable()
* boot command line:
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1025,7 +1025,15 @@ void set_cpus_allowed_common(struct task
+@@ -1026,7 +1026,15 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -96,7 +96,7 @@ Subject: kernel/sched/core: add migrate_disable()
{
struct rq *rq = task_rq(p);
bool queued, running;
-@@ -1054,6 +1062,20 @@ void do_set_cpus_allowed(struct task_str
+@@ -1055,6 +1063,20 @@ void do_set_cpus_allowed(struct task_str
set_curr_task(rq, p);
}
@@ -117,7 +117,7 @@ Subject: kernel/sched/core: add migrate_disable()
/*
* Change a given task's CPU affinity. Migrate the thread to a
* proper CPU and schedule it away if the CPU it's executing on
-@@ -1112,9 +1134,16 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1113,9 +1135,16 @@ static int __set_cpus_allowed_ptr(struct
}
/* Can the task run on the task's current CPU? If so, we're done */
@@ -135,7 +135,7 @@ Subject: kernel/sched/core: add migrate_disable()
dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
-@@ -7071,3 +7100,100 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7072,3 +7101,100 @@ const u32 sched_prio_to_wmult[40] = {
};
#undef CREATE_TRACE_POINTS
@@ -238,7 +238,7 @@ Subject: kernel/sched/core: add migrate_disable()
+#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -978,6 +978,10 @@ void proc_sched_show_task(struct task_st
+@@ -982,6 +982,10 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
diff --git a/patches/arm-remove-printk_nmi_.patch b/patches/arm-remove-printk_nmi_.patch
index 616948ff1680..3ad859f47efa 100644
--- a/patches/arm-remove-printk_nmi_.patch
+++ b/patches/arm-remove-printk_nmi_.patch
@@ -11,7 +11,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
-@@ -682,11 +682,9 @@ void handle_IPI(int ipinr, struct pt_reg
+@@ -684,11 +684,9 @@ void handle_IPI(int ipinr, struct pt_reg
break;
case IPI_CPU_BACKTRACE:
diff --git a/patches/at91_dont_enable_disable_clock.patch b/patches/at91_dont_enable_disable_clock.patch
index 417e2e792c14..f3e3942a4678 100644
--- a/patches/at91_dont_enable_disable_clock.patch
+++ b/patches/at91_dont_enable_disable_clock.patch
@@ -8,12 +8,12 @@ This can be avoided and causes a needless warning on -RT.
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/clocksource/tcb_clksrc.c | 33 +++++++++++++++++++++++++++++----
+ drivers/clocksource/timer-atmel-tcb.c | 33 +++++++++++++++++++++++++++++----
1 file changed, 29 insertions(+), 4 deletions(-)
---- a/drivers/clocksource/tcb_clksrc.c
-+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -126,6 +126,7 @@ static struct clocksource clksrc = {
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -130,6 +130,7 @@ static u64 notrace tc_sched_clock_read32
struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void __iomem *regs;
};
-@@ -143,6 +144,24 @@ static struct tc_clkevt_device *to_tc_cl
+@@ -147,6 +148,24 @@ static struct tc_clkevt_device *to_tc_cl
*/
static u32 timer_clock;
@@ -46,7 +46,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static int tc_shutdown(struct clock_event_device *d)
{
struct tc_clkevt_device *tcd = to_tc_clkevt(d);
-@@ -150,8 +169,14 @@ static int tc_shutdown(struct clock_even
+@@ -154,8 +173,14 @@ static int tc_shutdown(struct clock_even
writel(0xff, regs + ATMEL_TC_REG(2, IDR));
writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR));
@@ -62,7 +62,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
}
-@@ -164,7 +189,7 @@ static int tc_set_oneshot(struct clock_e
+@@ -168,7 +193,7 @@ static int tc_set_oneshot(struct clock_e
if (clockevent_state_oneshot(d) || clockevent_state_periodic(d))
tc_shutdown(d);
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* slow clock, count up to RC, then irq and stop */
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
-@@ -186,7 +211,7 @@ static int tc_set_periodic(struct clock_
+@@ -190,7 +215,7 @@ static int tc_set_periodic(struct clock_
/* By not making the gentime core emulate periodic mode on top
* of oneshot, we get lower overhead and improved accuracy.
*/
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* slow clock, count up to RC, then irq and restart */
writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
-@@ -220,7 +245,7 @@ static struct tc_clkevt_device clkevt =
+@@ -223,7 +248,7 @@ static struct tc_clkevt_device clkevt =
/* Should be lower than at91rm9200's system timer */
.rating = 125,
.set_next_event = tc_next_event,
diff --git a/patches/block-mq-drop-preempt-disable.patch b/patches/block-mq-drop-preempt-disable.patch
index 56aac504a2af..86b9c04daa77 100644
--- a/patches/block-mq-drop-preempt-disable.patch
+++ b/patches/block-mq-drop-preempt-disable.patch
@@ -31,7 +31,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
static void hctx_unlock(struct blk_mq_hw_ctx *hctx, int srcu_idx)
-@@ -1443,14 +1443,14 @@ static void __blk_mq_delay_run_hw_queue(
+@@ -1452,14 +1452,14 @@ static void __blk_mq_delay_run_hw_queue(
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
diff --git a/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch b/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
index 92feca23459a..31bec069deaf 100644
--- a/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
+++ b/patches/cgroup-use-irqsave-in-cgroup_rstat_flush_locked.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
-@@ -157,8 +157,9 @@ static void cgroup_rstat_flush_locked(st
+@@ -159,8 +159,9 @@ static void cgroup_rstat_flush_locked(st
raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
cpu);
struct cgroup *pos = NULL;
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
struct cgroup_subsys_state *css;
-@@ -170,7 +171,7 @@ static void cgroup_rstat_flush_locked(st
+@@ -172,7 +173,7 @@ static void cgroup_rstat_flush_locked(st
css->ss->css_rstat_flush(css, cpu);
rcu_read_unlock();
}
diff --git a/patches/cgroups-use-simple-wait-in-css_release.patch b/patches/cgroups-use-simple-wait-in-css_release.patch
index c8fb742bccec..3f48b3673a73 100644
--- a/patches/cgroups-use-simple-wait-in-css_release.patch
+++ b/patches/cgroups-use-simple-wait-in-css_release.patch
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
-@@ -4694,10 +4694,10 @@ static void css_free_rwork_fn(struct wor
+@@ -4697,10 +4697,10 @@ static void css_free_rwork_fn(struct wor
}
}
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cgroup_subsys *ss = css->ss;
struct cgroup *cgrp = css->cgroup;
-@@ -4757,8 +4757,8 @@ static void css_release(struct percpu_re
+@@ -4760,8 +4760,8 @@ static void css_release(struct percpu_re
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
diff --git a/patches/clocksource-tclib-allow-higher-clockrates.patch b/patches/clocksource-tclib-allow-higher-clockrates.patch
index 8fba8081a08a..7c9a84236e91 100644
--- a/patches/clocksource-tclib-allow-higher-clockrates.patch
+++ b/patches/clocksource-tclib-allow-higher-clockrates.patch
@@ -10,13 +10,29 @@ Add a compile time selection to allow higher clock resulution.
Signed-off-by: Benedikt Spranger <b.spranger@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
- drivers/clocksource/tcb_clksrc.c | 36 +++++++++++++++++++++---------------
- drivers/misc/Kconfig | 12 ++++++++++--
- 2 files changed, 31 insertions(+), 17 deletions(-)
+ drivers/clocksource/Kconfig | 7 ++++++
+ drivers/clocksource/timer-atmel-tcb.c | 36 +++++++++++++++++++---------------
+ 2 files changed, 28 insertions(+), 15 deletions(-)
---- a/drivers/clocksource/tcb_clksrc.c
-+++ b/drivers/clocksource/tcb_clksrc.c
-@@ -25,8 +25,7 @@
+--- a/drivers/clocksource/Kconfig
++++ b/drivers/clocksource/Kconfig
+@@ -419,6 +419,13 @@ config ATMEL_TCB_CLKSRC
+ help
+ Support for Timer Counter Blocks on Atmel SoCs.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock.
++
+ config CLKSRC_EXYNOS_MCT
+ bool "Exynos multi core timer driver" if COMPILE_TEST
+ depends on ARM || ARM64
+--- a/drivers/clocksource/timer-atmel-tcb.c
++++ b/drivers/clocksource/timer-atmel-tcb.c
+@@ -27,8 +27,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
* - The third channel may be used to provide a 16-bit clockevent
@@ -24,9 +40,9 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
- * at 32 KiHZ, and can handle delays of up to two seconds.
+ * source, used in either periodic or oneshot mode.
*
- * A boot clocksource and clockevent source are also currently needed,
- * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
-@@ -127,6 +126,7 @@ struct tc_clkevt_device {
+ * REVISIT behavior during system suspend states... we should disable
+ * all clocks and save the power. Easily done for clockevent devices,
+@@ -131,6 +130,7 @@ struct tc_clkevt_device {
struct clock_event_device clkevt;
struct clk *clk;
bool clk_enabled;
@@ -34,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void __iomem *regs;
};
-@@ -135,13 +135,6 @@ static struct tc_clkevt_device *to_tc_cl
+@@ -139,13 +139,6 @@ static struct tc_clkevt_device *to_tc_cl
return container_of(clkevt, struct tc_clkevt_device, clkevt);
}
@@ -48,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
static u32 timer_clock;
static void tc_clk_disable(struct clock_event_device *d)
-@@ -191,7 +184,7 @@ static int tc_set_oneshot(struct clock_e
+@@ -195,7 +188,7 @@ static int tc_set_oneshot(struct clock_e
tc_clk_enable(d);
@@ -57,7 +73,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE |
ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR));
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -213,10 +206,10 @@ static int tc_set_periodic(struct clock_
+@@ -217,10 +210,10 @@ static int tc_set_periodic(struct clock_
*/
tc_clk_enable(d);
@@ -70,7 +86,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Enable clock and interrupts on RC compare */
writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
-@@ -243,7 +236,11 @@ static struct tc_clkevt_device clkevt =
+@@ -246,7 +239,11 @@ static struct tc_clkevt_device clkevt =
.features = CLOCK_EVT_FEAT_PERIODIC |
CLOCK_EVT_FEAT_ONESHOT,
/* Should be lower than at91rm9200's system timer */
@@ -82,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
.set_next_event = tc_next_event,
.set_state_shutdown = tc_shutdown_clk_off,
.set_state_periodic = tc_set_periodic,
-@@ -265,8 +262,9 @@ static irqreturn_t ch2_irq(int irq, void
+@@ -268,8 +265,9 @@ static irqreturn_t ch2_irq(int irq, void
return IRQ_NONE;
}
@@ -93,7 +109,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
int ret;
struct clk *t2_clk = tc->clk[2];
int irq = tc->irq[2];
-@@ -287,7 +285,11 @@ static int __init setup_clkevents(struct
+@@ -290,7 +288,11 @@ static int __init setup_clkevents(struct
clkevt.regs = tc->regs;
clkevt.clk = t2_clk;
@@ -106,7 +122,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clkevt.clkevt.cpumask = cpumask_of(0);
-@@ -298,7 +300,7 @@ static int __init setup_clkevents(struct
+@@ -301,7 +303,7 @@ static int __init setup_clkevents(struct
return ret;
}
@@ -115,43 +131,15 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return ret;
}
-@@ -435,7 +437,11 @@ static int __init tcb_clksrc_init(void)
+@@ -474,7 +476,11 @@ static int __init tcb_clksrc_init(struct
goto err_disable_t1;
/* channel 2: periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
- ret = setup_clkevents(tc, clk32k_divisor_idx);
+ ret = setup_clkevents(&tc, clk32k_divisor_idx);
+#else
+ ret = setup_clkevents(tc, best_divisor_idx);
+#endif
if (ret)
goto err_unregister_clksrc;
---- a/drivers/misc/Kconfig
-+++ b/drivers/misc/Kconfig
-@@ -69,8 +69,7 @@ config ATMEL_TCB_CLKSRC
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
-
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -83,6 +82,15 @@ config ATMEL_TCB_CLKSRC_BLOCK
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
-+
-+
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
diff --git a/patches/completion-use-simple-wait-queues.patch b/patches/completion-use-simple-wait-queues.patch
index d14277dd6fe2..1f5814fd581e 100644
--- a/patches/completion-use-simple-wait-queues.patch
+++ b/patches/completion-use-simple-wait-queues.patch
@@ -50,7 +50,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
default:
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
-@@ -1703,7 +1703,7 @@ static void ffs_data_put(struct ffs_data
+@@ -1704,7 +1704,7 @@ static void ffs_data_put(struct ffs_data
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
EXPORT_SYMBOL(completion_done);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7120,7 +7120,10 @@ void migrate_disable(void)
+@@ -7121,7 +7121,10 @@ void migrate_disable(void)
return;
}
#ifdef CONFIG_SCHED_DEBUG
@@ -311,7 +311,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#endif
if (p->migrate_disable) {
-@@ -7150,7 +7153,10 @@ void migrate_enable(void)
+@@ -7151,7 +7154,10 @@ void migrate_enable(void)
}
#ifdef CONFIG_SCHED_DEBUG
diff --git a/patches/cpu-hotplug--Implement-CPU-pinning.patch b/patches/cpu-hotplug--Implement-CPU-pinning.patch
index 110e62239821..984756e93c6e 100644
--- a/patches/cpu-hotplug--Implement-CPU-pinning.patch
+++ b/patches/cpu-hotplug--Implement-CPU-pinning.patch
@@ -75,7 +75,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -857,6 +890,7 @@ static int take_cpu_down(void *_param)
+@@ -882,6 +915,7 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -83,7 +83,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -869,11 +903,14 @@ static int takedown_cpu(unsigned int cpu
+@@ -894,11 +928,14 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -98,7 +98,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -892,6 +929,7 @@ static int takedown_cpu(unsigned int cpu
+@@ -917,6 +954,7 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
index 5e5d5c412473..b4cd2f29d00f 100644
--- a/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
+++ b/patches/drivers-block-zram-Replace-bit-spinlocks-with-rtmute.patch
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline bool init_done(struct zram *zram)
{
-@@ -1173,6 +1208,7 @@ static bool zram_meta_alloc(struct zram
+@@ -1153,6 +1188,7 @@ static bool zram_meta_alloc(struct zram
if (!huge_class_size)
huge_class_size = zs_huge_class_size(zram->mem_pool);
diff --git a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
index 0bb321e9c0e9..62427bbf0caf 100644
--- a/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
+++ b/patches/drivers-zram-Don-t-disable-preemption-in-zcomp_strea.patch
@@ -63,7 +63,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* dynamic per-device compression frontend */
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
-@@ -1271,6 +1271,7 @@ static int __zram_bvec_read(struct zram
+@@ -1251,6 +1251,7 @@ static int __zram_bvec_read(struct zram
unsigned long handle;
unsigned int size;
void *src, *dst;
@@ -71,7 +71,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
zram_slot_lock(zram, index);
if (zram_test_flag(zram, index, ZRAM_WB)) {
-@@ -1301,6 +1302,7 @@ static int __zram_bvec_read(struct zram
+@@ -1281,6 +1282,7 @@ static int __zram_bvec_read(struct zram
size = zram_get_obj_size(zram, index);
@@ -79,7 +79,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
src = zs_map_object(zram->mem_pool, handle, ZS_MM_RO);
if (size == PAGE_SIZE) {
dst = kmap_atomic(page);
-@@ -1308,14 +1310,13 @@ static int __zram_bvec_read(struct zram
+@@ -1288,14 +1290,13 @@ static int __zram_bvec_read(struct zram
kunmap_atomic(dst);
ret = 0;
} else {
diff --git a/patches/drm-i915-Don-t-disable-interrupts-independently-of-t.patch b/patches/drm-i915-Don-t-disable-interrupts-independently-of-t.patch
new file mode 100644
index 000000000000..86c8b1a7ddd7
--- /dev/null
+++ b/patches/drm-i915-Don-t-disable-interrupts-independently-of-t.patch
@@ -0,0 +1,43 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 10 Apr 2019 11:01:37 +0200
+Subject: [PATCH] drm/i915: Don't disable interrupts independently of the
+ lock
+
+The locks (timeline->lock and rq->lock) need to be taken with disabled
+interrupts. This is done in __retire_engine_request() by disabling the
+interrupts independently of the locks itself.
+While local_irq_disable()+spin_lock() equals spin_lock_irq() on vanilla
+it does not on RT. Also, it is not obvious if there is a special reason
+to why the interrupts are disabled independently of the lock.
+
+Enable/disable interrupts as part of the locking instruction.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/gpu/drm/i915/i915_request.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -278,9 +278,7 @@ static void __retire_engine_request(stru
+
+ GEM_BUG_ON(!i915_request_completed(rq));
+
+- local_irq_disable();
+-
+- spin_lock(&engine->timeline.lock);
++ spin_lock_irq(&engine->timeline.lock);
+ GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
+ list_del_init(&rq->link);
+ spin_unlock(&engine->timeline.lock);
+@@ -294,9 +292,7 @@ static void __retire_engine_request(stru
+ GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
+ atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
+ }
+- spin_unlock(&rq->lock);
+-
+- local_irq_enable();
++ spin_unlock_irq(&rq->lock);
+
+ /*
+ * The backing object for the context is done after switching to the
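For readers unfamiliar with the RT constraint behind this change: on a PREEMPT_RT kernel spin_lock() maps to a sleeping lock, so it must not be taken inside a bare local_irq_disable() section; the interrupt state has to be managed by the lock primitive itself. Schematically (illustrative only, not verbatim driver code):

	/* Open-coded form: fine on mainline, invalid on RT. */
	local_irq_disable();
	spin_lock(&engine->timeline.lock);
	/* critical section */
	spin_unlock(&engine->timeline.lock);
	local_irq_enable();

	/* Combined form used by the patch: works on both. */
	spin_lock_irq(&engine->timeline.lock);
	/* critical section */
	spin_unlock_irq(&engine->timeline.lock);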
diff --git a/patches/fs-aio-simple-simple-work.patch b/patches/fs-aio-simple-simple-work.patch
index 3bb801052332..fd88a910bf0f 100644
--- a/patches/fs-aio-simple-simple-work.patch
+++ b/patches/fs-aio-simple-simple-work.patch
@@ -37,7 +37,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* signals when all in-flight requests are done
-@@ -602,9 +603,9 @@ static void free_ioctx_reqs(struct percp
+@@ -613,9 +614,9 @@ static void free_ioctx_reqs(struct percp
* and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
* now it's safe to cancel any that need to be.
*/
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct aio_kiocb *req;
spin_lock_irq(&ctx->ctx_lock);
-@@ -622,6 +623,14 @@ static void free_ioctx_users(struct perc
+@@ -633,6 +634,14 @@ static void free_ioctx_users(struct perc
percpu_ref_put(&ctx->reqs);
}
diff --git a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
index 692fbcb413a9..d62fd7769841 100644
--- a/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
+++ b/patches/fs-dcache-disable-preemption-on-i_dir_seq-s-write-si.patch
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
-@@ -688,7 +688,7 @@ struct inode {
+@@ -694,7 +694,7 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
char *i_link;
diff --git a/patches/fs-replace-bh_uptodate_lock-for-rt.patch b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
index 06c587f5b94b..df1ea262fe09 100644
--- a/patches/fs-replace-bh_uptodate_lock-for-rt.patch
+++ b/patches/fs-replace-bh_uptodate_lock-for-rt.patch
@@ -74,7 +74,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
EXPORT_SYMBOL(end_buffer_async_write);
-@@ -3361,6 +3353,7 @@ struct buffer_head *alloc_buffer_head(gf
+@@ -3368,6 +3360,7 @@ struct buffer_head *alloc_buffer_head(gf
struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
if (ret) {
INIT_LIST_HEAD(&ret->b_assoc_buffers);
diff --git a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
index ed1bfc8fa527..afb2ded6b3d9 100644
--- a/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
+++ b/patches/hotplug-duct-tape-RT-rwlock-usage-for-non-RT.patch
@@ -54,7 +54,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
-@@ -890,7 +894,9 @@ static int take_cpu_down(void *_param)
+@@ -915,7 +919,9 @@ static int take_cpu_down(void *_param)
static int takedown_cpu(unsigned int cpu)
{
@@ -64,7 +64,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int err;
-@@ -903,14 +909,18 @@ static int takedown_cpu(unsigned int cpu
+@@ -928,14 +934,18 @@ static int takedown_cpu(unsigned int cpu
*/
irq_lock_sparse();
@@ -83,7 +83,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* CPU refused to die */
irq_unlock_sparse();
/* Unpark the hotplug thread so we can rollback there */
-@@ -929,7 +939,9 @@ static int takedown_cpu(unsigned int cpu
+@@ -954,7 +964,9 @@ static int takedown_cpu(unsigned int cpu
wait_for_ap_thread(st, false);
BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
diff --git a/patches/hotplug-light-get-online-cpus.patch b/patches/hotplug-light-get-online-cpus.patch
index daffc72e18b0..e51ea6a7d519 100644
--- a/patches/hotplug-light-get-online-cpus.patch
+++ b/patches/hotplug-light-get-online-cpus.patch
@@ -64,7 +64,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void cpus_read_lock(void)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7215,6 +7215,7 @@ void migrate_disable(void)
+@@ -7216,6 +7216,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -72,7 +72,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
migrate_disable_update_cpus_allowed(p);
p->migrate_disable = 1;
-@@ -7280,12 +7281,15 @@ void migrate_enable(void)
+@@ -7281,12 +7282,15 @@ void migrate_enable(void)
arg.task = p;
arg.dest_cpu = dest_cpu;
diff --git a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
index 4c7f5ec4e865..a96787eea90e 100644
--- a/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
+++ b/patches/hrtimer-by-timers-by-default-into-the-softirq-context.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -314,7 +314,7 @@ static void hrtick_rq_init(struct rq *rq
+@@ -315,7 +315,7 @@ static void hrtick_rq_init(struct rq *rq
rq->hrtick_csd.info = rq;
#endif
diff --git a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
index 602c57e7a051..b8c1fb76840d 100644
--- a/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
+++ b/patches/hrtimer-consolidate-hrtimer_init-hrtimer_init_sleepe.patch
@@ -26,7 +26,7 @@ Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
-@@ -3364,10 +3364,9 @@ static bool blk_mq_poll_hybrid_sleep(str
+@@ -3373,10 +3373,9 @@ static bool blk_mq_poll_hybrid_sleep(str
kt = nsecs;
mode = HRTIMER_MODE_REL;
diff --git a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
index 3c1bba66bf70..dd1e7013dedd 100644
--- a/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
+++ b/patches/kernel-sched-Provide-a-pointer-to-the-valid-CPU-mask.patch
@@ -304,7 +304,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* One for us, one for whoever does the "release_task()" (usually
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -895,7 +895,7 @@ static inline bool is_per_cpu_kthread(st
+@@ -896,7 +896,7 @@ static inline bool is_per_cpu_kthread(st
*/
static inline bool is_cpu_allowed(struct task_struct *p, int cpu)
{
@@ -313,7 +313,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return false;
if (is_per_cpu_kthread(p))
-@@ -990,7 +990,7 @@ static int migration_cpu_stop(void *data
+@@ -991,7 +991,7 @@ static int migration_cpu_stop(void *data
local_irq_disable();
/*
* We need to explicitly wake pending tasks before running
@@ -322,7 +322,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* during wakeups, see set_cpus_allowed_ptr()'s TASK_WAKING test.
*/
sched_ttwu_pending();
-@@ -1021,7 +1021,7 @@ static int migration_cpu_stop(void *data
+@@ -1022,7 +1022,7 @@ static int migration_cpu_stop(void *data
*/
void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -331,7 +331,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
-@@ -1091,7 +1091,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1092,7 +1092,7 @@ static int __set_cpus_allowed_ptr(struct
goto out;
}
@@ -340,7 +340,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
-@@ -1254,10 +1254,10 @@ static int migrate_swap_stop(void *data)
+@@ -1255,10 +1255,10 @@ static int migrate_swap_stop(void *data)
if (task_cpu(arg->src_task) != arg->src_cpu)
goto unlock;
@@ -353,7 +353,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto unlock;
__migrate_swap_task(arg->src_task, arg->dst_cpu);
-@@ -1299,10 +1299,10 @@ int migrate_swap(struct task_struct *cur
+@@ -1300,10 +1300,10 @@ int migrate_swap(struct task_struct *cur
if (!cpu_active(arg.src_cpu) || !cpu_active(arg.dst_cpu))
goto out;
@@ -366,7 +366,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
goto out;
trace_sched_swap_numa(cur, arg.src_cpu, p, arg.dst_cpu);
-@@ -1447,7 +1447,7 @@ void kick_process(struct task_struct *p)
+@@ -1448,7 +1448,7 @@ void kick_process(struct task_struct *p)
EXPORT_SYMBOL_GPL(kick_process);
/*
@@ -375,7 +375,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* A few notes on cpu_active vs cpu_online:
*
-@@ -1487,14 +1487,14 @@ static int select_fallback_rq(int cpu, s
+@@ -1488,14 +1488,14 @@ static int select_fallback_rq(int cpu, s
for_each_cpu(dest_cpu, nodemask) {
if (!cpu_active(dest_cpu))
continue;
@@ -392,7 +392,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!is_cpu_allowed(p, dest_cpu))
continue;
-@@ -1538,7 +1538,7 @@ static int select_fallback_rq(int cpu, s
+@@ -1539,7 +1539,7 @@ static int select_fallback_rq(int cpu, s
}
/*
@@ -401,7 +401,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
static inline
int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
-@@ -1548,11 +1548,11 @@ int select_task_rq(struct task_struct *p
+@@ -1549,11 +1549,11 @@ int select_task_rq(struct task_struct *p
if (p->nr_cpus_allowed > 1)
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
else
@@ -415,7 +415,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* CPU.
*
* Since this is common to all placement strategies, this lives here.
-@@ -2420,7 +2420,7 @@ void wake_up_new_task(struct task_struct
+@@ -2421,7 +2421,7 @@ void wake_up_new_task(struct task_struct
#ifdef CONFIG_SMP
/*
* Fork balancing, do it here and not earlier because:
@@ -424,7 +424,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* - any previously selected CPU might disappear through hotplug
*
* Use __set_task_cpu() to avoid calling sched_class::migrate_task_rq,
-@@ -4286,7 +4286,7 @@ static int __sched_setscheduler(struct t
+@@ -4287,7 +4287,7 @@ static int __sched_setscheduler(struct t
* the entire root_domain to become SCHED_DEADLINE. We
* will also fail if there's no bandwidth available.
*/
@@ -433,7 +433,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
rq->rd->dl_bw.bw == 0) {
task_rq_unlock(rq, p, &rf);
return -EPERM;
-@@ -4885,7 +4885,7 @@ long sched_getaffinity(pid_t pid, struct
+@@ -4886,7 +4886,7 @@ long sched_getaffinity(pid_t pid, struct
goto out_unlock;
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -442,7 +442,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
out_unlock:
-@@ -5463,7 +5463,7 @@ int task_can_attach(struct task_struct *
+@@ -5464,7 +5464,7 @@ int task_can_attach(struct task_struct *
* allowed nodes is unnecessary. Thus, cpusets are not
* applicable for such threads. This prevents checking for
* success of set_cpus_allowed_ptr() on all attached tasks
@@ -451,7 +451,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*/
if (p->flags & PF_NO_SETAFFINITY) {
ret = -EINVAL;
-@@ -5490,7 +5490,7 @@ int migrate_task_to(struct task_struct *
+@@ -5491,7 +5491,7 @@ int migrate_task_to(struct task_struct *
if (curr_cpu == target_cpu)
return 0;
@@ -460,7 +460,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return -EINVAL;
/* TODO: This is not properly updating schedstats */
-@@ -5628,7 +5628,7 @@ static void migrate_tasks(struct rq *dea
+@@ -5629,7 +5629,7 @@ static void migrate_tasks(struct rq *dea
put_prev_task(rq, next);
/*
diff --git a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
index 9679860df17a..1484ac37cb86 100644
--- a/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
+++ b/patches/kernel-sched-move-stack-kprobe-clean-up-to-__put_tas.patch
@@ -43,7 +43,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
security_task_free(tsk);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2759,15 +2759,6 @@ static struct rq *finish_task_switch(str
+@@ -2760,15 +2760,6 @@ static struct rq *finish_task_switch(str
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
diff --git a/patches/localversion.patch b/patches/localversion.patch
index 03a80b8b0e80..72cdd2b3c760 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt4
++-rt5
diff --git a/patches/lockdep-no-softirq-accounting-on-rt.patch b/patches/lockdep-no-softirq-accounting-on-rt.patch
index a54d229161e7..5cc116f7fe18 100644
--- a/patches/lockdep-no-softirq-accounting-on-rt.patch
+++ b/patches/lockdep-no-softirq-accounting-on-rt.patch
@@ -51,7 +51,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
defined(CONFIG_PREEMPT_TRACER)
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
-@@ -3764,6 +3764,7 @@ static void check_flags(unsigned long fl
+@@ -3767,6 +3767,7 @@ static void check_flags(unsigned long fl
}
}
@@ -59,7 +59,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* We dont accurately track softirq state in e.g.
* hardirq contexts (such as on 4KSTACKS), so only
-@@ -3778,6 +3779,7 @@ static void check_flags(unsigned long fl
+@@ -3781,6 +3782,7 @@ static void check_flags(unsigned long fl
DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
}
}
diff --git a/patches/mm-enable-slub.patch b/patches/mm-enable-slub.patch
index deb86fda7606..0d58ad12e092 100644
--- a/patches/mm-enable-slub.patch
+++ b/patches/mm-enable-slub.patch
@@ -13,7 +13,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/slub.c
+++ b/mm/slub.c
-@@ -3729,6 +3729,11 @@ static void list_slab_objects(struct kme
+@@ -3732,6 +3732,11 @@ static void list_slab_objects(struct kme
const char *text)
{
#ifdef CONFIG_SLUB_DEBUG
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void *addr = page_address(page);
void *p;
unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
-@@ -3748,8 +3753,10 @@ static void list_slab_objects(struct kme
+@@ -3751,8 +3756,10 @@ static void list_slab_objects(struct kme
slab_unlock(page);
bitmap_free(map);
#endif
diff --git a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
index 243866a98677..30e61badc077 100644
--- a/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
+++ b/patches/mm-memcontrol-Don-t-call-schedule_work_on-in-preempt.patch
@@ -48,7 +48,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
-@@ -2061,7 +2061,7 @@ static void drain_all_stock(struct mem_c
+@@ -2072,7 +2072,7 @@ static void drain_all_stock(struct mem_c
* as well as workers from this path always operate on the local
* per-cpu data. CPU up doesn't touch memcg_stock at all.
*/
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
for_each_online_cpu(cpu) {
struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu);
struct mem_cgroup *memcg;
-@@ -2081,7 +2081,7 @@ static void drain_all_stock(struct mem_c
+@@ -2092,7 +2092,7 @@ static void drain_all_stock(struct mem_c
}
css_put(&memcg->css);
}
diff --git a/patches/mm-memcontrol-do_not_disable_irq.patch b/patches/mm-memcontrol-do_not_disable_irq.patch
index 93567eb71ff7..6ebe432afd7b 100644
--- a/patches/mm-memcontrol-do_not_disable_irq.patch
+++ b/patches/mm-memcontrol-do_not_disable_irq.patch
@@ -29,7 +29,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* Whether legacy memory+swap accounting is active */
static bool do_memsw_account(void)
{
-@@ -4844,12 +4847,12 @@ static int mem_cgroup_move_account(struc
+@@ -4853,12 +4856,12 @@ static int mem_cgroup_move_account(struc
ret = 0;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
out_unlock:
unlock_page(page);
out:
-@@ -5968,10 +5971,10 @@ void mem_cgroup_commit_charge(struct pag
+@@ -5977,10 +5980,10 @@ void mem_cgroup_commit_charge(struct pag
commit_charge(page, memcg, lrucare);
@@ -57,7 +57,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (do_memsw_account() && PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) };
-@@ -6040,7 +6043,7 @@ static void uncharge_batch(const struct
+@@ -6049,7 +6052,7 @@ static void uncharge_batch(const struct
memcg_oom_recover(ug->memcg);
}
@@ -66,7 +66,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
__mod_memcg_state(ug->memcg, MEMCG_RSS, -ug->nr_anon);
__mod_memcg_state(ug->memcg, MEMCG_CACHE, -ug->nr_file);
__mod_memcg_state(ug->memcg, MEMCG_RSS_HUGE, -ug->nr_huge);
-@@ -6048,7 +6051,7 @@ static void uncharge_batch(const struct
+@@ -6057,7 +6060,7 @@ static void uncharge_batch(const struct
__count_memcg_events(ug->memcg, PGPGOUT, ug->pgpgout);
__this_cpu_add(ug->memcg->stat_cpu->nr_page_events, nr_pages);
memcg_check_events(ug->memcg, ug->dummy_page);
@@ -75,7 +75,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!mem_cgroup_is_root(ug->memcg))
css_put_many(&ug->memcg->css, nr_pages);
-@@ -6211,10 +6214,10 @@ void mem_cgroup_migrate(struct page *old
+@@ -6220,10 +6223,10 @@ void mem_cgroup_migrate(struct page *old
commit_charge(newpage, memcg, false);
@@ -88,7 +88,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
DEFINE_STATIC_KEY_FALSE(memcg_sockets_enabled_key);
-@@ -6406,6 +6409,7 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6415,6 +6418,7 @@ void mem_cgroup_swapout(struct page *pag
struct mem_cgroup *memcg, *swap_memcg;
unsigned int nr_entries;
unsigned short oldid;
@@ -96,7 +96,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
VM_BUG_ON_PAGE(PageLRU(page), page);
VM_BUG_ON_PAGE(page_count(page), page);
-@@ -6451,13 +6455,17 @@ void mem_cgroup_swapout(struct page *pag
+@@ -6460,13 +6464,17 @@ void mem_cgroup_swapout(struct page *pag
* important here to have the interrupts disabled because it is the
* only synchronisation we have for updating the per-CPU variables.
*/
diff --git a/patches/mm-vmalloc-use-get-cpu-light.patch b/patches/mm-vmalloc-use-get-cpu-light.patch
index d874456ca78a..ff5a910c7d43 100644
--- a/patches/mm-vmalloc-use-get-cpu-light.patch
+++ b/patches/mm-vmalloc-use-get-cpu-light.patch
@@ -12,7 +12,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
-@@ -848,7 +848,7 @@ static void *new_vmap_block(unsigned int
+@@ -852,7 +852,7 @@ static void *new_vmap_block(unsigned int
struct vmap_block *vb;
struct vmap_area *va;
unsigned long vb_idx;
@@ -21,7 +21,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void *vaddr;
node = numa_node_id();
-@@ -891,11 +891,12 @@ static void *new_vmap_block(unsigned int
+@@ -895,11 +895,12 @@ static void *new_vmap_block(unsigned int
BUG_ON(err);
radix_tree_preload_end();
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
return vaddr;
}
-@@ -964,6 +965,7 @@ static void *vb_alloc(unsigned long size
+@@ -968,6 +969,7 @@ static void *vb_alloc(unsigned long size
struct vmap_block *vb;
void *vaddr = NULL;
unsigned int order;
@@ -44,7 +44,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
BUG_ON(offset_in_page(size));
BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC);
-@@ -978,7 +980,8 @@ static void *vb_alloc(unsigned long size
+@@ -982,7 +984,8 @@ static void *vb_alloc(unsigned long size
order = get_order(size);
rcu_read_lock();
@@ -54,7 +54,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_rcu(vb, &vbq->free, free_list) {
unsigned long pages_off;
-@@ -1001,7 +1004,7 @@ static void *vb_alloc(unsigned long size
+@@ -1005,7 +1008,7 @@ static void *vb_alloc(unsigned long size
break;
}
diff --git a/patches/peterz-percpu-rwsem-rt.patch b/patches/peterz-percpu-rwsem-rt.patch
index c2955de28679..e1bacad793c6 100644
--- a/patches/peterz-percpu-rwsem-rt.patch
+++ b/patches/peterz-percpu-rwsem-rt.patch
@@ -45,7 +45,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
/*
* New lock request. Walk all POSIX locks and look for conflicts. If
-@@ -1312,7 +1312,7 @@ static int posix_lock_inode(struct inode
+@@ -1317,7 +1317,7 @@ static int posix_lock_inode(struct inode
}
out:
spin_unlock(&ctx->flc_lock);
@@ -54,7 +54,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
/*
* Free any unused locks.
*/
-@@ -1584,7 +1584,7 @@ int __break_lease(struct inode *inode, u
+@@ -1589,7 +1589,7 @@ int __break_lease(struct inode *inode, u
return error;
}
@@ -63,7 +63,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
-@@ -1636,13 +1636,13 @@ int __break_lease(struct inode *inode, u
+@@ -1641,13 +1641,13 @@ int __break_lease(struct inode *inode, u
locks_insert_block(fl, new_fl, leases_conflict);
trace_break_lease_block(inode, new_fl);
spin_unlock(&ctx->flc_lock);
@@ -79,7 +79,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
trace_break_lease_unblock(inode, new_fl);
locks_delete_block(new_fl);
-@@ -1659,7 +1659,7 @@ int __break_lease(struct inode *inode, u
+@@ -1664,7 +1664,7 @@ int __break_lease(struct inode *inode, u
}
out:
spin_unlock(&ctx->flc_lock);
@@ -88,7 +88,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
locks_free_lock(new_fl);
return error;
-@@ -1729,7 +1729,7 @@ int fcntl_getlease(struct file *filp)
+@@ -1734,7 +1734,7 @@ int fcntl_getlease(struct file *filp)
ctx = smp_load_acquire(&inode->i_flctx);
if (ctx && !list_empty_careful(&ctx->flc_lease)) {
@@ -97,7 +97,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
-@@ -1739,7 +1739,7 @@ int fcntl_getlease(struct file *filp)
+@@ -1744,7 +1744,7 @@ int fcntl_getlease(struct file *filp)
break;
}
spin_unlock(&ctx->flc_lock);
@@ -106,7 +106,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
}
-@@ -1813,7 +1813,7 @@ generic_add_lease(struct file *filp, lon
+@@ -1818,7 +1818,7 @@ generic_add_lease(struct file *filp, lon
return -EINVAL;
}
@@ -115,7 +115,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
time_out_leases(inode, &dispose);
error = check_conflicting_open(dentry, arg, lease->fl_flags);
-@@ -1884,7 +1884,7 @@ generic_add_lease(struct file *filp, lon
+@@ -1889,7 +1889,7 @@ generic_add_lease(struct file *filp, lon
lease->fl_lmops->lm_setup(lease, priv);
out:
spin_unlock(&ctx->flc_lock);
@@ -124,7 +124,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
if (is_deleg)
inode_unlock(inode);
-@@ -1907,7 +1907,7 @@ static int generic_delete_lease(struct f
+@@ -1912,7 +1912,7 @@ static int generic_delete_lease(struct f
return error;
}
@@ -133,7 +133,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
spin_lock(&ctx->flc_lock);
list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
if (fl->fl_file == filp &&
-@@ -1920,7 +1920,7 @@ static int generic_delete_lease(struct f
+@@ -1925,7 +1925,7 @@ static int generic_delete_lease(struct f
if (victim)
error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
spin_unlock(&ctx->flc_lock);
@@ -142,7 +142,7 @@ Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
locks_dispose_list(&dispose);
return error;
}
-@@ -2643,13 +2643,13 @@ locks_remove_lease(struct file *filp, st
+@@ -2648,13 +2648,13 @@ locks_remove_lease(struct file *filp, st
if (list_empty(&ctx->flc_lease))
return;
diff --git a/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch b/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
index 28223738e843..31d8be5e4f23 100644
--- a/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
+++ b/patches/powerpc-pseries-iommu-Use-a-locallock-instead-local_ir.patch
@@ -12,11 +12,9 @@ Use local_irq_save() instead of local_irq_disable().
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
+ arch/powerpc/platforms/pseries/iommu.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
-diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
-index 8fc8fe0b98485..14ecedbd8ff12 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -38,6 +38,7 @@
@@ -27,7 +25,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/rtas.h>
-@@ -191,6 +192,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
+@@ -191,6 +192,7 @@ static int tce_build_pSeriesLP(struct io
}
static DEFINE_PER_CPU(__be64 *, tce_page);
@@ -35,7 +33,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
long npages, unsigned long uaddr,
-@@ -211,7 +213,8 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+@@ -211,7 +213,8 @@ static int tce_buildmulti_pSeriesLP(stru
direction, attrs);
}
@@ -45,7 +43,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
tcep = __this_cpu_read(tce_page);
-@@ -222,7 +225,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+@@ -222,7 +225,7 @@ static int tce_buildmulti_pSeriesLP(stru
tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
/* If allocation fails, fall back to the loop implementation */
if (!tcep) {
@@ -54,7 +52,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
direction, attrs);
}
-@@ -256,7 +259,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
+@@ -256,7 +259,7 @@ static int tce_buildmulti_pSeriesLP(stru
tcenum += limit;
} while (npages > 0 && !rc);
@@ -63,7 +61,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
if (unlikely(rc == H_NOT_ENOUGH_RESOURCES)) {
ret = (int)rc;
-@@ -414,13 +417,14 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+@@ -414,13 +417,14 @@ static int tce_setrange_multi_pSeriesLP(
u64 rc = 0;
long l, limit;
@@ -80,7 +78,7 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
return -ENOMEM;
}
__this_cpu_write(tce_page, tcep);
-@@ -466,7 +470,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
+@@ -466,7 +470,7 @@ static int tce_setrange_multi_pSeriesLP(
/* error cleanup: caller will clear whole range */
@@ -89,6 +87,3 @@ index 8fc8fe0b98485..14ecedbd8ff12 100644
return rc;
}
---
-2.20.1
-
diff --git a/patches/powerpc-stackprotector-work-around-stack-guard-init-.patch b/patches/powerpc-stackprotector-work-around-stack-guard-init-.patch
index 3464b5daba51..d7e03b2118b1 100644
--- a/patches/powerpc-stackprotector-work-around-stack-guard-init-.patch
+++ b/patches/powerpc-stackprotector-work-around-stack-guard-init-.patch
@@ -10,14 +10,12 @@ as the initial value.
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- arch/powerpc/include/asm/stackprotector.h | 4 ++++
+ arch/powerpc/include/asm/stackprotector.h | 4 ++++
1 file changed, 4 insertions(+)
-diff --git a/arch/powerpc/include/asm/stackprotector.h b/arch/powerpc/include/asm/stackprotector.h
-index 1c8460e235838..e764eb4b6c284 100644
--- a/arch/powerpc/include/asm/stackprotector.h
+++ b/arch/powerpc/include/asm/stackprotector.h
-@@ -24,7 +24,11 @@ static __always_inline void boot_init_stack_canary(void)
+@@ -24,7 +24,11 @@ static __always_inline void boot_init_st
unsigned long canary;
/* Try to get a semi random initial value. */
@@ -29,6 +27,3 @@ index 1c8460e235838..e764eb4b6c284 100644
canary ^= mftb();
canary ^= LINUX_VERSION_CODE;
canary &= CANARY_MASK;
---
-2.20.1
-
diff --git a/patches/preempt-lazy-support.patch b/patches/preempt-lazy-support.patch
index 58673d82253e..ce3a94694851 100644
--- a/patches/preempt-lazy-support.patch
+++ b/patches/preempt-lazy-support.patch
@@ -250,7 +250,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
goto again;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -504,6 +504,48 @@ void resched_curr(struct rq *rq)
+@@ -505,6 +505,48 @@ void resched_curr(struct rq *rq)
trace_sched_wake_idle_without_ipi(cpu);
}
@@ -299,7 +299,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
void resched_cpu(int cpu)
{
struct rq *rq = cpu_rq(cpu);
-@@ -2423,6 +2465,9 @@ int sched_fork(unsigned long clone_flags
+@@ -2424,6 +2466,9 @@ int sched_fork(unsigned long clone_flags
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -309,7 +309,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -3483,6 +3528,7 @@ static void __sched notrace __schedule(b
+@@ -3484,6 +3529,7 @@ static void __sched notrace __schedule(b
next = pick_next_task(rq, prev, &rf);
clear_tsk_need_resched(prev);
@@ -317,7 +317,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
clear_preempt_need_resched();
if (likely(prev != next)) {
-@@ -3663,6 +3709,30 @@ static void __sched notrace preempt_sche
+@@ -3664,6 +3710,30 @@ static void __sched notrace preempt_sche
} while (need_resched());
}
@@ -348,7 +348,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_PREEMPT
/*
* this is the entry point to schedule() from in-kernel preemption
-@@ -3677,7 +3747,8 @@ asmlinkage __visible void __sched notrac
+@@ -3678,7 +3748,8 @@ asmlinkage __visible void __sched notrac
*/
if (likely(!preemptible()))
return;
@@ -358,7 +358,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_schedule_common();
}
NOKPROBE_SYMBOL(preempt_schedule);
-@@ -3704,6 +3775,9 @@ asmlinkage __visible void __sched notrac
+@@ -3705,6 +3776,9 @@ asmlinkage __visible void __sched notrac
if (likely(!preemptible()))
return;
@@ -368,7 +368,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
do {
/*
* Because the function tracer can trace preempt_count_sub()
-@@ -5470,7 +5544,9 @@ void init_idle(struct task_struct *idle,
+@@ -5471,7 +5545,9 @@ void init_idle(struct task_struct *idle,
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -379,7 +379,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -7194,6 +7270,7 @@ void migrate_disable(void)
+@@ -7195,6 +7271,7 @@ void migrate_disable(void)
}
preempt_disable();
@@ -387,7 +387,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
pin_current_cpu();
migrate_disable_update_cpus_allowed(p);
-@@ -7261,6 +7338,7 @@ void migrate_enable(void)
+@@ -7262,6 +7339,7 @@ void migrate_enable(void)
arg.dest_cpu = dest_cpu;
unpin_current_cpu();
@@ -395,7 +395,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
preempt_enable();
stop_one_cpu(task_cpu(p), migration_cpu_stop, &arg);
tlb_migrate_finish(p->mm);
-@@ -7269,6 +7347,7 @@ void migrate_enable(void)
+@@ -7270,6 +7348,7 @@ void migrate_enable(void)
}
}
unpin_current_cpu();
diff --git a/patches/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch b/patches/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
index 23d2e17c084a..9f34149364b1 100644
--- a/patches/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
+++ b/patches/printk-devkmsg-llseek-reset-clear-if-it-is-lost.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -713,6 +713,7 @@ static loff_t devkmsg_llseek(struct file
+@@ -725,6 +725,7 @@ static loff_t devkmsg_llseek(struct file
{
struct devkmsg_user *user = file->private_data;
loff_t ret;
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (!user)
return -EBADF;
-@@ -735,7 +736,7 @@ static loff_t devkmsg_llseek(struct file
+@@ -747,7 +748,7 @@ static loff_t devkmsg_llseek(struct file
* changes no global state, and does not clear anything.
*/
for (;;) {
@@ -32,7 +32,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
ret = prb_iter_seek(&user->iter, clear_seq);
if (ret > 0) {
/* seeked to clear seq */
-@@ -752,6 +753,10 @@ static loff_t devkmsg_llseek(struct file
+@@ -764,6 +765,10 @@ static loff_t devkmsg_llseek(struct file
break;
}
/* iterator invalid, start over */
diff --git a/patches/printk-only-allow-kernel-to-emergency-message.patch b/patches/printk-only-allow-kernel-to-emergency-message.patch
index e3e653245fe6..c5f626649b81 100644
--- a/patches/printk-only-allow-kernel-to-emergency-message.patch
+++ b/patches/printk-only-allow-kernel-to-emergency-message.patch
@@ -15,7 +15,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1688,7 +1688,8 @@ static void printk_write_history(struct
+@@ -1700,7 +1700,8 @@ static void printk_write_history(struct
* The console_lock must be held.
*/
static void call_console_drivers(u64 seq, const char *ext_text, size_t ext_len,
@@ -25,7 +25,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
struct console *con;
-@@ -1708,13 +1709,14 @@ static void call_console_drivers(u64 seq
+@@ -1720,13 +1721,14 @@ static void call_console_drivers(u64 seq
con->wrote_history = 1;
con->printk_seq = seq - 1;
}
@@ -42,7 +42,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/* skip emergency messages, already printed */
if (con->printk_seq < seq)
con->printk_seq = seq;
-@@ -1882,7 +1884,11 @@ asmlinkage int vprintk_emit(int facility
+@@ -1894,7 +1896,11 @@ asmlinkage int vprintk_emit(int facility
* - text points to beginning of text
* - there is room before text for prefix
*/
@@ -55,7 +55,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
cont_add(ctx, cpu, facility, level, lflags, text, text_len);
-@@ -2645,8 +2651,8 @@ static int printk_kthread_func(void *dat
+@@ -2657,8 +2663,8 @@ static int printk_kthread_func(void *dat
&len, printk_time);
console_lock();
diff --git a/patches/printk-set-deferred-to-default-loglevel-enforce-mask.patch b/patches/printk-set-deferred-to-default-loglevel-enforce-mask.patch
index 41ee61ead7dc..622655d6fb78 100644
--- a/patches/printk-set-deferred-to-default-loglevel-enforce-mask.patch
+++ b/patches/printk-set-deferred-to-default-loglevel-enforce-mask.patch
@@ -18,7 +18,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -1882,7 +1882,7 @@ asmlinkage int vprintk_emit(int facility
+@@ -1894,7 +1894,7 @@ asmlinkage int vprintk_emit(int facility
* - text points to beginning of text
* - there is room before text for prefix
*/
@@ -27,7 +27,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if ((lflags & LOG_CONT) || !(lflags & LOG_NEWLINE)) {
cont_add(ctx, cpu, facility, level, lflags, text, text_len);
-@@ -2674,7 +2674,7 @@ late_initcall(init_printk_kthread);
+@@ -2686,7 +2686,7 @@ late_initcall(init_printk_kthread);
static int vprintk_deferred(const char *fmt, va_list args)
{
diff --git a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
index 9040d7722458..3b319dce8254 100644
--- a/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
+++ b/patches/ptrace-fix-ptrace-vs-tasklist_lock-race.patch
@@ -115,7 +115,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
spin_unlock_irq(&task->sighand->siglock);
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1366,6 +1366,18 @@ int migrate_swap(struct task_struct *cur
+@@ -1367,6 +1367,18 @@ int migrate_swap(struct task_struct *cur
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -134,7 +134,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1410,7 +1422,7 @@ unsigned long wait_task_inactive(struct
+@@ -1411,7 +1423,7 @@ unsigned long wait_task_inactive(struct
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -143,7 +143,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
return 0;
cpu_relax();
}
-@@ -1425,7 +1437,8 @@ unsigned long wait_task_inactive(struct
+@@ -1426,7 +1438,8 @@ unsigned long wait_task_inactive(struct
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
diff --git a/patches/random-make-it-work-on-rt.patch b/patches/random-make-it-work-on-rt.patch
index d068a6106c0c..e9a22669fbf9 100644
--- a/patches/random-make-it-work-on-rt.patch
+++ b/patches/random-make-it-work-on-rt.patch
@@ -93,7 +93,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
-@@ -70,6 +70,7 @@ struct irq_desc {
+@@ -71,6 +71,7 @@ struct irq_desc {
unsigned int irqs_unhandled;
atomic_t threads_handled;
int threads_handled_last;
diff --git a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
index a5931d0a67a1..3ef16a27f13c 100644
--- a/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
+++ b/patches/rcu-Eliminate-softirq-processing-from-rcutree.patch
@@ -49,7 +49,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#include "tree.h"
#include "rcu.h"
-@@ -2716,7 +2722,7 @@ EXPORT_SYMBOL_GPL(rcu_fwd_progress_check
+@@ -2725,7 +2731,7 @@ EXPORT_SYMBOL_GPL(rcu_fwd_progress_check
* structures. This may be called only from the CPU to whom the rdp
* belongs.
*/
@@ -58,7 +58,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
{
unsigned long flags;
struct rcu_data *rdp = raw_cpu_ptr(&rcu_data);
-@@ -2758,6 +2764,8 @@ static __latent_entropy void rcu_process
+@@ -2767,6 +2773,8 @@ static __latent_entropy void rcu_process
trace_rcu_utilization(TPS("End RCU core"));
}
@@ -67,7 +67,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Schedule RCU callback invocation. If the running implementation of RCU
* does not support RCU priority boosting, just do a direct call, otherwise
-@@ -2769,18 +2777,105 @@ static void invoke_rcu_callbacks(struct
+@@ -2778,18 +2786,105 @@ static void invoke_rcu_callbacks(struct
{
if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
return;
@@ -179,7 +179,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Handle any core-RCU processing required by a call_rcu() invocation.
-@@ -3777,7 +3872,6 @@ void __init rcu_init(void)
+@@ -3786,7 +3881,6 @@ void __init rcu_init(void)
rcu_init_one();
if (dump_tree)
rcu_dump_rcu_node_tree();
diff --git a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
index 962fe570d8b6..600d12335e9b 100644
--- a/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
+++ b/patches/rcu-enable-rcu_normal_after_boot-by-default-for-RT.patch
@@ -19,7 +19,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
-@@ -67,7 +67,7 @@ extern int rcu_expedited; /* from sysctl
+@@ -68,7 +68,7 @@ extern int rcu_expedited; /* from sysctl
module_param(rcu_expedited, int, 0);
extern int rcu_normal; /* from sysctl */
module_param(rcu_normal, int, 0);
diff --git a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
index f2537f4c4dec..6deb63893f8e 100644
--- a/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
+++ b/patches/rt-Increase-decrease-the-nr-of-migratory-tasks-when-.patch
@@ -80,7 +80,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7151,6 +7151,47 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7152,6 +7152,47 @@ const u32 sched_prio_to_wmult[40] = {
#if defined(CONFIG_PREEMPT_COUNT) && defined(CONFIG_SMP)
@@ -128,7 +128,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
void migrate_disable(void)
{
struct task_struct *p = current;
-@@ -7174,10 +7215,9 @@ void migrate_disable(void)
+@@ -7175,10 +7216,9 @@ void migrate_disable(void)
}
preempt_disable();
@@ -141,7 +141,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
preempt_enable();
}
-@@ -7209,9 +7249,8 @@ void migrate_enable(void)
+@@ -7210,9 +7250,8 @@ void migrate_enable(void)
preempt_disable();
diff --git a/patches/rtmutex-add-sleeping-lock-implementation.patch b/patches/rtmutex-add-sleeping-lock-implementation.patch
index 2f369d4105dd..be2434aa1dd1 100644
--- a/patches/rtmutex-add-sleeping-lock-implementation.patch
+++ b/patches/rtmutex-add-sleeping-lock-implementation.patch
@@ -1121,7 +1121,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
# include "rtmutex-debug.h"
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -412,9 +412,15 @@ static bool set_nr_if_polling(struct tas
+@@ -413,9 +413,15 @@ static bool set_nr_if_polling(struct tas
* This function must be used as-if it were wake_up_process(); IOW the task
* must be ready to be woken at this location.
*/
@@ -1139,7 +1139,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* Atomically grab the task, if ->wake_q is !nil already it means
-@@ -437,24 +443,32 @@ void wake_q_add(struct wake_q_head *head
+@@ -438,24 +444,32 @@ void wake_q_add(struct wake_q_head *head
head->lastp = &node->next;
}
diff --git a/patches/rtmutex-annotate-sleeping-lock-context.patch b/patches/rtmutex-annotate-sleeping-lock-context.patch
index 6e95d8d8b0e4..ea2ff1cc73e7 100644
--- a/patches/rtmutex-annotate-sleeping-lock-context.patch
+++ b/patches/rtmutex-annotate-sleeping-lock-context.patch
@@ -241,7 +241,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -7320,4 +7320,49 @@ void migrate_enable(void)
+@@ -7321,4 +7321,49 @@ void migrate_enable(void)
preempt_enable();
}
EXPORT_SYMBOL(migrate_enable);
diff --git a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
index 9c0914e9c549..e6c1e60c9c55 100644
--- a/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
+++ b/patches/sched-Allow-pinned-user-tasks-to-be-awakened-to-the-.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -921,7 +921,7 @@ static inline bool is_cpu_allowed(struct
+@@ -922,7 +922,7 @@ static inline bool is_cpu_allowed(struct
if (!cpumask_test_cpu(cpu, p->cpus_ptr))
return false;
diff --git a/patches/sched-might-sleep-do-not-account-rcu-depth.patch b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
index b57df6839ad7..2c79dda1b080 100644
--- a/patches/sched-might-sleep-do-not-account-rcu-depth.patch
+++ b/patches/sched-might-sleep-do-not-account-rcu-depth.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/* Internal to kernel */
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -6167,7 +6167,7 @@ void __init sched_init(void)
+@@ -6168,7 +6168,7 @@ void __init sched_init(void)
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
diff --git a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
index 473543a84683..7e26677baf56 100644
--- a/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
+++ b/patches/sched-migrate_disable-Add-export_symbol_gpl-for-__mi.patch
@@ -21,7 +21,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1030,6 +1030,7 @@ int __migrate_disabled(struct task_struc
+@@ -1031,6 +1031,7 @@ int __migrate_disabled(struct task_struc
{
return p->migrate_disable;
}
diff --git a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
index 5a7a387f1030..92679c209138 100644
--- a/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
+++ b/patches/sched-migrate_disable-fallback-to-preempt_disable-in.patch
@@ -89,7 +89,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1048,7 +1048,7 @@ void set_cpus_allowed_common(struct task
+@@ -1049,7 +1049,7 @@ void set_cpus_allowed_common(struct task
p->nr_cpus_allowed = cpumask_weight(new_mask);
}
@@ -98,7 +98,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
int __migrate_disabled(struct task_struct *p)
{
return p->migrate_disable;
-@@ -1088,7 +1088,7 @@ static void __do_set_cpus_allowed_tail(s
+@@ -1089,7 +1089,7 @@ static void __do_set_cpus_allowed_tail(s
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -107,7 +107,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
lockdep_assert_held(&p->pi_lock);
-@@ -1161,7 +1161,7 @@ static int __set_cpus_allowed_ptr(struct
+@@ -1162,7 +1162,7 @@ static int __set_cpus_allowed_ptr(struct
if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p))
goto out;
@@ -116,7 +116,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
if (__migrate_disabled(p)) {
p->migrate_disable_update = 1;
goto out;
-@@ -7176,7 +7176,7 @@ const u32 sched_prio_to_wmult[40] = {
+@@ -7177,7 +7177,7 @@ const u32 sched_prio_to_wmult[40] = {
#undef CREATE_TRACE_POINTS
@@ -125,7 +125,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
static inline void
update_nr_migratory(struct task_struct *p, long delta)
-@@ -7324,45 +7324,44 @@ EXPORT_SYMBOL(migrate_enable);
+@@ -7325,45 +7325,44 @@ EXPORT_SYMBOL(migrate_enable);
#elif !defined(CONFIG_SMP) && defined(CONFIG_PREEMPT_RT_BASE)
void migrate_disable(void)
{
@@ -180,7 +180,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
#endif
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
-@@ -978,7 +978,7 @@ void proc_sched_show_task(struct task_st
+@@ -982,7 +982,7 @@ void proc_sched_show_task(struct task_st
P(dl.runtime);
P(dl.deadline);
}
diff --git a/patches/sched-mmdrop-delayed.patch b/patches/sched-mmdrop-delayed.patch
index bedfa7dc6192..039b11a60880 100644
--- a/patches/sched-mmdrop-delayed.patch
+++ b/patches/sched-mmdrop-delayed.patch
@@ -77,7 +77,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
struct mm_struct *mm;
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2747,9 +2747,13 @@ static struct rq *finish_task_switch(str
+@@ -2748,9 +2748,13 @@ static struct rq *finish_task_switch(str
* provided by mmdrop(),
* - a sync_core for SYNC_CORE.
*/
@@ -92,7 +92,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
-@@ -5568,6 +5572,8 @@ void sched_setnuma(struct task_struct *p
+@@ -5569,6 +5573,8 @@ void sched_setnuma(struct task_struct *p
#endif /* CONFIG_NUMA_BALANCING */
#ifdef CONFIG_HOTPLUG_CPU
@@ -101,7 +101,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
/*
* Ensure that the idle task is using init_mm right before its CPU goes
* offline.
-@@ -5583,7 +5589,11 @@ void idle_task_exit(void)
+@@ -5584,7 +5590,11 @@ void idle_task_exit(void)
current->active_mm = &init_mm;
finish_arch_post_lock_switch();
}
@@ -114,7 +114,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
}
/*
-@@ -5895,6 +5905,10 @@ int sched_cpu_dying(unsigned int cpu)
+@@ -5896,6 +5906,10 @@ int sched_cpu_dying(unsigned int cpu)
update_max_interval();
nohz_balance_exit_idle(rq);
hrtick_clear(rq);
diff --git a/patches/sched-rt-mutex-wakeup.patch b/patches/sched-rt-mutex-wakeup.patch
index 1396ef34b946..c48cc5f741f1 100644
--- a/patches/sched-rt-mutex-wakeup.patch
+++ b/patches/sched-rt-mutex-wakeup.patch
@@ -36,7 +36,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
#ifdef CONFIG_SMP
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -2016,8 +2016,27 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2017,8 +2017,27 @@ try_to_wake_up(struct task_struct *p, un
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
smp_mb__after_spinlock();
@@ -65,7 +65,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
trace_sched_waking(p);
-@@ -2182,6 +2201,18 @@ int wake_up_process(struct task_struct *
+@@ -2183,6 +2202,18 @@ int wake_up_process(struct task_struct *
}
EXPORT_SYMBOL(wake_up_process);
diff --git a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
index d416fe575088..e4b2909296de 100644
--- a/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
+++ b/patches/sched-workqueue-Only-wake-up-idle-workers-if-not-blo.patch
@@ -23,7 +23,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3509,8 +3509,10 @@ static void __sched notrace __schedule(b
+@@ -3510,8 +3510,10 @@ static void __sched notrace __schedule(b
* If a worker went to sleep, notify and ask workqueue
* whether it wants to wake up a task to maintain
* concurrency.
diff --git a/patches/scsi-fcoe-rt-aware.patch b/patches/scsi-fcoe-rt-aware.patch
index ec57dcb505d0..906fa64e13b0 100644
--- a/patches/scsi-fcoe-rt-aware.patch
+++ b/patches/scsi-fcoe-rt-aware.patch
@@ -70,7 +70,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
-@@ -835,7 +835,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -838,7 +838,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
INIT_LIST_HEAD(&del_list);
@@ -79,7 +79,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
-@@ -871,7 +871,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
+@@ -874,7 +874,7 @@ static unsigned long fcoe_ctlr_age_fcfs(
sel_time = fcf->time;
}
}
diff --git a/patches/serial-8250-export-symbols-which-are-used-by-symbols.patch b/patches/serial-8250-export-symbols-which-are-used-by-symbols.patch
index f98c1a7154a6..d7e38987dc16 100644
--- a/patches/serial-8250-export-symbols-which-are-used-by-symbols.patch
+++ b/patches/serial-8250-export-symbols-which-are-used-by-symbols.patch
@@ -28,7 +28,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
-@@ -2188,6 +2188,7 @@ int is_console_locked(void)
+@@ -2200,6 +2200,7 @@ int is_console_locked(void)
{
return console_locked;
}
diff --git a/patches/series b/patches/series
index 6a6ecc2d9e8b..811f914e0f9a 100644
--- a/patches/series
+++ b/patches/series
@@ -11,15 +11,20 @@ tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
# POSTED by others
############################################################
# AT91
-# Alexandre Belloni | [PATCH v7 0/7] clocksource: rework Atmel TCB timer driver
-# Date: Thu, 13 Sep 2018 13:30:18 +0200
-0001-ARM-at91-add-TCB-registers-definitions.patch
-0002-clocksource-drivers-Add-a-new-driver-for-the-Atmel-A.patch
-0003-clocksource-drivers-timer-atmel-tcb-add-clockevent-d.patch
-0004-clocksource-drivers-atmel-pit-make-option-silent.patch
-0005-ARM-at91-Implement-clocksource-selection.patch
-0006-ARM-configs-at91-use-new-TCB-timer-driver.patch
-0007-ARM-configs-at91-unselect-PIT.patch
+# Alexandre Belloni | [PATCH 00/12] clocksource: improve Atmel TCB timer driver
+# Date: Wed, 3 Apr 2019 16:11:08 +0200
+0001-ARM-at91-move-SoC-specific-definitions-to-SoC-folder.patch
+0002-misc-atmel_tclib-drop-AVR32-support.patch
+0003-misc-atmel_tclib-move-definitions-to-header-file.patch
+0004-clocksource-drivers-tcb_clksrc-stop-depending-on-atm.patch
+0005-clocksource-drivers-tcb_clksrc-Use-tcb-as-sched_cloc.patch
+0006-ARM-at91-Implement-clocksource-selection.patch
+0007-clocksource-drivers-tcb_clksrc-move-Kconfig-option.patch
+0008-clocksource-drivers-timer-atmel-pit-rework-Kconfig-o.patch
+0009-clocksource-drivers-tcb_clksrc-Rename-the-file-for-c.patch
+0010-ARM-configs-at91-unselect-PIT.patch
+0011-misc-atmel_tclib-do-not-probe-already-used-TCBs.patch
+0012-clocksource-drivers-timer-atmel-tcb-Use-ARRAY_SIZE-i.patch
kthread-convert-worker-lock-to-raw-spinlock.patch
sched-fair-Robustify-CFS-bandwidth-timer-locking.patch
@@ -74,9 +79,10 @@ mm-workingset-replace-IRQ-off-check-with-a-lockdep-a.patch
crypto-chtls-remove-cdev_list_lock.patch
crypto-user-remove-crypto_cfg_mutex.patch
tpm-remove-tpm_dev_wq_lock.patch
+drm-i915-Don-t-disable-interrupts-independently-of-t.patch
-# [PATCH v7] x86: load FPU registers on return to userland
-# Date: Thu, 21 Feb 2019 12:49:58 +0100
+# [PATCH v9] x86: load FPU registers on return to userland
+# Date: Wed, 3 Apr 2019 18:41:29 +0200
0001-x86-fpu-Remove-fpu-initialized-usage-in-__fpu__resto.patch
0002-x86-fpu-Remove-fpu__restore.patch
0003-x86-fpu-Remove-preempt_disable-in-fpu__clear.patch
@@ -88,17 +94,22 @@ tpm-remove-tpm_dev_wq_lock.patch
0009-x86-fpu-Add-__-make_fpregs_active-helpers.patch
0010-x86-fpu-Make-__raw_xsave_addr-use-feature-number-ins.patch
0011-x86-fpu-Make-get_xsave_field_ptr-and-get_xsave_addr-.patch
-0012-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
-0013-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
-0014-x86-fpu-Eager-switch-PKRU-state.patch
-0015-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
-0016-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
-0017-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
-0018-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
-0019-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
-0020-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
-0021-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
-0022-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+0012-x86-pkru-Provide-.-_pkru_ins-functions.patch
+0013-x86-fpu-Only-write-PKRU-if-it-is-different-from-curr.patch
+0014-x86-pkeys-Don-t-check-if-PKRU-is-zero-before-writtin.patch
+0015-x86-fpu-Eager-switch-PKRU-state.patch
+0016-x86-entry-Add-TIF_NEED_FPU_LOAD.patch
+0017-x86-fpu-Always-store-the-registers-in-copy_fpstate_t.patch
+0018-x86-fpu-Prepare-copy_fpstate_to_sigframe-for-TIF_NEE.patch
+0019-x86-fpu-Update-xstate-s-PKRU-value-on-write_pkru.patch
+0020-x86-fpu-Inline-copy_user_to_fpregs_zeroing.patch
+0021-x86-fpu-Let-__fpu__restore_sig-restore-the-32bit-fxs.patch
+0022-x86-fpu-Merge-the-two-code-paths-in-__fpu__restore_s.patch
+0023-x86-fpu-Defer-FPU-state-load-until-return-to-userspa.patch
+0024-x86-fpu-Add-a-fastpath-to-__fpu__restore_sig.patch
+0025-x86-fpu-Add-a-fastpath-to-copy_fpstate_to_sigframe.patch
+0026-x86-fpu-Restore-FPU-register-in-copy_fpstate_to_sigf.patch
+0027-x86-pkeys-add-PKRU-value-to-init_fpstate.patch
############################################################
# Ready for posting
diff --git a/patches/srcu-Remove-srcu_queue_delayed_work_on.patch b/patches/srcu-Remove-srcu_queue_delayed_work_on.patch
index 4046dff19fa2..1f256dfdeffb 100644
--- a/patches/srcu-Remove-srcu_queue_delayed_work_on.patch
+++ b/patches/srcu-Remove-srcu_queue_delayed_work_on.patch
@@ -153,7 +153,7 @@ Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
spin_lock_irq_rcu_node(sdp);
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
-@@ -3424,8 +3424,6 @@ int rcutree_online_cpu(unsigned int cpu)
+@@ -3433,8 +3433,6 @@ int rcutree_online_cpu(unsigned int cpu)
raw_spin_lock_irqsave_rcu_node(rnp, flags);
rnp->ffmask |= rdp->grpmask;
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
@@ -162,7 +162,7 @@ Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return 0; /* Too early in boot for scheduler work. */
sync_sched_exp_online_cleanup(cpu);
-@@ -3450,8 +3448,6 @@ int rcutree_offline_cpu(unsigned int cpu
+@@ -3459,8 +3457,6 @@ int rcutree_offline_cpu(unsigned int cpu
raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
rcutree_affinity_setting(cpu, cpu);
diff --git a/patches/timers-prepare-for-full-preemption.patch b/patches/timers-prepare-for-full-preemption.patch
index 79b73c42ed51..d5854f240b09 100644
--- a/patches/timers-prepare-for-full-preemption.patch
+++ b/patches/timers-prepare-for-full-preemption.patch
@@ -28,7 +28,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
# define del_timer_sync(t) del_timer(t)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -509,11 +509,14 @@ void resched_cpu(int cpu)
+@@ -510,11 +510,14 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
@@ -45,7 +45,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
rcu_read_lock();
for_each_domain(cpu, sd) {
-@@ -532,6 +535,8 @@ int get_nohz_timer_target(void)
+@@ -533,6 +536,8 @@ int get_nohz_timer_target(void)
cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
unlock:
rcu_read_unlock();
diff --git a/patches/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch b/patches/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
index 484fb05d02d0..6909026864dc 100644
--- a/patches/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
+++ b/patches/tty-sysrq-Convert-show_lock-to-raw_spinlock_t.patch
@@ -26,14 +26,12 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: stable-rt@vger.kernel.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
- drivers/tty/sysrq.c | 6 +++---
+ drivers/tty/sysrq.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
-diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
-index 1f03078ec3527..8473557c7ab2a 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
-@@ -208,7 +208,7 @@ static struct sysrq_key_op sysrq_showlocks_op = {
+@@ -208,7 +208,7 @@ static struct sysrq_key_op sysrq_showloc
#endif
#ifdef CONFIG_SMP
@@ -55,6 +53,3 @@ index 1f03078ec3527..8473557c7ab2a 100644
}
static void sysrq_showregs_othercpus(struct work_struct *dummy)
---
-2.20.1
-
diff --git a/patches/workqueue-distangle-from-rq-lock.patch b/patches/workqueue-distangle-from-rq-lock.patch
index cdf56edb9ee7..d349d632c3fb 100644
--- a/patches/workqueue-distangle-from-rq-lock.patch
+++ b/patches/workqueue-distangle-from-rq-lock.patch
@@ -33,7 +33,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -1721,10 +1721,6 @@ static inline void ttwu_activate(struct
+@@ -1722,10 +1722,6 @@ static inline void ttwu_activate(struct
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -44,7 +44,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
/*
-@@ -2162,56 +2158,6 @@ try_to_wake_up(struct task_struct *p, un
+@@ -2163,56 +2159,6 @@ try_to_wake_up(struct task_struct *p, un
}
/**
@@ -101,7 +101,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -3531,21 +3477,6 @@ static void __sched notrace __schedule(b
+@@ -3532,21 +3478,6 @@ static void __sched notrace __schedule(b
atomic_inc(&rq->nr_iowait);
delayacct_blkio_start();
}
@@ -123,7 +123,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
}
switch_count = &prev->nvcsw;
}
-@@ -3605,6 +3536,20 @@ static inline void sched_submit_work(str
+@@ -3606,6 +3537,20 @@ static inline void sched_submit_work(str
{
if (!tsk->state || tsk_is_pi_blocked(tsk))
return;
@@ -144,7 +144,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
/*
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
-@@ -3613,6 +3558,12 @@ static inline void sched_submit_work(str
+@@ -3614,6 +3559,12 @@ static inline void sched_submit_work(str
blk_schedule_flush_plug(tsk);
}
@@ -157,7 +157,7 @@ Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
asmlinkage __visible void __sched schedule(void)
{
struct task_struct *tsk = current;
-@@ -3623,6 +3574,7 @@ asmlinkage __visible void __sched schedu
+@@ -3624,6 +3575,7 @@ asmlinkage __visible void __sched schedu
__schedule(false);
sched_preempt_enable_no_resched();
} while (need_resched());
diff --git a/patches/workqueue-prevent-deadlock-stall.patch b/patches/workqueue-prevent-deadlock-stall.patch
index 181b8d728671..cde8b1ce0c70 100644
--- a/patches/workqueue-prevent-deadlock-stall.patch
+++ b/patches/workqueue-prevent-deadlock-stall.patch
@@ -43,7 +43,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
-@@ -3580,9 +3580,8 @@ void __noreturn do_task_dead(void)
+@@ -3581,9 +3581,8 @@ void __noreturn do_task_dead(void)
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -54,7 +54,7 @@ Cc: Steven Rostedt <rostedt@goodmis.org>
/*
* If a worker went to sleep, notify and ask workqueue whether
* it wants to wake up a task to maintain concurrency.
-@@ -3596,6 +3595,9 @@ static inline void sched_submit_work(str
+@@ -3597,6 +3596,9 @@ static inline void sched_submit_work(str
preempt_enable_no_resched();
}
diff --git a/patches/x86-kvm-require-const-tsc-for-rt.patch b/patches/x86-kvm-require-const-tsc-for-rt.patch
index 3e10ceb0adc1..5ba76b799fce 100644
--- a/patches/x86-kvm-require-const-tsc-for-rt.patch
+++ b/patches/x86-kvm-require-const-tsc-for-rt.patch
@@ -14,7 +14,7 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
-@@ -6907,6 +6907,14 @@ int kvm_arch_init(void *opaque)
+@@ -6934,6 +6934,14 @@ int kvm_arch_init(void *opaque)
goto out;
}