summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2018-07-27 20:02:03 +0200
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2018-07-27 20:02:03 +0200
commit30255b10d2525de53ec1e348b0525a9176ebabd9 (patch)
treeacfbc30a267939124068e39d6fa5c0989bba6cb5
parente46ee88a5117acd7806660c11b65ea6894a9aec1 (diff)
downloadlinux-rt-4.16.18-rt11-patches.tar.gz
[ANNOUNCE] v4.16.18-rt11v4.16.18-rt11-patches
Dear RT folks! I'm pleased to announce the v4.16.18-rt11 patch set. Changes since v4.16.18-rt10: - Finally fix the SIMD locking on arm64. - Revert the srcu based notifier in crypto code. This is no longer required since the rwsem on RT allows multiple readers. - Add a per-CPU lock in crypto's cryptd per-CPU queue. Reported by Mike Galbraith. - Add a per-CPU lock in crypto's scompress code. Patch by Mike Galbraith. - Disable runtime EFI services on ARM64. It can be enabled again via command line. The getTime function needs around 10ms to complete on one of my machines. The setVariable might take even longer. It is disabled by default, it can be enabled via `efi=runtime' on the command line. Known issues - A warning triggered in "rcu_note_context_switch" originated from SyS_timer_gettime(). The issue was always there, it is now visible. Reported by Grygorii Strashko and Daniel Wagner. The delta patch against v4.16.18-rt10 is appended below and can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/incr/patch-4.16.18-rt10-rt11.patch.xz You can get this release via the git tree at: git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v4.16.18-rt11 The RT patch against v4.16.18 can be found here: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patch-4.16.18-rt11.patch.xz The split quilt queue is available at: https://cdn.kernel.org/pub/linux/kernel/projects/rt/4.16/older/patches-4.16.18-rt11.tar.xz Sebastian Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--patches/Revert-crypto-Convert-crypto-notifier-chain-to-SRCU.patch92
-rw-r--r--patches/arm64-fpsimd-use-a-local_lock-instead-of-local_bh_di.patch149
-rw-r--r--patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch163
-rw-r--r--patches/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch83
-rw-r--r--patches/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch81
-rw-r--r--patches/efi-Allow-efi-runtime.patch30
-rw-r--r--patches/efi-Disable-runtime-services-on-RT.patch38
-rw-r--r--patches/localversion.patch2
-rw-r--r--patches/series7
9 files changed, 494 insertions, 151 deletions
diff --git a/patches/Revert-crypto-Convert-crypto-notifier-chain-to-SRCU.patch b/patches/Revert-crypto-Convert-crypto-notifier-chain-to-SRCU.patch
new file mode 100644
index 000000000000..c8e5f9923d4a
--- /dev/null
+++ b/patches/Revert-crypto-Convert-crypto-notifier-chain-to-SRCU.patch
@@ -0,0 +1,92 @@
+From 097b8f7d0078fb99516aa7c38908abf91c419b25 Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Fri, 27 Jul 2018 14:09:26 +0200
+Subject: [PATCH] Revert "crypto: Convert crypto notifier chain to SRCU"
+
+This reverts the commit "crypto: Convert crypto notifier chain to
+SRCU".
+This is no longer required because the rwsem is now multi-reader
+capable on RT.
+The other concern was a deadlock because the rwsem is fifo writer fair
+in regard to incoming readers. This is currently not an issue because we
+have only one crypto_register_notifier() user, which is invoked before
+the readers (blocking_notifier_call_chain()) are coming.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ crypto/algapi.c | 4 ++--
+ crypto/api.c | 6 +++---
+ crypto/internal.h | 4 ++--
+ 3 files changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/crypto/algapi.c b/crypto/algapi.c
+index 99ed1c74217f..395b082d03a9 100644
+--- a/crypto/algapi.c
++++ b/crypto/algapi.c
+@@ -726,13 +726,13 @@ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
+
+ int crypto_register_notifier(struct notifier_block *nb)
+ {
+- return srcu_notifier_chain_register(&crypto_chain, nb);
++ return blocking_notifier_chain_register(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_register_notifier);
+
+ int crypto_unregister_notifier(struct notifier_block *nb)
+ {
+- return srcu_notifier_chain_unregister(&crypto_chain, nb);
++ return blocking_notifier_chain_unregister(&crypto_chain, nb);
+ }
+ EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
+
+diff --git a/crypto/api.c b/crypto/api.c
+index d4bbeba587f9..70a894e52ff3 100644
+--- a/crypto/api.c
++++ b/crypto/api.c
+@@ -32,7 +32,7 @@ EXPORT_SYMBOL_GPL(crypto_alg_list);
+ DECLARE_RWSEM(crypto_alg_sem);
+ EXPORT_SYMBOL_GPL(crypto_alg_sem);
+
+-SRCU_NOTIFIER_HEAD(crypto_chain);
++BLOCKING_NOTIFIER_HEAD(crypto_chain);
+ EXPORT_SYMBOL_GPL(crypto_chain);
+
+ static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
+@@ -237,10 +237,10 @@ int crypto_probing_notify(unsigned long val, void *v)
+ {
+ int ok;
+
+- ok = srcu_notifier_call_chain(&crypto_chain, val, v);
++ ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+ if (ok == NOTIFY_DONE) {
+ request_module("cryptomgr");
+- ok = srcu_notifier_call_chain(&crypto_chain, val, v);
++ ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ return ok;
+diff --git a/crypto/internal.h b/crypto/internal.h
+index 998a6af375d4..5ac27fba10e8 100644
+--- a/crypto/internal.h
++++ b/crypto/internal.h
+@@ -44,7 +44,7 @@ struct crypto_larval {
+
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct srcu_notifier_head crypto_chain;
++extern struct blocking_notifier_head crypto_chain;
+
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -139,7 +139,7 @@ static inline int crypto_is_moribund(struct crypto_alg *alg)
+
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+- srcu_notifier_call_chain(&crypto_chain, val, v);
++ blocking_notifier_call_chain(&crypto_chain, val, v);
+ }
+
+ #endif /* _CRYPTO_INTERNAL_H */
+--
+2.18.0
+
diff --git a/patches/arm64-fpsimd-use-a-local_lock-instead-of-local_bh_di.patch b/patches/arm64-fpsimd-use-a-local_lock-instead-of-local_bh_di.patch
deleted file mode 100644
index 30d81fb6f366..000000000000
--- a/patches/arm64-fpsimd-use-a-local_lock-instead-of-local_bh_di.patch
+++ /dev/null
@@ -1,149 +0,0 @@
-From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-Date: Thu, 17 May 2018 14:05:49 +0200
-Subject: [PATCH] arm64: fpsimd: use a local_lock() in addition to local_bh_disable()
-
-In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The
-code disables BH and expects that it is not preemptible. On -RT the
-task remains preemptible but remains the same CPU. This may corrupt the
-content of the SIMD registers if the task is preempted during
-saving/restoring those registers.
-Add a locallock around this process. This avoids that the any function
-within the locallock block is invoked more than once on the same CPU.
-
-The preempt_disable() + local_bh_enable() combo in kernel_neon_begin()
-is not working on -RT. We don't use NEON in kernel mode on RT right now
-but this still should be addressed.
-
-Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
----
- arch/arm64/kernel/fpsimd.c | 20 ++++++++++++++++++--
- 1 file changed, 18 insertions(+), 2 deletions(-)
-
---- a/arch/arm64/kernel/fpsimd.c
-+++ b/arch/arm64/kernel/fpsimd.c
-@@ -38,6 +38,7 @@
- #include <linux/signal.h>
- #include <linux/slab.h>
- #include <linux/sysctl.h>
-+#include <linux/locallock.h>
-
- #include <asm/fpsimd.h>
- #include <asm/cputype.h>
-@@ -235,7 +236,7 @@ static void sve_user_enable(void)
- * whether TIF_SVE is clear or set, since these are not vector length
- * dependent.
- */
--
-+static DEFINE_LOCAL_IRQ_LOCK(fpsimd_lock);
- /*
- * Update current's FPSIMD/SVE registers from thread_struct.
- *
-@@ -594,6 +595,7 @@ int sve_set_vector_length(struct task_st
- * non-SVE thread.
- */
- if (task == current) {
-+ local_lock(fpsimd_lock);
- local_bh_disable();
-
- task_fpsimd_save();
-@@ -604,8 +606,10 @@ int sve_set_vector_length(struct task_st
- if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
- sve_to_fpsimd(task);
-
-- if (task == current)
-+ if (task == current) {
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
-+ }
-
- /*
- * Force reallocation of task SVE state to the correct size
-@@ -838,6 +842,7 @@ asmlinkage void do_sve_acc(unsigned int
- sve_alloc(current);
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
-
- task_fpsimd_save();
- fpsimd_to_sve(current);
-@@ -849,6 +854,7 @@ asmlinkage void do_sve_acc(unsigned int
- if (test_and_set_thread_flag(TIF_SVE))
- WARN_ON(1); /* SVE access shouldn't have trapped */
-
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
- }
-
-@@ -926,6 +932,7 @@ void fpsimd_flush_thread(void)
- return;
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
-
- memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
- fpsimd_flush_task_state(current);
-@@ -967,6 +974,7 @@ void fpsimd_flush_thread(void)
-
- set_thread_flag(TIF_FOREIGN_FPSTATE);
-
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
- }
-
-@@ -980,7 +988,9 @@ void fpsimd_preserve_current_state(void)
- return;
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
- task_fpsimd_save();
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
- }
-
-@@ -1022,12 +1032,14 @@ void fpsimd_restore_current_state(void)
- return;
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
-
- if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
- task_fpsimd_load();
- fpsimd_bind_to_cpu();
- }
-
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
- }
-
-@@ -1042,6 +1054,7 @@ void fpsimd_update_current_state(struct
- return;
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
-
- current->thread.fpsimd_state.user_fpsimd = *state;
- if (system_supports_sve() && test_thread_flag(TIF_SVE))
-@@ -1052,6 +1065,7 @@ void fpsimd_update_current_state(struct
- if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE))
- fpsimd_bind_to_cpu();
-
-+ local_unlock(fpsimd_lock);
- local_bh_enable();
- }
-
-@@ -1116,6 +1130,7 @@ void kernel_neon_begin(void)
- BUG_ON(!may_use_simd());
-
- local_bh_disable();
-+ local_lock(fpsimd_lock);
-
- __this_cpu_write(kernel_neon_busy, true);
-
-@@ -1128,6 +1143,7 @@ void kernel_neon_begin(void)
- /* Invalidate any task state remaining in the fpsimd regs: */
- fpsimd_flush_cpu_state();
-
-+ local_unlock(fpsimd_lock);
- preempt_disable();
-
- local_bh_enable();
diff --git a/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch b/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
new file mode 100644
index 000000000000..b7ae0d87467c
--- /dev/null
+++ b/patches/arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
@@ -0,0 +1,163 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Wed, 25 Jul 2018 14:02:38 +0200
+Subject: [PATCH] arm64: fpsimd: use preemp_disable in addition to
+ local_bh_disable()
+
+In v4.16-RT I noticed a number of warnings from task_fpsimd_load(). The
+code disables BH and expects that it is not preemptible. On -RT the
+task remains preemptible but remains the same CPU. This may corrupt the
+content of the SIMD registers if the task is preempted during
+saving/restoring those registers.
+
+Add preempt_disable()/enable() to enforce the required semantic on -RT.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/arm64/kernel/fpsimd.c | 30 ++++++++++++++++++++++++++++--
+ 1 file changed, 28 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/fpsimd.c
++++ b/arch/arm64/kernel/fpsimd.c
+@@ -157,6 +157,15 @@ static void sve_free(struct task_struct
+ __sve_free(task);
+ }
+
++static void *sve_free_atomic(struct task_struct *task)
++{
++ void *sve_state = task->thread.sve_state;
++
++ WARN_ON(test_tsk_thread_flag(task, TIF_SVE));
++
++ task->thread.sve_state = NULL;
++ return sve_state;
++}
+
+ /* Offset of FFR in the SVE register dump */
+ static size_t sve_ffr_offset(int vl)
+@@ -594,6 +603,7 @@ int sve_set_vector_length(struct task_st
+ * non-SVE thread.
+ */
+ if (task == current) {
++ preempt_disable();
+ local_bh_disable();
+
+ task_fpsimd_save();
+@@ -604,8 +614,10 @@ int sve_set_vector_length(struct task_st
+ if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
+ sve_to_fpsimd(task);
+
+- if (task == current)
++ if (task == current) {
+ local_bh_enable();
++ preempt_enable();
++ }
+
+ /*
+ * Force reallocation of task SVE state to the correct size
+@@ -837,6 +849,7 @@ asmlinkage void do_sve_acc(unsigned int
+
+ sve_alloc(current);
+
++ preempt_disable();
+ local_bh_disable();
+
+ task_fpsimd_save();
+@@ -850,6 +863,7 @@ asmlinkage void do_sve_acc(unsigned int
+ WARN_ON(1); /* SVE access shouldn't have trapped */
+
+ local_bh_enable();
++ preempt_enable();
+ }
+
+ /*
+@@ -921,10 +935,12 @@ void fpsimd_thread_switch(struct task_st
+ void fpsimd_flush_thread(void)
+ {
+ int vl, supported_vl;
++ void *mem = NULL;
+
+ if (!system_supports_fpsimd())
+ return;
+
++ preempt_disable();
+ local_bh_disable();
+
+ memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+@@ -932,7 +948,7 @@ void fpsimd_flush_thread(void)
+
+ if (system_supports_sve()) {
+ clear_thread_flag(TIF_SVE);
+- sve_free(current);
++ mem = sve_free_atomic(current);
+
+ /*
+ * Reset the task vector length as required.
+@@ -968,6 +984,8 @@ void fpsimd_flush_thread(void)
+ set_thread_flag(TIF_FOREIGN_FPSTATE);
+
+ local_bh_enable();
++ preempt_enable();
++ kfree(mem);
+ }
+
+ /*
+@@ -979,9 +997,11 @@ void fpsimd_preserve_current_state(void)
+ if (!system_supports_fpsimd())
+ return;
+
++ preempt_disable();
+ local_bh_disable();
+ task_fpsimd_save();
+ local_bh_enable();
++ preempt_enable();
+ }
+
+ /*
+@@ -1021,6 +1041,7 @@ void fpsimd_restore_current_state(void)
+ if (!system_supports_fpsimd())
+ return;
+
++ preempt_disable();
+ local_bh_disable();
+
+ if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
+@@ -1029,6 +1050,7 @@ void fpsimd_restore_current_state(void)
+ }
+
+ local_bh_enable();
++ preempt_enable();
+ }
+
+ /*
+@@ -1041,6 +1063,7 @@ void fpsimd_update_current_state(struct
+ if (!system_supports_fpsimd())
+ return;
+
++ preempt_disable();
+ local_bh_disable();
+
+ current->thread.fpsimd_state.user_fpsimd = *state;
+@@ -1053,6 +1076,7 @@ void fpsimd_update_current_state(struct
+ fpsimd_bind_to_cpu();
+
+ local_bh_enable();
++ preempt_enable();
+ }
+
+ /*
+@@ -1115,6 +1139,7 @@ void kernel_neon_begin(void)
+
+ BUG_ON(!may_use_simd());
+
++ preempt_disable();
+ local_bh_disable();
+
+ __this_cpu_write(kernel_neon_busy, true);
+@@ -1131,6 +1156,7 @@ void kernel_neon_begin(void)
+ preempt_disable();
+
+ local_bh_enable();
++ preempt_enable();
+ }
+ EXPORT_SYMBOL(kernel_neon_begin);
+
diff --git a/patches/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch b/patches/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
new file mode 100644
index 000000000000..c3e11bbe2a90
--- /dev/null
+++ b/patches/crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
@@ -0,0 +1,83 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 18:52:00 +0200
+Subject: [PATCH] crypto: cryptd - add a lock instead
+ preempt_disable/local_bh_disable
+
+cryptd has a per-CPU lock which is protected with local_bh_disable() and
+preempt_disable().
+Add an explicit spin_lock to make the locking context more obvious and
+visible to lockdep. Since it is a per-CPU lock, there should be no lock
+contention on the actual spinlock.
+There is a small race-window where we could be migrated to another CPU
+after the cpu_queue has been obtained. This is not a problem because the
+actual resource is protected by the spinlock.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ crypto/cryptd.c | 19 +++++++++----------
+ 1 file changed, 9 insertions(+), 10 deletions(-)
+
+diff --git a/crypto/cryptd.c b/crypto/cryptd.c
+index addca7bae33f..8ad657cddc0a 100644
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -39,6 +39,7 @@ MODULE_PARM_DESC(cryptd_max_cpu_qlen, "Set cryptd Max queue depth");
+ struct cryptd_cpu_queue {
+ struct crypto_queue queue;
+ struct work_struct work;
++ spinlock_t qlock;
+ };
+
+ struct cryptd_queue {
+@@ -117,6 +118,7 @@ static int cryptd_init_queue(struct cryptd_queue *queue,
+ cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
+ crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
+ INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
++ spin_lock_init(&cpu_queue->qlock);
+ }
+ pr_info("cryptd: max_cpu_qlen set to %d\n", max_cpu_qlen);
+ return 0;
+@@ -141,8 +143,10 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ struct cryptd_cpu_queue *cpu_queue;
+ atomic_t *refcnt;
+
+- cpu = get_cpu();
+- cpu_queue = this_cpu_ptr(queue->cpu_queue);
++ cpu_queue = raw_cpu_ptr(queue->cpu_queue);
++ spin_lock_bh(&cpu_queue->qlock);
++ cpu = smp_processor_id();
++
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+ refcnt = crypto_tfm_ctx(request->tfm);
+@@ -158,7 +162,7 @@ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ atomic_inc(refcnt);
+
+ out_put_cpu:
+- put_cpu();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ return err;
+ }
+@@ -174,16 +178,11 @@ static void cryptd_queue_worker(struct work_struct *work)
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+- * preempt_disable/enable is used to prevent being preempted by
+- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+- * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+- local_bh_disable();
+- preempt_disable();
++ spin_lock_bh(&cpu_queue->qlock);
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+- preempt_enable();
+- local_bh_enable();
++ spin_unlock_bh(&cpu_queue->qlock);
+
+ if (!req)
+ return;
+--
+2.18.0
+
diff --git a/patches/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch b/patches/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
new file mode 100644
index 000000000000..d673117ba1b4
--- /dev/null
+++ b/patches/crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
@@ -0,0 +1,81 @@
+From: Mike Galbraith <efault@gmx.de>
+Date: Wed, 11 Jul 2018 17:14:47 +0200
+Subject: [PATCH] crypto: scompress - serialize RT percpu scratch buffer
+ access with a local lock
+
+| BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:974
+| in_atomic(): 1, irqs_disabled(): 0, pid: 1401, name: cryptomgr_test
+| Preemption disabled at:
+| [<ffff00000849941c>] scomp_acomp_comp_decomp+0x34/0x1a0
+| CPU: 21 PID: 1401 Comm: cryptomgr_test Tainted: G W 4.16.18-rt9-rt #1
+| Hardware name: www.cavium.com crb-1s/crb-1s, BIOS 0.3 Apr 25 2017
+| Call trace:
+| dump_backtrace+0x0/0x1c8
+| show_stack+0x24/0x30
+| dump_stack+0xac/0xe8
+| ___might_sleep+0x124/0x188
+| rt_spin_lock+0x40/0x88
+| zip_load_instr+0x44/0x170 [thunderx_zip]
+| zip_deflate+0x184/0x378 [thunderx_zip]
+| zip_compress+0xb0/0x130 [thunderx_zip]
+| zip_scomp_compress+0x48/0x60 [thunderx_zip]
+| scomp_acomp_comp_decomp+0xd8/0x1a0
+| scomp_acomp_compress+0x24/0x30
+| test_acomp+0x15c/0x558
+| alg_test_comp+0xc0/0x128
+| alg_test.part.6+0x120/0x2c0
+| alg_test+0x6c/0xa0
+| cryptomgr_test+0x50/0x58
+| kthread+0x134/0x138
+| ret_from_fork+0x10/0x18
+
+Mainline disables preemption to serialize percpu scratch buffer access,
+causing the splat above. Serialize with a local lock for RT instead.
+
+Signed-off-by: Mike Galbraith <efault@gmx.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ crypto/scompress.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/crypto/scompress.c b/crypto/scompress.c
+index 968bbcf65c94..c2f0077e0801 100644
+--- a/crypto/scompress.c
++++ b/crypto/scompress.c
+@@ -24,6 +24,7 @@
+ #include <linux/cryptouser.h>
+ #include <net/netlink.h>
+ #include <linux/scatterlist.h>
++#include <linux/locallock.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/internal/acompress.h>
+ #include <crypto/internal/scompress.h>
+@@ -34,6 +35,7 @@ static void * __percpu *scomp_src_scratches;
+ static void * __percpu *scomp_dst_scratches;
+ static int scomp_scratch_users;
+ static DEFINE_MUTEX(scomp_lock);
++static DEFINE_LOCAL_IRQ_LOCK(scomp_scratches_lock);
+
+ #ifdef CONFIG_NET
+ static int crypto_scomp_report(struct sk_buff *skb, struct crypto_alg *alg)
+@@ -146,7 +148,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ void **tfm_ctx = acomp_tfm_ctx(tfm);
+ struct crypto_scomp *scomp = *tfm_ctx;
+ void **ctx = acomp_request_ctx(req);
+- const int cpu = get_cpu();
++ const int cpu = local_lock_cpu(scomp_scratches_lock);
+ u8 *scratch_src = *per_cpu_ptr(scomp_src_scratches, cpu);
+ u8 *scratch_dst = *per_cpu_ptr(scomp_dst_scratches, cpu);
+ int ret;
+@@ -181,7 +183,7 @@ static int scomp_acomp_comp_decomp(struct acomp_req *req, int dir)
+ 1);
+ }
+ out:
+- put_cpu();
++ local_unlock_cpu(scomp_scratches_lock);
+ return ret;
+ }
+
+--
+2.18.0
+
diff --git a/patches/efi-Allow-efi-runtime.patch b/patches/efi-Allow-efi-runtime.patch
new file mode 100644
index 000000000000..b6a3580a4de6
--- /dev/null
+++ b/patches/efi-Allow-efi-runtime.patch
@@ -0,0 +1,30 @@
+From 667af2f3d8ccf947fe7c9dac0b59b175963163ba Mon Sep 17 00:00:00 2001
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 15:06:10 +0200
+Subject: [PATCH 2/2] efi: Allow efi=runtime
+
+In case the option "efi=noruntime" is default at build-time, the user
+could overwrite its state by `efi=runtime' and allow it again.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firmware/efi/efi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
+index 62c6e4b6ce3e..d6176ce50b45 100644
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -110,6 +110,9 @@ static int __init parse_efi_cmdline(char *str)
+ if (parse_option_str(str, "noruntime"))
+ disable_runtime = true;
+
++ if (parse_option_str(str, "runtime"))
++ disable_runtime = false;
++
+ return 0;
+ }
+ early_param("efi", parse_efi_cmdline);
+--
+2.18.0
+
diff --git a/patches/efi-Disable-runtime-services-on-RT.patch b/patches/efi-Disable-runtime-services-on-RT.patch
new file mode 100644
index 000000000000..56ad26be68da
--- /dev/null
+++ b/patches/efi-Disable-runtime-services-on-RT.patch
@@ -0,0 +1,38 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Thu, 26 Jul 2018 15:03:16 +0200
+Subject: [PATCH] efi: Disable runtime services on RT
+
+Based on measurements the EFI functions get_variable /
+get_next_variable take up to 2us which looks okay.
+The functions get_time, set_time take around 10ms. Those 10ms are too
+much. Even one ms would be too much.
+Ard mentioned that SetVariable might even trigger larger latencies if
+the firmware will erase flash blocks on NOR.
+
+The time-functions are used by efi-rtc and can be triggered during
+runtime (either via explicit read/write or ntp sync).
+
+The variable write could be used by pstore.
+These functions can be disabled without much of a loss. The poweroff /
+reboot hooks may be provided by PSCI.
+
+Disable EFI's runtime wrappers.
+
+This was observed on "EFI v2.60 by SoftIron Overdrive 1000".
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ drivers/firmware/efi/efi.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/firmware/efi/efi.c
++++ b/drivers/firmware/efi/efi.c
+@@ -75,7 +75,7 @@ static unsigned long *efi_tables[] = {
+ &efi.mem_attr_table,
+ };
+
+-static bool disable_runtime;
++static bool disable_runtime = IS_ENABLED(CONFIG_PREEMPT_RT_BASE);
+ static int __init setup_noefi(char *arg)
+ {
+ disable_runtime = true;
diff --git a/patches/localversion.patch b/patches/localversion.patch
index e16fb07c0a7d..58842b503a27 100644
--- a/patches/localversion.patch
+++ b/patches/localversion.patch
@@ -10,4 +10,4 @@ Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
--- /dev/null
+++ b/localversion-rt
@@ -0,0 +1 @@
-+-rt10
++-rt11
diff --git a/patches/series b/patches/series
index 53a8546e2775..134482620004 100644
--- a/patches/series
+++ b/patches/series
@@ -158,6 +158,7 @@ arm-unwind-use_raw_lock.patch
############################################################
irqchip-gic-v3-its-Make-its_lock-a-raw_spin_lock_t.patch
irqchip-gic-v3-its-Move-ITS-pend_page-allocation-int.patch
+efi-Allow-efi-runtime.patch
############################################################
# Needs to address review feedback
@@ -242,6 +243,7 @@ x86-use-gen-rwsem-spinlocks-rt.patch
leds-trigger-disable-CPU-trigger-on-RT.patch
cpufreq-drop-K8-s-driver-from-beeing-selected.patch
md-disable-bcache.patch
+efi-Disable-runtime-services-on-RT.patch
# PRINTK
printk-kill.patch
@@ -485,7 +487,7 @@ powerpc-ps3-device-init.c-adapt-to-completions-using.patch
ARM-enable-irq-in-translation-section-permission-fau.patch
genirq-update-irq_set_irqchip_state-documentation.patch
KVM-arm-arm64-downgrade-preempt_disable-d-region-to-.patch
-arm64-fpsimd-use-a-local_lock-instead-of-local_bh_di.patch
+arm64-fpsimd-use-preemp_disable-in-addition-to-local.patch
# KGDB
kgb-serial-hackaround.patch
@@ -507,6 +509,8 @@ scsi-fcoe-rt-aware.patch
x86-crypto-reduce-preempt-disabled-regions.patch
crypto-Reduce-preempt-disabled-regions-more-algos.patch
crypto-limit-more-FPU-enabled-sections.patch
+crypto-scompress-serialize-RT-percpu-scratch-buffer-.patch
+crypto-cryptd-add-a-lock-instead-preempt_disable-loc.patch
# RANDOM
panic-disable-random-on-rt.patch
@@ -527,6 +531,7 @@ net-make-devnet_rename_seq-a-mutex.patch
# CRYPTO
peterz-srcu-crypto-chain.patch
+Revert-crypto-Convert-crypto-notifier-chain-to-SRCU.patch
# LOCKDEP
lockdep-selftest-only-do-hardirq-context-test-for-raw-spinlock.patch