Diffstat (limited to 'patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch')
-rw-r--r--  patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch  |  89 +
 1 file changed, 89 insertions(+), 0 deletions(-)
diff --git a/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch b/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch
new file mode 100644
index 000000000000..cb8e8d588157
--- /dev/null
+++ b/patches/crypto-cryptd-Protect-per-CPU-resource-by-disabling-.patch
@@ -0,0 +1,89 @@
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Date: Tue, 3 May 2022 08:30:01 +0200
+Subject: [PATCH] crypto: cryptd - Protect per-CPU resource by disabling BH.
+
+The access to cryptd_queue::cpu_queue is synchronized by disabling
+preemption in cryptd_enqueue_request() and disabling BH in
+cryptd_queue_worker(), which implies the queue may be accessed from BH.
+
+If cryptd_enqueue_request() is invoked from preemptible context _and_
+from soft interrupt on the same CPU, this can lead to list corruption
+because cryptd_enqueue_request() is not protected against reentry from
+soft interrupt.
+
+Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
+to ensure BH is always disabled.
+Remove preempt_disable() from cryptd_queue_worker() since the
+local_bh_disable() there already provides the required synchronisation.
+
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://lkml.kernel.org/r/YnKWuLQZdPwSdRTh@linutronix.de
+---
+ crypto/cryptd.c | 23 +++++++++++------------
+ 1 file changed, 11 insertions(+), 12 deletions(-)
+
+--- a/crypto/cryptd.c
++++ b/crypto/cryptd.c
+@@ -39,6 +39,10 @@ struct cryptd_cpu_queue {
+ };
+
+ struct cryptd_queue {
++ /*
++ * Protected by disabling BH to allow enqueueing from soft interrupt and
++ * dequeuing from kworker (cryptd_queue_worker()).
++ */
+ struct cryptd_cpu_queue __percpu *cpu_queue;
+ };
+
+@@ -125,28 +129,28 @@ static void cryptd_fini_queue(struct cry
+ static int cryptd_enqueue_request(struct cryptd_queue *queue,
+ struct crypto_async_request *request)
+ {
+- int cpu, err;
++ int err;
+ struct cryptd_cpu_queue *cpu_queue;
+ refcount_t *refcnt;
+
+- cpu = get_cpu();
++ local_bh_disable();
+ cpu_queue = this_cpu_ptr(queue->cpu_queue);
+ err = crypto_enqueue_request(&cpu_queue->queue, request);
+
+ refcnt = crypto_tfm_ctx(request->tfm);
+
+ if (err == -ENOSPC)
+- goto out_put_cpu;
++ goto out;
+
+- queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
++ queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
+
+ if (!refcount_read(refcnt))
+- goto out_put_cpu;
++ goto out;
+
+ refcount_inc(refcnt);
+
+-out_put_cpu:
+- put_cpu();
++out:
++ local_bh_enable();
+
+ return err;
+ }
+@@ -162,15 +166,10 @@ static void cryptd_queue_worker(struct w
+ cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
+ /*
+ * Only handle one request at a time to avoid hogging crypto workqueue.
+- * preempt_disable/enable is used to prevent being preempted by
+- * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
+- * cryptd_enqueue_request() being accessed from software interrupts.
+ */
+ local_bh_disable();
+- preempt_disable();
+ backlog = crypto_get_backlog(&cpu_queue->queue);
+ req = crypto_dequeue_request(&cpu_queue->queue);
+- preempt_enable();
+ local_bh_enable();
+
+ if (!req)
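
For illustration only (not part of the patch above): a minimal sketch of
the locking pattern the patch adopts, assuming a kernel build context.
The names "item", "pending" and "enqueue_item" are hypothetical.
Disabling BH around the per-CPU access serialises the task-context path
against the softirq path on the same CPU, which disabling preemption
alone (get_cpu()/put_cpu()) does not.

  #include <linux/bottom_half.h>
  #include <linux/list.h>
  #include <linux/percpu.h>

  struct item {
          struct list_head node;
  };

  /* One queue per CPU; assume each list head is initialised at boot. */
  static DEFINE_PER_CPU(struct list_head, pending);

  /*
   * Safe from task context and softirq context alike: local_bh_disable()
   * pins the task to this CPU and excludes the softirq path on the same
   * CPU for the duration of the critical section.
   */
  static void enqueue_item(struct item *it)
  {
          local_bh_disable();
          list_add_tail(&it->node, this_cpu_ptr(&pending));
          local_bh_enable();
  }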