summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2022-02-10 18:22:05 +0100
committerSebastian Andrzej Siewior <bigeasy@linutronix.de>2022-02-10 19:13:53 +0100
commita2d2d54409481aa23a3e11ab9559a843e36a79ec (patch)
treeb45ac618f7bce55ae8c70c94d5a473a2cc9c1c4c
parent5c787904eadfd7fbcf182e7f11e892f9be063b6d (diff)
downloadlinux-rt-a2d2d54409481aa23a3e11ab9559a843e36a79ec.tar.gz
random: Move crng_fast_load() to the worker.
crng_fast_load() is invoked from hard IRQ context and acquires a spinlock_t via a trylock. If the lock is locked in hard IRQ context then the following locking attempt (on another CPU) will PI-boost the wrong task. Move the crng_fast_load() invocation into the worker. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
-rw-r--r--drivers/char/random.c27
1 file changed, 22 insertions, 5 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 260e45245032..e4bde9c91765 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1047,6 +1047,17 @@ static void mix_interrupt_randomness(struct work_struct *work)
struct fast_pool *fast_pool = container_of(work, struct fast_pool, mix);
u8 pool[sizeof(fast_pool->pool)];
+ if (unlikely(crng_init == 0)) {
+ size_t ret;
+
+ ret = crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool));
+ if (ret) {
+ WRITE_ONCE(fast_pool->count, 0);
+ fast_pool->last = jiffies;
+ return;
+ }
+ }
+
/*
* Since this is the result of a trip through the scheduler, xor in
* a cycle counter. It can't hurt, and might help.
@@ -1089,11 +1100,17 @@ void add_interrupt_randomness(int irq)
new_count = ++fast_pool->count;
if (unlikely(crng_init == 0)) {
- if (new_count >= 64 &&
- crng_fast_load((u8 *)fast_pool->pool, sizeof(fast_pool->pool)) > 0) {
- fast_pool->count = 0;
- fast_pool->last = now;
- }
+ if (new_count & FAST_POOL_MIX_INFLIGHT)
+ return;
+
+ if (new_count < 64)
+ return;
+
+ fast_pool->count |= FAST_POOL_MIX_INFLIGHT;
+ if (unlikely(!fast_pool->mix.func))
+ INIT_WORK(&fast_pool->mix, mix_interrupt_randomness);
+ queue_work_on(raw_smp_processor_id(), system_highpri_wq,
+ &fast_pool->mix);
return;
}