Diffstat (limited to 'drivers/char')
-rw-r--r-- | drivers/char/random.c | 46 |
1 files changed, 21 insertions, 25 deletions
diff --git a/drivers/char/random.c b/drivers/char/random.c
index bbc9c1c304b9..4aec2fb45813 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -109,7 +109,7 @@ bool rng_is_initialized(void)
 }
 EXPORT_SYMBOL(rng_is_initialized);
 
-static void crng_set_ready(struct work_struct *work)
+static void __cold crng_set_ready(struct work_struct *work)
 {
 	static_branch_enable(&crng_is_ready);
 }
@@ -148,7 +148,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
  * returns: 0 if callback is successfully added
  *	    -EALREADY if pool is already initialised (callback not called)
  */
-int register_random_ready_notifier(struct notifier_block *nb)
+int __cold register_random_ready_notifier(struct notifier_block *nb)
 {
 	unsigned long flags;
 	int ret = -EALREADY;
@@ -166,7 +166,7 @@ int register_random_ready_notifier(struct notifier_block *nb)
 /*
  * Delete a previously registered readiness callback function.
  */
-int unregister_random_ready_notifier(struct notifier_block *nb)
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
 {
 	unsigned long flags;
 	int ret;
@@ -177,7 +177,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
 	return ret;
 }
 
-static void process_random_ready_list(void)
+static void __cold process_random_ready_list(void)
 {
 	unsigned long flags;
 
@@ -187,15 +187,9 @@ static void process_random_ready_list(void)
 }
 
 #define warn_unseeded_randomness() \
-	_warn_unseeded_randomness(__func__, (void *)_RET_IP_)
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller)
-{
-	if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready())
-		return;
-	printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
-			func_name, caller, crng_init);
-}
+	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+				__func__, (void *)_RET_IP_, crng_init)
 
 
 /*********************************************************************
@@ -614,7 +608,7 @@ EXPORT_SYMBOL(get_random_u32);
  * This function is called when the CPU is coming up, with entry
  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
  */
-int random_prepare_cpu(unsigned int cpu)
+int __cold random_prepare_cpu(unsigned int cpu)
 {
 	/*
 	 * When the cpu comes back online, immediately invalidate both
@@ -789,13 +783,15 @@ static void extract_entropy(void *buf, size_t len)
 	memzero_explicit(&block, sizeof(block));
 }
 
-static void credit_init_bits(size_t bits)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
 {
 	static struct execute_work set_ready;
 	unsigned int new, orig, add;
 	unsigned long flags;
 
-	if (crng_ready() || !bits)
+	if (!bits)
 		return;
 	add = min_t(size_t, bits, POOL_BITS);
 
@@ -979,7 +975,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  * Handle random seed passed by bootloader, and credit it if
  * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
  */
-void add_bootloader_randomness(const void *buf, size_t len)
+void __cold add_bootloader_randomness(const void *buf, size_t len)
 {
 	mix_pool_bytes(buf, len);
 	if (trust_bootloader)
@@ -995,7 +991,7 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
  * don't credit it, but we do immediately force a reseed after so
  * that it's used by the crng posthaste.
  */
-void add_vmfork_randomness(const void *unique_vm_id, size_t len)
+void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
 {
 	add_device_randomness(unique_vm_id, len);
 	if (crng_ready()) {
@@ -1008,13 +1004,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t len)
 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
 #endif
 
-int register_random_vmfork_notifier(struct notifier_block *nb)
+int __cold register_random_vmfork_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&vmfork_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
 
-int unregister_random_vmfork_notifier(struct notifier_block *nb)
+int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
 }
@@ -1059,7 +1055,7 @@ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
  * This function is called when the CPU has just come online, with
  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
  */
-int random_online_cpu(unsigned int cpu)
+int __cold random_online_cpu(unsigned int cpu)
 {
 	/*
 	 * During CPU shutdown and before CPU onlining, add_interrupt_
@@ -1214,7 +1210,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu
 	if (in_hardirq())
 		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
 	else
-		credit_init_bits(bits);
+		_credit_init_bits(bits);
 }
 
 void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
@@ -1242,7 +1238,7 @@ void add_disk_randomness(struct gendisk *disk)
 }
 EXPORT_SYMBOL_GPL(add_disk_randomness);
 
-void rand_initialize_disk(struct gendisk *disk)
+void __cold rand_initialize_disk(struct gendisk *disk)
 {
 	struct timer_rand_state *state;
 
@@ -1271,7 +1267,7 @@ void rand_initialize_disk(struct gendisk *disk)
  *
  * So the re-arming always happens in the entropy loop itself.
  */
-static void entropy_timer(struct timer_list *t)
+static void __cold entropy_timer(struct timer_list *t)
 {
 	credit_init_bits(1);
 }
@@ -1280,7 +1276,7 @@ static void entropy_timer(struct timer_list *t)
  * If we have an actual cycle counter, see if we can
  * generate enough entropy with timing noise
  */
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
 {
 	struct {
 		unsigned long entropy;
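Illustrative note (not part of the commit): the credit_init_bits() change above follows a common pattern in which every call site keeps a cheap inline readiness test while the rarely executed body moves into a separate function marked __cold, so the compiler can place it away from the hot text. The minimal userspace sketch below shows that pattern only; the names pool_ready and _credit_bits are invented stand-ins for crng_ready() and _credit_init_bits(), and the wrapper uses do/while for macro hygiene where the kernel macro is a bare if().

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-in for the input pool's "ready" flag. */
static bool pool_ready;

/* Cold helper: only reachable until the pool is marked ready. */
__attribute__((cold, noinline))
static void _credit_bits(size_t bits)
{
	printf("crediting %zu bits\n", bits);
	if (bits >= 256)
		pool_ready = true;
}

/* Hot-path wrapper: callers pay only for one inline predicate test. */
#define credit_bits(bits) do { if (!pool_ready) _credit_bits(bits); } while (0)

int main(void)
{
	credit_bits(128);	/* pool not ready: cold helper runs */
	credit_bits(256);	/* cold helper runs and marks the pool ready */
	credit_bits(64);	/* predicate fails inline: helper is skipped */
	return 0;
}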