Skip to content

Commit 560181c

Browse files
committed
random: move initialization functions out of hot pages
Much of random.c is devoted to initializing the rng and accounting for when a sufficient amount of entropy has been added. In a perfect world, this would all happen during init, and so we could mark these functions as __init. But in reality, this isn't the case: sometimes the rng only finishes initializing some seconds after system init is finished.

For this reason, at the moment, a whole host of functions that are only used relatively close to system init and then never again are intermixed with functions that are used in hot code all the time. This creates more cache misses than necessary.

In order to pack the hot code closer together, this commit moves the initialization functions that can't be marked as __init into .text.unlikely by way of the __cold attribute. Of particular note is moving credit_init_bits() into a macro wrapper that inlines the crng_ready() static branch check. This avoids a function call to a nop+ret, and most notably prevents extra entropy arithmetic from being computed in mix_interrupt_randomness().

Reviewed-by: Dominik Brodowski <[email protected]>
Signed-off-by: Jason A. Donenfeld <[email protected]>
1 parent a194026 commit 560181c

File tree

1 file changed (+21 additions, -25 deletions)

drivers/char/random.c

Lines changed: 21 additions & 25 deletions
Original file line number · Diff line number · Diff line change
@@ -110,7 +110,7 @@ bool rng_is_initialized(void)
110110
}
111111
EXPORT_SYMBOL(rng_is_initialized);
112112

113-
static void crng_set_ready(struct work_struct *work)
113+
static void __cold crng_set_ready(struct work_struct *work)
114114
{
115115
static_branch_enable(&crng_is_ready);
116116
}
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
149149
* returns: 0 if callback is successfully added
150150
* -EALREADY if pool is already initialised (callback not called)
151151
*/
152-
int register_random_ready_notifier(struct notifier_block *nb)
152+
int __cold register_random_ready_notifier(struct notifier_block *nb)
153153
{
154154
unsigned long flags;
155155
int ret = -EALREADY;
@@ -167,7 +167,7 @@ int register_random_ready_notifier(struct notifier_block *nb)
167167
/*
168168
* Delete a previously registered readiness callback function.
169169
*/
170-
int unregister_random_ready_notifier(struct notifier_block *nb)
170+
int __cold unregister_random_ready_notifier(struct notifier_block *nb)
171171
{
172172
unsigned long flags;
173173
int ret;
@@ -178,7 +178,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
178178
return ret;
179179
}
180180

181-
static void process_random_ready_list(void)
181+
static void __cold process_random_ready_list(void)
182182
{
183183
unsigned long flags;
184184

@@ -188,15 +188,9 @@ static void process_random_ready_list(void)
188188
}
189189

190190
#define warn_unseeded_randomness() \
191-
_warn_unseeded_randomness(__func__, (void *)_RET_IP_)
192-
193-
static void _warn_unseeded_randomness(const char *func_name, void *caller)
194-
{
195-
if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready())
196-
return;
197-
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
198-
func_name, caller, crng_init);
199-
}
191+
if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
192+
printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
193+
__func__, (void *)_RET_IP_, crng_init)
200194

201195

202196
/*********************************************************************
@@ -615,7 +609,7 @@ EXPORT_SYMBOL(get_random_u32);
615609
* This function is called when the CPU is coming up, with entry
616610
* CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
617611
*/
618-
int random_prepare_cpu(unsigned int cpu)
612+
int __cold random_prepare_cpu(unsigned int cpu)
619613
{
620614
/*
621615
* When the cpu comes back online, immediately invalidate both
@@ -790,13 +784,15 @@ static void extract_entropy(void *buf, size_t len)
790784
memzero_explicit(&block, sizeof(block));
791785
}
792786

793-
static void credit_init_bits(size_t bits)
787+
#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
788+
789+
static void __cold _credit_init_bits(size_t bits)
794790
{
795791
static struct execute_work set_ready;
796792
unsigned int new, orig, add;
797793
unsigned long flags;
798794

799-
if (crng_ready() || !bits)
795+
if (!bits)
800796
return;
801797

802798
add = min_t(size_t, bits, POOL_BITS);
@@ -1011,7 +1007,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
10111007
* Handle random seed passed by bootloader, and credit it if
10121008
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
10131009
*/
1014-
void add_bootloader_randomness(const void *buf, size_t len)
1010+
void __cold add_bootloader_randomness(const void *buf, size_t len)
10151011
{
10161012
mix_pool_bytes(buf, len);
10171013
if (trust_bootloader)
@@ -1027,7 +1023,7 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
10271023
* don't credit it, but we do immediately force a reseed after so
10281024
* that it's used by the crng posthaste.
10291025
*/
1030-
void add_vmfork_randomness(const void *unique_vm_id, size_t len)
1026+
void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
10311027
{
10321028
add_device_randomness(unique_vm_id, len);
10331029
if (crng_ready()) {
@@ -1040,13 +1036,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t len)
10401036
EXPORT_SYMBOL_GPL(add_vmfork_randomness);
10411037
#endif
10421038

1043-
int register_random_vmfork_notifier(struct notifier_block *nb)
1039+
int __cold register_random_vmfork_notifier(struct notifier_block *nb)
10441040
{
10451041
return blocking_notifier_chain_register(&vmfork_chain, nb);
10461042
}
10471043
EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
10481044

1049-
int unregister_random_vmfork_notifier(struct notifier_block *nb)
1045+
int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
10501046
{
10511047
return blocking_notifier_chain_unregister(&vmfork_chain, nb);
10521048
}
@@ -1091,7 +1087,7 @@ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
10911087
* This function is called when the CPU has just come online, with
10921088
* entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
10931089
*/
1094-
int random_online_cpu(unsigned int cpu)
1090+
int __cold random_online_cpu(unsigned int cpu)
10951091
{
10961092
/*
10971093
* During CPU shutdown and before CPU onlining, add_interrupt_
@@ -1246,7 +1242,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int nu
12461242
if (in_hardirq())
12471243
this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
12481244
else
1249-
credit_init_bits(bits);
1245+
_credit_init_bits(bits);
12501246
}
12511247

12521248
void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
@@ -1274,7 +1270,7 @@ void add_disk_randomness(struct gendisk *disk)
12741270
}
12751271
EXPORT_SYMBOL_GPL(add_disk_randomness);
12761272

1277-
void rand_initialize_disk(struct gendisk *disk)
1273+
void __cold rand_initialize_disk(struct gendisk *disk)
12781274
{
12791275
struct timer_rand_state *state;
12801276

@@ -1309,7 +1305,7 @@ struct entropy_timer_state {
13091305
*
13101306
* So the re-arming always happens in the entropy loop itself.
13111307
*/
1312-
static void entropy_timer(struct timer_list *timer)
1308+
static void __cold entropy_timer(struct timer_list *timer)
13131309
{
13141310
struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
13151311

@@ -1323,7 +1319,7 @@ static void entropy_timer(struct timer_list *timer)
13231319
* If we have an actual cycle counter, see if we can
13241320
* generate enough entropy with timing noise
13251321
*/
1326-
static void try_to_generate_entropy(void)
1322+
static void __cold try_to_generate_entropy(void)
13271323
{
13281324
enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
13291325
struct entropy_timer_state stack;

0 commit comments

Comments (0)