@@ -110,7 +110,7 @@ bool rng_is_initialized(void)
 }
 EXPORT_SYMBOL(rng_is_initialized);
 
-static void crng_set_ready(struct work_struct *work)
+static void __cold crng_set_ready(struct work_struct *work)
 {
 	static_branch_enable(&crng_is_ready);
 }
@@ -149,7 +149,7 @@ EXPORT_SYMBOL(wait_for_random_bytes);
  * returns: 0 if callback is successfully added
  *	    -EALREADY if pool is already initialised (callback not called)
  */
-int register_random_ready_notifier(struct notifier_block *nb)
+int __cold register_random_ready_notifier(struct notifier_block *nb)
 {
 	unsigned long flags;
 	int ret = -EALREADY;
@@ -167,7 +167,7 @@ int register_random_ready_notifier(struct notifier_block *nb)
 /*
  * Delete a previously registered readiness callback function.
  */
-int unregister_random_ready_notifier(struct notifier_block *nb)
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
 {
 	unsigned long flags;
 	int ret;
@@ -178,7 +178,7 @@ int unregister_random_ready_notifier(struct notifier_block *nb)
 	return ret;
 }
 
-static void process_random_ready_list(void)
+static void __cold process_random_ready_list(void)
 {
 	unsigned long flags;
 
@@ -188,15 +188,9 @@ static void process_random_ready_list(void)
 }
 
 #define warn_unseeded_randomness() \
-	_warn_unseeded_randomness(__func__, (void *)_RET_IP_)
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller)
-{
-	if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready())
-		return;
-	printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
-			func_name, caller, crng_init);
-}
+	if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+		printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n", \
+				__func__, (void *)_RET_IP_, crng_init)
 
 
 /*********************************************************************
@@ -615,7 +609,7 @@ EXPORT_SYMBOL(get_random_u32);
  * This function is called when the CPU is coming up, with entry
  * CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
  */
-int random_prepare_cpu(unsigned int cpu)
+int __cold random_prepare_cpu(unsigned int cpu)
 {
 	/*
 	 * When the cpu comes back online, immediately invalidate both
@@ -790,13 +784,15 @@ static void extract_entropy(void *buf, size_t len)
 	memzero_explicit(&block, sizeof(block));
 }
 
-static void credit_init_bits(size_t bits)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
 {
 	static struct execute_work set_ready;
 	unsigned int new, orig, add;
 	unsigned long flags;
 
-	if (crng_ready() || !bits)
+	if (!bits)
 		return;
 
 	add = min_t(size_t, bits, POOL_BITS);
@@ -1011,7 +1007,7 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);
  * Handle random seed passed by bootloader, and credit it if
  * CONFIG_RANDOM_TRUST_BOOTLOADER is set.
  */
-void add_bootloader_randomness(const void *buf, size_t len)
+void __cold add_bootloader_randomness(const void *buf, size_t len)
 {
 	mix_pool_bytes(buf, len);
 	if (trust_bootloader)
@@ -1027,7 +1023,7 @@ static BLOCKING_NOTIFIER_HEAD(vmfork_chain);
  * don't credit it, but we do immediately force a reseed after so
  * that it's used by the crng posthaste.
 */
-void add_vmfork_randomness(const void *unique_vm_id, size_t len)
+void __cold add_vmfork_randomness(const void *unique_vm_id, size_t len)
 {
 	add_device_randomness(unique_vm_id, len);
 	if (crng_ready()) {
@@ -1040,13 +1036,13 @@ void add_vmfork_randomness(const void *unique_vm_id, size_t len)
 EXPORT_SYMBOL_GPL(add_vmfork_randomness);
 #endif
 
-int register_random_vmfork_notifier(struct notifier_block *nb)
+int __cold register_random_vmfork_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_register(&vmfork_chain, nb);
 }
 EXPORT_SYMBOL_GPL(register_random_vmfork_notifier);
 
-int unregister_random_vmfork_notifier(struct notifier_block *nb)
+int __cold unregister_random_vmfork_notifier(struct notifier_block *nb)
 {
 	return blocking_notifier_chain_unregister(&vmfork_chain, nb);
 }
@@ -1091,7 +1087,7 @@ static void fast_mix(unsigned long s[4], unsigned long v1, unsigned long v2)
  * This function is called when the CPU has just come online, with
  * entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
  */
-int random_online_cpu(unsigned int cpu)
+int __cold random_online_cpu(unsigned int cpu)
 {
 	/*
 	 * During CPU shutdown and before CPU onlining, add_interrupt_
@@ -1246,7 +1242,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned int num)
 	if (in_hardirq())
 		this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
 	else
-		credit_init_bits(bits);
+		_credit_init_bits(bits);
 }
 
 void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
@@ -1274,7 +1270,7 @@ void add_disk_randomness(struct gendisk *disk)
 }
 EXPORT_SYMBOL_GPL(add_disk_randomness);
 
-void rand_initialize_disk(struct gendisk *disk)
+void __cold rand_initialize_disk(struct gendisk *disk)
 {
 	struct timer_rand_state *state;
 
@@ -1309,7 +1305,7 @@ struct entropy_timer_state {
  *
  * So the re-arming always happens in the entropy loop itself.
  */
-static void entropy_timer(struct timer_list *timer)
+static void __cold entropy_timer(struct timer_list *timer)
 {
 	struct entropy_timer_state *state = container_of(timer, struct entropy_timer_state, timer);
 
@@ -1323,7 +1319,7 @@ static void entropy_timer(struct timer_list *timer)
  * If we have an actual cycle counter, see if we can
  * generate enough entropy with timing noise
  */
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
 {
 	enum { NUM_TRIAL_SAMPLES = 8192, MAX_SAMPLES_PER_BIT = 32 };
 	struct entropy_timer_state stack;
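
The whole patch applies one pattern: leave a cheap inline check at each call site and move the rarely-executed initialization work into out-of-line __cold functions, so it stays off the hot text pages and out of the instruction cache. Below is a minimal userspace sketch of that pattern as applied to credit_init_bits(); it is not kernel code, and the names pool_ready, _credit_slow_path, and credit_bits are hypothetical stand-ins.

#include <stddef.h>
#include <stdbool.h>
#include <stdio.h>

/* The GCC/Clang attribute behind the kernel's __cold: calls are treated as
 * unlikely, and the function body may be placed in a cold text section. */
#define __cold __attribute__((__cold__))

static bool pool_ready;	/* hypothetical stand-in for crng_ready() */

/* Out-of-line slow path, in the role of _credit_init_bits(). */
static void __cold _credit_slow_path(size_t bits)
{
	if (!bits)
		return;
	printf("crediting %zu bits\n", bits);
}

/* Like the patch's credit_init_bits() wrapper, only the branch remains in
 * the caller; once pool_ready is set, the cold function is never called. */
#define credit_bits(bits) if (!pool_ready) _credit_slow_path(bits)

int main(void)
{
	credit_bits(32);	/* not yet ready: takes the cold path */
	pool_ready = true;
	credit_bits(32);	/* ready: the branch falls through, no call */
	return 0;
}

One caveat the kernel accepts here: because the wrapper expands to a bare if rather than a do { ... } while (0), such a macro can pair with an unintended else at a careless call site, so it is only safe while every caller uses it as a plain statement.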