 #define SEC_XFER_SIZE 512
 #define EXTRACT_SIZE 10
 
-#define DEBUG_RANDOM_BOOT 0
 
 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
 
@@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng,
 static void _crng_backtrack_protect(struct crng_state *crng,
				     __u8 tmp[CHACHA20_BLOCK_SIZE], int used);
 static void process_random_ready_list(void);
+static void _get_random_bytes(void *buf, int nbytes);
 
 /**********************************************************************
  *
@@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
-		get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
@@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
	}
 }
 
-static inline void crng_wait_ready(void)
-{
-	wait_event_interruptible(crng_init_wait, crng_ready());
-}
-
 static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA20_BLOCK_SIZE])
 {
@@ -1477,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
	return ret;
 }
 
+#define warn_unseeded_randomness(previous) \
+	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))
+
+static void _warn_unseeded_randomness(const char *func_name, void *caller,
+				      void **previous)
+{
+#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	const bool print_once = false;
+#else
+	static bool print_once __read_mostly;
+#endif
+
+	if (print_once ||
+	    crng_ready() ||
+	    (previous && (caller == READ_ONCE(*previous))))
+		return;
+	WRITE_ONCE(*previous, caller);
+#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
+	print_once = true;
+#endif
+	pr_notice("random: %s called from %pF with crng_init=%d\n",
+		  func_name, caller, crng_init);
+}
+
 /*
  * This function is the exported kernel interface. It returns some
  * number of good random numbers, suitable for key generation, seeding
  * TCP sequence numbers, etc. It does not rely on the hardware random
  * number generator. For random bytes direct from the hardware RNG
- * (when available), use get_random_bytes_arch().
+ * (when available), use get_random_bytes_arch(). In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
-void get_random_bytes(void *buf, int nbytes)
+static void _get_random_bytes(void *buf, int nbytes)
 {
	__u8 tmp[CHACHA20_BLOCK_SIZE];
 
-#if DEBUG_RANDOM_BOOT > 0
-	if (!crng_ready())
-		printk(KERN_NOTICE "random: %pF get_random_bytes called "
-		       "with crng_init = %d\n", (void *) _RET_IP_, crng_init);
-#endif
	trace_get_random_bytes(nbytes, _RET_IP_);
 
	while (nbytes >= CHACHA20_BLOCK_SIZE) {
@@ -1509,8 +1526,34 @@ void get_random_bytes(void *buf, int nbytes)
	crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
 }
+
+void get_random_bytes(void *buf, int nbytes)
+{
+	static void *previous;
+
+	warn_unseeded_randomness(&previous);
+	_get_random_bytes(buf, nbytes);
+}
 EXPORT_SYMBOL(get_random_bytes);
 
+/*
+ * Wait for the urandom pool to be seeded and thus guaranteed to supply
+ * cryptographically secure random numbers. This applies to: the /dev/urandom
+ * device, the get_random_bytes function, and the get_random_{u32,u64,int,long}
+ * family of functions. Using any of these functions without first calling
+ * this function forfeits the guarantee of security.
+ *
+ * Returns: 0 if the urandom pool has been seeded.
+ *          -ERESTARTSYS if the function was interrupted by a signal.
+ */
+int wait_for_random_bytes(void)
+{
+	if (likely(crng_ready()))
+		return 0;
+	return wait_event_interruptible(crng_init_wait, crng_ready());
+}
+EXPORT_SYMBOL(wait_for_random_bytes);
+
 /*
  * Add a callback function that will be invoked when the nonblocking
  * pool is initialised.
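For context, a minimal sketch (not part of the diff) of how an in-kernel consumer is expected to pair the new wait_for_random_bytes() with get_random_bytes(); the helper name is hypothetical:

#include <linux/random.h>

/* Hypothetical helper, not part of the patch. */
static int example_generate_key(void *key, int len)
{
	int ret;

	/* Sleep until crng_ready(); -ERESTARTSYS if a signal arrives first. */
	ret = wait_for_random_bytes();
	if (ret)
		return ret;

	/* The pool is seeded now, so the unseeded-use warning stays quiet. */
	get_random_bytes(key, len);
	return 0;
}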
@@ -1865,6 +1908,8 @@ const struct file_operations urandom_fops = {
 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
 {
+	int ret;
+
	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;
 
@@ -1877,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
-		crng_wait_ready();
-		if (signal_pending(current))
-			return -ERESTARTSYS;
+		ret = wait_for_random_bytes();
+		if (unlikely(ret))
+			return ret;
	}
	return urandom_read(NULL, buf, count, NULL);
 }
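A hypothetical userspace sketch (not part of the diff) of how the two getrandom(2) paths above behave: GRND_NONBLOCK yields EAGAIN while the pool is unseeded, and the blocking path now waits in wait_for_random_bytes(), so an interrupted wait surfaces to userspace as EINTR (-ERESTARTSYS inside the kernel):

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>

int main(void)
{
	unsigned char buf[16];
	ssize_t n;

	/* Non-blocking probe: fails fast if the CRNG is not yet seeded. */
	n = getrandom(buf, sizeof(buf), GRND_NONBLOCK);
	if (n < 0 && errno == EAGAIN)
		fprintf(stderr, "pool not seeded yet, would block\n");

	/* Blocking variant: retry if a signal interrupts the wait. */
	do {
		n = getrandom(buf, sizeof(buf), 0);
	} while (n < 0 && errno == EINTR);

	return n == (ssize_t)sizeof(buf) ? 0 : 1;
}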
@@ -2040,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_
 /*
  * Get a random word for internal kernel use only. The quality of the random
  * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy.
+ * goal of being quite fast and not depleting entropy. In order to ensure
+ * that the randomness provided by this function is okay, the function
+ * wait_for_random_bytes() should be called and return 0 at least once
+ * at any point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
	u64 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
	unsigned long flags = 0;
	struct batched_entropy *batch;
+	static void *previous;
 
 #if BITS_PER_LONG == 64
	if (arch_get_random_long((unsigned long *)&ret))
@@ -2059,6 +2108,9 @@ u64 get_random_u64(void)
	return ret;
 #endif
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
	batch = &get_cpu_var(batched_entropy_u64);
	if (use_lock)
		read_lock_irqsave(&batched_entropy_reset_lock, flags);
@@ -2078,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
	u32 ret;
-	bool use_lock = READ_ONCE(crng_init) < 2;
+	bool use_lock;
	unsigned long flags = 0;
	struct batched_entropy *batch;
+	static void *previous;
 
	if (arch_get_random_int(&ret))
		return ret;
 
+	warn_unseeded_randomness(&previous);
+
+	use_lock = READ_ONCE(crng_init) < 2;
	batch = &get_cpu_var(batched_entropy_u32);
	if (use_lock)
		read_lock_irqsave(&batched_entropy_reset_lock, flags);
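A brief hypothetical caller (not part of the diff) illustrating the batched get_random_u32() path and the warning wired in above; the function name is made up:

#include <linux/random.h>

/* Hypothetical caller, not part of the patch. */
static u32 example_backoff_jitter(u32 max_jitter)
{
	/*
	 * Fast per-CPU batched path. If the CRNG is still unseeded
	 * (crng_init < 2) this logs the notice added above, keyed on the
	 * calling address recorded via _RET_IP_ and suppressed for
	 * repeated calls from the same caller unless
	 * CONFIG_WARN_ALL_UNSEEDED_RANDOM is set.
	 */
	return get_random_u32() % max_jitter;
}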