@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <[email protected]>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
-	unsigned long flags;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	local_irq_save(flags);
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	ptr += f->reg_idx++;
-	local_irq_restore(flags);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
 	return *ptr;
 }
 
@@ -2023,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu(cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.
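The comment introduced above for invalidate_batched_entropy() describes the lazy approach: rather than wiping the batched words, the per-CPU position counters are reset to zero so that the next get_random_u32()/get_random_u64() call re-extracts a fresh batch from the now-initialized crng. As a rough illustration of that pattern only, here is a minimal single-threaded userspace sketch; it is not kernel code, all names (fake_batch, fake_extract, NR_FAKE_CPUS, etc.) are invented for this example, and the rwlock that guards the real reset is deliberately omitted.

/* Userspace sketch of lazy batch invalidation (illustration only). */
#include <stdint.h>
#include <stdio.h>

#define NR_FAKE_CPUS	4
#define BATCH_WORDS	8

struct fake_batch {
	uint64_t entropy[BATCH_WORDS];
	unsigned int position;
};

static struct fake_batch batches[NR_FAKE_CPUS];

/* Stand-in for extract_crng(): refill a batch with "fresh" output. */
static void fake_extract(struct fake_batch *b)
{
	static uint64_t counter;
	for (int i = 0; i < BATCH_WORDS; i++)
		b->entropy[i] = ++counter;
}

/* Mirrors the shape of get_random_u64(): refill only when the batch is exhausted. */
static uint64_t fake_get_u64(int cpu)
{
	struct fake_batch *b = &batches[cpu];

	if (b->position % BATCH_WORDS == 0) {
		fake_extract(b);
		b->position = 0;
	}
	return b->entropy[b->position++];
}

/* Mirrors invalidate_batched_entropy(): mark every batch stale by resetting position. */
static void fake_invalidate_all(void)
{
	for (int cpu = 0; cpu < NR_FAKE_CPUS; cpu++)
		batches[cpu].position = 0;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)fake_get_u64(0)); /* fills batch, returns word 0 */
	printf("%llu\n", (unsigned long long)fake_get_u64(0)); /* word 1 from the same batch */
	fake_invalidate_all();                                  /* e.g. the crng just became ready */
	printf("%llu\n", (unsigned long long)fake_get_u64(0)); /* position reset, so the batch is refilled */
	return 0;
}

In the kernel version the same reset has to be coordinated with concurrent readers on other CPUs, which is what batched_entropy_reset_lock and the use_lock read-lock paths added to get_random_u32()/get_random_u64() handle until crng_init reaches 2.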