
Commit 5ad9345

Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random
Pull randomness fixes from Ted Ts'o:
 "Improve performance by using a lockless update mechanism suggested by
  Linus, and make sure we refresh the per-CPU entropy returned by
  get_random_* as soon as the CRNG is initialized"

* tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: invalidate batched entropy after crng init
  random: use lockless method of accessing and updating f->reg_idx
2 parents 5e38b72 + b169c13 commit 5ad9345
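
The two fixes are complementary. Batches of entropy handed out by get_random_u32()/get_random_u64() before the CRNG is seeded must be discarded once seeding completes, otherwise callers could keep draining words generated from a weakly keyed CRNG. And get_reg()'s per-CPU register cursor tolerates racy updates, since any stale or torn index still selects some register word from the interrupted context, so the local_irq_save()/local_irq_restore() pair around it was pure overhead on the interrupt path. Minimal, hypothetical userspace sketches of each pattern appear after the relevant hunks below.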

1 file changed, 43 additions, 6 deletions

drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <[email protected]>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <[email protected]>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
 	__u32 *ptr = (__u32 *) regs;
-	unsigned long flags;
+	unsigned int idx;
 
 	if (regs == NULL)
 		return 0;
-	local_irq_save(flags);
-	if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-		f->reg_idx = 0;
-	ptr += f->reg_idx++;
-	local_irq_restore(flags);
+	idx = READ_ONCE(f->reg_idx);
+	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+		idx = 0;
+	ptr += idx++;
+	WRITE_ONCE(f->reg_idx, idx);
 	return *ptr;
 }
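
A minimal userspace sketch of the lockless cursor pattern in get_reg() above; fast_pool_sketch, get_word, regs, and nwords are hypothetical stand-ins for the kernel types, and READ_ONCE/WRITE_ONCE are reimplemented with volatile casts (assuming GCC-style typeof). The read-modify-write is deliberately racy: a concurrent update can lose an increment, but every index that survives the bounds check still selects a valid word.

#include <stddef.h>
#include <stdint.h>

/* Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE(). */
#define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

struct fast_pool_sketch {
	unsigned int reg_idx;           /* next register word to sample */
};

static uint32_t get_word(struct fast_pool_sketch *f,
			 const uint32_t *regs, size_t nwords)
{
	/* Snapshot the cursor once; a racing CPU may overwrite it,
	 * but any value that passes the bounds check is usable. */
	unsigned int idx = READ_ONCE(f->reg_idx);

	if (idx >= nwords)
		idx = 0;
	/* Publish the advanced cursor with no lock or irq toggling;
	 * a lost increment only means a word is sampled twice. */
	WRITE_ONCE(f->reg_idx, idx + 1);
	return regs[idx];
}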

@@ -2023,6 +2030,7 @@ struct batched_entropy {
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
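
A single-threaded sketch of the refill logic above, with hypothetical names (batched_entropy_sketch, fill_from_crng standing in for extract_crng()): whenever position wraps to a multiple of the batch size, the whole buffer is regenerated before the next word is handed out.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BATCH_WORDS 8

struct batched_entropy_sketch {
	uint64_t entropy_u64[BATCH_WORDS];
	unsigned int position;          /* next word to hand out */
};

static void fill_from_crng(uint8_t *buf, size_t len)
{
	/* Placeholder: the real code extracts from the ChaCha20 CRNG. */
	memset(buf, 0xAA, len);
}

static uint64_t get_random_u64_sketch(struct batched_entropy_sketch *b)
{
	/* An exhausted (or freshly reset) batch is refilled wholesale. */
	if (b->position % BATCH_WORDS == 0) {
		fill_from_crng((uint8_t *)b->entropy_u64,
			       sizeof(b->entropy_u64));
		b->position = 0;
	}
	return b->entropy_u64[b->position++];
}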
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.
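
A userspace analog of the reset scheme above, using pthreads rather than kernel locking; NR_CPUS_SKETCH and the function names here are hypothetical. Consumers take the rwlock in read mode only while the CRNG is still unseeded (the use_lock fast path above), so the one-time write-locked invalidation can safely zero every batch position, forcing re-extraction on next use; after initialization the lock is never touched again.

#include <pthread.h>

#define NR_CPUS_SKETCH 4

struct batch_sketch {
	unsigned int position;          /* next word to hand out */
};

static struct batch_sketch batches_u32[NR_CPUS_SKETCH];
static struct batch_sketch batches_u64[NR_CPUS_SKETCH];
static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Reader side: only pay for the lock while the CRNG is unseeded. */
static unsigned int consume_position(struct batch_sketch *b, int crng_ready)
{
	unsigned int pos;

	if (!crng_ready)
		pthread_rwlock_rdlock(&reset_lock);
	pos = b->position++;
	if (!crng_ready)
		pthread_rwlock_unlock(&reset_lock);
	return pos;
}

/* One-time writer: discard any words batched before seeding by
 * rewinding every position, so each next consumer refills its batch. */
static void invalidate_batched_entropy_sketch(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	for (int cpu = 0; cpu < NR_CPUS_SKETCH; cpu++) {
		batches_u32[cpu].position = 0;
		batches_u64[cpu].position = 0;
	}
	pthread_rwlock_unlock(&reset_lock);
}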
