
Commit 4a072c7

zx2c4 authored and tytso committed
random: silence compiler warnings and fix race
Odd versions of gcc for the sh4 architecture will actually warn about flags being used while uninitialized, so we set them to zero. Non-crazy gccs will optimize that out again, so it doesn't make a difference.

Next, over-aggressive gccs could inline the expression that defines use_lock, which could then introduce a race resulting in a lock imbalance. By using READ_ONCE, we prevent that fate. Finally, we make that assignment const, so that gcc can still optimize a nice amount.

Lastly, we fix a potential deadlock between primary_crng.lock and batched_entropy_reset_lock, where they could be taken in opposite order. Moving the call to invalidate_batched_entropy to outside the lock rectifies this issue.

Fixes: b169c13
Signed-off-by: Jason A. Donenfeld <[email protected]>
Signed-off-by: Theodore Ts'o <[email protected]>
Cc: [email protected]
1 parent b169c13 commit 4a072c7
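
To illustrate the use_lock race described above: the decision to take the lock and the decision to release it must come from one and the same read of crng_init. Below is a minimal userspace sketch of that pattern, with a pthread mutex, a stand-in READ_ONCE macro, and invented names; it is an analogy, not the kernel code itself.

#include <pthread.h>
#include <stdbool.h>

/* Stand-in for the kernel's READ_ONCE(): force a single load. */
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static int crng_init;                 /* bumped by the init paths */
static pthread_mutex_t batch_lock = PTHREAD_MUTEX_INITIALIZER;

void refill_batch(void)
{
        /* Decide once, from a single load of the shared flag.  If the
         * compiler were free to re-evaluate crng_init < 2 at the unlock
         * site, a concurrent update of crng_init could make the lock and
         * unlock decisions disagree, leaving the mutex held forever or
         * unlocked while never taken. */
        const bool use_lock = READ_ONCE(crng_init) < 2;

        if (use_lock)
                pthread_mutex_lock(&batch_lock);

        /* ... refill the batch of random bytes here ... */

        if (use_lock)
                pthread_mutex_unlock(&batch_lock);
}

Making use_lock const, as the message notes, still lets the compiler treat it as a plain loop-invariant value after that single read.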

File tree

1 file changed, +6 −6 lines: drivers/char/random.c


drivers/char/random.c

Lines changed: 6 additions & 6 deletions
@@ -803,13 +803,13 @@ static int crng_fast_load(const char *cp, size_t len)
                 p[crng_init_cnt % CHACHA20_KEY_SIZE] ^= *cp;
                 cp++; crng_init_cnt++; len--;
         }
+        spin_unlock_irqrestore(&primary_crng.lock, flags);
         if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
                 invalidate_batched_entropy();
                 crng_init = 1;
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: fast init done\n");
         }
-        spin_unlock_irqrestore(&primary_crng.lock, flags);
         return 1;
 }
 
@@ -841,14 +841,14 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
         }
         memzero_explicit(&buf, sizeof(buf));
         crng->init_time = jiffies;
+        spin_unlock_irqrestore(&primary_crng.lock, flags);
         if (crng == &primary_crng && crng_init < 2) {
                 invalidate_batched_entropy();
                 crng_init = 2;
                 process_random_ready_list();
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: crng init done\n");
         }
-        spin_unlock_irqrestore(&primary_crng.lock, flags);
 }
 
 static inline void crng_wait_ready(void)
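
The two hunks above move spin_unlock_irqrestore() ahead of the invalidate_batched_entropy() call, so primary_crng.lock is not held across invalidate_batched_entropy(), which (per the commit message) can take batched_entropy_reset_lock. A rough userspace sketch of the lock-ordering hazard this avoids, with pthread mutexes and placeholder names standing in for the kernel locks:

#include <pthread.h>

static pthread_mutex_t primary_lock = PTHREAD_MUTEX_INITIALIZER;      /* "A" */
static pthread_mutex_t batch_reset_lock = PTHREAD_MUTEX_INITIALIZER;  /* "B" */

/* Stand-in for invalidate_batched_entropy(): takes lock B. */
void invalidate_batches(void)
{
        pthread_mutex_lock(&batch_reset_lock);
        /* ... mark the batched entropy stale ... */
        pthread_mutex_unlock(&batch_reset_lock);
}

/* Deadlock-prone shape: B is taken while A is still held (A -> B),
 * while another path takes B first and may then need A (B -> A). */
void reseed_before_fix(void)
{
        pthread_mutex_lock(&primary_lock);
        /* ... mix in new key material ... */
        invalidate_batches();                 /* nested A -> B: wrong order */
        pthread_mutex_unlock(&primary_lock);
}

/* Shape after the fix: A is released before B is ever taken, so the
 * two locks are never nested in conflicting orders. */
void reseed_after_fix(void)
{
        pthread_mutex_lock(&primary_lock);
        /* ... mix in new key material ... */
        pthread_mutex_unlock(&primary_lock);
        invalidate_batches();                 /* B taken on its own */
}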
@@ -2041,8 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
         u64 ret;
-        bool use_lock = crng_init < 2;
-        unsigned long flags;
+        bool use_lock = READ_ONCE(crng_init) < 2;
+        unsigned long flags = 0;
         struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2073,8 +2073,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
         u32 ret;
-        bool use_lock = crng_init < 2;
-        unsigned long flags;
+        bool use_lock = READ_ONCE(crng_init) < 2;
+        unsigned long flags = 0;
         struct batched_entropy *batch;
 
         if (arch_get_random_int(&ret))
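
The flags = 0 change in the last two hunks addresses the sh4 warning: flags is only written by the irqsave lock acquisition when use_lock is true, and the compiler cannot prove that the matching irqrestore read is guarded by the same condition. A standalone sketch of that shape, with made-up helpers in place of the real spinlock API:

#include <stdbool.h>

/* Placeholders for the irqsave/irqrestore pair; not the kernel API. */
static unsigned long irqsave_placeholder(void) { return 1UL; }
static void irqrestore_placeholder(unsigned long flags) { (void)flags; }

void refill(bool use_lock)
{
        /* Zero-initialized: flags is only assigned in the first branch,
         * and some gcc versions (sh4 was the reported case) cannot see
         * that the read in the second branch is reached only when the
         * first branch ran, so they warn about a possibly uninitialized
         * use.  The = 0 silences that; other compilers optimize it away. */
        unsigned long flags = 0;

        if (use_lock)
                flags = irqsave_placeholder();

        /* ... hand out bytes from the batch ... */

        if (use_lock)
                irqrestore_placeholder(flags);
}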
