author	Theodore Ts'o <tytso@mit.edu>	2018-04-11 15:23:56 -0400
committer	Theodore Ts'o <tytso@mit.edu>	2018-04-14 11:59:19 -0400
commit	8ef35c866f8862df074a49a93b0309725812dea8 (patch)
tree	982e05c942d90c435fdb4dc0f303ae2c4cac22d6 /drivers/char
parent	random: use a different mixing algorithm for add_device_randomness() (diff)
random: set up the NUMA crng instances after the CRNG is fully initialized
Until the primary_crng is fully initialized, don't initialize the NUMA
crng nodes. Otherwise users of /dev/urandom on NUMA systems before the
CRNG is fully initialized can get very bad quality randomness. Of
course everyone should move to getrandom(2) where this won't be an
issue, but there's a lot of legacy code out there. This is related to
CVE-2018-1108.

Reported-by: Jann Horn <jannh@google.com>
Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...")
Cc: stable@kernel.org # 4.8+
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
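As a concrete illustration of the getrandom(2) recommendation above, here is a
minimal userspace sketch (assuming glibc 2.25 or later for the <sys/random.h>
wrapper): with flags == 0, getrandom() blocks until the kernel's CRNG is
initialized, so unlike an early read() from /dev/urandom it never returns
pre-init output.

#include <sys/random.h>		/* getrandom(), glibc >= 2.25 assumed */
#include <stdio.h>

int main(void)
{
	unsigned char buf[16];
	/* flags == 0: block until the kernel CRNG is initialized */
	ssize_t n = getrandom(buf, sizeof(buf), 0);

	if (n < 0) {
		perror("getrandom");
		return 1;
	}
	for (ssize_t i = 0; i < n; i++)
		printf("%02x", buf[i]);
	putchar('\n');
	return 0;
}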
Diffstat (limited to 'drivers/char')
-rw-r--r--	drivers/char/random.c	46
1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 6baa828c0493..02d792f7933f 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -787,6 +787,32 @@ static void crng_initialize(struct crng_state *crng)
crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

+#ifdef CONFIG_NUMA
+static void numa_crng_init(void)
+{
+ int i;
+ struct crng_state *crng;
+ struct crng_state **pool;
+
+ pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
+ for_each_online_node(i) {
+ crng = kmalloc_node(sizeof(struct crng_state),
+ GFP_KERNEL | __GFP_NOFAIL, i);
+ spin_lock_init(&crng->lock);
+ crng_initialize(crng);
+ pool[i] = crng;
+ }
+ mb();
+ if (cmpxchg(&crng_node_pool, NULL, pool)) {
+ for_each_node(i)
+ kfree(pool[i]);
+ kfree(pool);
+ }
+}
+#else
+static void numa_crng_init(void) {}
+#endif
+
/*
* crng_fast_load() can be called by code in the interrupt service
* path. So we can't afford to dilly-dally.
@@ -893,6 +919,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
spin_unlock_irqrestore(&primary_crng.lock, flags);
if (crng == &primary_crng && crng_init < 2) {
invalidate_batched_entropy();
+ numa_crng_init();
crng_init = 2;
process_random_ready_list();
wake_up_interruptible(&crng_init_wait);
@@ -1727,28 +1754,9 @@ static void init_std_data(struct entropy_store *r)
*/
static int rand_initialize(void)
{
-#ifdef CONFIG_NUMA
- int i;
- struct crng_state *crng;
- struct crng_state **pool;
-#endif
-
init_std_data(&input_pool);
init_std_data(&blocking_pool);
crng_initialize(&primary_crng);
-
-#ifdef CONFIG_NUMA
- pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
- for_each_online_node(i) {
- crng = kmalloc_node(sizeof(struct crng_state),
- GFP_KERNEL | __GFP_NOFAIL, i);
- spin_lock_init(&crng->lock);
- crng_initialize(crng);
- pool[i] = crng;
- }
- mb();
- crng_node_pool = pool;
-#endif
return 0;
}
early_initcall(rand_initialize);
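The cmpxchg() at the end of numa_crng_init() above handles two callers racing
to set up the pool: each builds its own copy privately, one wins the swap from
NULL, and the loser frees its copy. Below is a minimal userspace sketch of
that publish-once pattern using C11 atomics in place of the kernel's
mb()/cmpxchg(); the names (struct pool, make_pool, install_pool) are
illustrative, not from the kernel source.

#include <stdatomic.h>
#include <stdlib.h>

struct pool { int data; };

static _Atomic(struct pool *) pool_ptr;	/* starts out NULL */

static struct pool *make_pool(void)
{
	struct pool *p = calloc(1, sizeof(*p));
	if (!p)
		abort();		/* sketch: no graceful error path */
	p->data = 42;			/* stand-in for real initialization */
	return p;
}

void install_pool(void)
{
	struct pool *mine = make_pool();
	struct pool *expected = NULL;

	/*
	 * Release ordering publishes the fully built pool before the
	 * pointer becomes visible, playing the role of the mb() before
	 * the cmpxchg() in the kernel code.
	 */
	if (!atomic_compare_exchange_strong_explicit(&pool_ptr, &expected,
						     mine,
						     memory_order_release,
						     memory_order_relaxed))
		free(mine);		/* another caller won the race */
}

int main(void)
{
	install_pool();
	install_pool();			/* second call loses and frees */
	return 0;
}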