author		Arnd Bergmann <arnd@arndb.de>	2025-06-10 11:27:08 +0200
committer	Jason A. Donenfeld <Jason@zx2c4.com>	2025-06-10 11:34:23 -0600
commit		3778dcb2dcc1ce03a2b25e5d951fe9912e843d1e (patch)
tree		c8d29a9902439c90e0032465cef2edd66ba667a4
parent		prandom: remove next_pseudo_random32 (diff)
download	linux-rng-master.tar.xz / linux-rng-master.zip
random: use offstack cpumask when necessary (HEAD, master)
The entropy generation function keeps a local cpu mask on the stack,
which can trigger warnings in configurations with a large number of CPUs:

  drivers/char/random.c:1292:20: error: stack frame size (1288) exceeds limit (1280) in 'try_to_generate_entropy' [-Werror,-Wframe-larger-than]

Use the cpumask interface to dynamically allocate it in those
configurations.

Fixes: 1c21fe00eda7 ("random: spread out jitter callback to different CPUs")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
 drivers/char/random.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
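For context, a minimal sketch of the pattern the patch switches to; it is not code from random.c. With CONFIG_NR_CPUS=8192, a struct cpumask is 1024 bytes, which is what pushes the frame past the 1280-byte limit quoted above, while cpumask_var_t is a one-element on-stack array when CONFIG_CPUMASK_OFFSTACK is disabled and a heap-backed pointer when it is enabled. The function name example_use_offstack_mask() is illustrative only.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static int example_use_offstack_mask(void)
{
	cpumask_var_t mask;	/* pointer-sized with CONFIG_CPUMASK_OFFSTACK, small array otherwise */

	/* Heap allocation on OFFSTACK configs; compile-time no-op otherwise. */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	/* cpumask_copy()/cpumask_weight() work the same for either representation. */
	cpumask_copy(mask, cpu_online_mask);
	pr_info("example: %u CPUs online\n", cpumask_weight(mask));

	free_cpumask_var(mask);	/* matching no-op when !CONFIG_CPUMASK_OFFSTACK */
	return 0;
}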
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 654b1fda52f0..d45383d57919 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1296,6 +1296,7 @@ static void __cold try_to_generate_entropy(void)
 	struct entropy_timer_state *stack = PTR_ALIGN((void *)stack_bytes, SMP_CACHE_BYTES);
 	unsigned int i, num_different = 0;
 	unsigned long last = random_get_entropy();
+	cpumask_var_t timer_cpus;
 	int cpu = -1;
 
 	for (i = 0; i < NUM_TRIAL_SAMPLES - 1; ++i) {
@@ -1310,13 +1311,15 @@ static void __cold try_to_generate_entropy(void)
 
 	atomic_set(&stack->samples, 0);
 	timer_setup_on_stack(&stack->timer, entropy_timer, 0);
+	if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
+		goto out;
+
 	while (!crng_ready() && !signal_pending(current)) {
 		/*
 		 * Check !timer_pending() and then ensure that any previous callback has finished
 		 * executing by checking timer_delete_sync_try(), before queueing the next one.
 		 */
 		if (!timer_pending(&stack->timer) && timer_delete_sync_try(&stack->timer) >= 0) {
-			struct cpumask timer_cpus;
 			unsigned int num_cpus;
 
 			/*
@@ -1326,19 +1329,19 @@ static void __cold try_to_generate_entropy(void)
 			preempt_disable();
 
 			/* Only schedule callbacks on timer CPUs that are online. */
-			cpumask_and(&timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
-			num_cpus = cpumask_weight(&timer_cpus);
+			cpumask_and(timer_cpus, housekeeping_cpumask(HK_TYPE_TIMER), cpu_online_mask);
+			num_cpus = cpumask_weight(timer_cpus);
 			/* In very bizarre case of misconfiguration, fallback to all online. */
 			if (unlikely(num_cpus == 0)) {
-				timer_cpus = *cpu_online_mask;
-				num_cpus = cpumask_weight(&timer_cpus);
+				cpumask_copy(timer_cpus, cpu_online_mask);
+				num_cpus = cpumask_weight(timer_cpus);
 			}
 
 			/* Basic CPU round-robin, which avoids the current CPU. */
 			do {
-				cpu = cpumask_next(cpu, &timer_cpus);
+				cpu = cpumask_next(cpu, timer_cpus);
 				if (cpu >= nr_cpu_ids)
-					cpu = cpumask_first(&timer_cpus);
+					cpu = cpumask_first(timer_cpus);
 			} while (cpu == smp_processor_id() && num_cpus > 1);
 
 			/* Expiring the timer at `jiffies` means it's the next tick. */
@@ -1354,6 +1357,8 @@ static void __cold try_to_generate_entropy(void)
 	}
 	mix_pool_bytes(&stack->entropy, sizeof(stack->entropy));
 
+	free_cpumask_var(timer_cpus);
+out:
 	timer_delete_sync(&stack->timer);
 	timer_destroy_on_stack(&stack->timer);
 }
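As a reading aid, here is a condensed sketch of the allocation/teardown shape the patch gives try_to_generate_entropy(): a failed allocation jumps past the jitter loop, the out: label keeps the timer teardown on both paths, and free_cpumask_var() sits just above the label so it only runs when the allocation succeeded. offstack_mask_pattern() and the pr_debug() placeholder are illustrative, not code from the driver.

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/printk.h>

static void offstack_mask_pattern(void)
{
	cpumask_var_t timer_cpus;

	if (!alloc_cpumask_var(&timer_cpus, GFP_KERNEL))
		goto out;	/* skip the work that needs the mask */

	/* ... loop that consults timer_cpus (the jitter entropy loop in random.c) ... */

	free_cpumask_var(timer_cpus);	/* reached only when the allocation succeeded */
out:
	/* shared teardown that must run on every path (the timer cleanup in random.c) */
	pr_debug("teardown runs whether or not the mask was allocated\n");
}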