clocksource: Use migrate_disable() to avoid calling get_random_u32() in atomic context
[ Upstream commit 6bb05a33337b2c842373857b63de5c9bf1ae2a09 ]
The following bug report happened with a PREEMPT_RT kernel:
BUG: sleeping function called from invalid context at kernel/locking/spinlock_rt.c:48
in_atomic(): 1, irqs_disabled(): 0, non_block: 0, pid: 2012, name: kwatchdog
preempt_count: 1, expected: 0
RCU nest depth: 0, expected: 0
get_random_u32+0x4f/0x110
clocksource_verify_choose_cpus+0xab/0x1a0
clocksource_verify_percpu.part.0+0x6b/0x330
clocksource_watchdog_kthread+0x193/0x1a0
This happens because clocksource_verify_choose_cpus() is invoked with
preemption disabled. That function calls get_random_u32() to obtain
random numbers for choosing CPUs, and the call may acquire the
batched_entropy_32 local lock and/or the base_crng.lock spinlock in
drivers/char/random.c. On a PREEMPT_RT kernel both of these are sleeping
locks and therefore must not be acquired in atomic context.
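In other words, the offending shape is a sleeping lock taken inside a
preempt-disabled region. A minimal sketch of the pattern (illustrative
only, not the actual call chain):

	preempt_disable();		/* atomic context from here on */
	val = get_random_u32();		/* may take batched_entropy_32 /
					 * base_crng.lock, which sleep on
					 * PREEMPT_RT -> splat above */
	preempt_enable();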
Fix this problem by using migrate_disable(), which allows
smp_processor_id() to be used reliably without putting the task into
atomic context. preempt_disable() is then called after
clocksource_verify_choose_cpus() returns, but before the clocksource
measurement loop runs, to avoid introducing unexpected latency.
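Condensed, the resulting structure of clocksource_verify_percpu() looks
roughly as follows (a sketch of the locking pattern only; the exact
change is in the diff below):

	cpus_read_lock();
	migrate_disable();		/* task stays on this CPU, may still sleep */
	clocksource_verify_choose_cpus();	/* may call get_random_u32() */
	...
	testcpu = smp_processor_id();	/* reliable: migration is disabled */
	preempt_disable();		/* atomic only for the measurement */
	for_each_cpu(cpu, &cpus_chosen) {
		/* per-CPU clocksource read-back and delta check */
	}
	preempt_enable();
	migrate_enable();
	cpus_read_unlock();

migrate_disable() pins the task to its current CPU without disabling
preemption, so the sleeping locks taken by get_random_u32() remain legal
on PREEMPT_RT while smp_processor_id() stays stable.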
Fixes: 7560c02bdf ("clocksource: Check per-CPU clock synchronization when marked unstable")
Suggested-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/all/20250131173323.891943-2-longman@redhat.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/time/clocksource.c

@@ -348,10 +348,10 @@ void clocksource_verify_percpu(struct clocksource *cs)
 	cpumask_clear(&cpus_ahead);
 	cpumask_clear(&cpus_behind);
 	cpus_read_lock();
-	preempt_disable();
+	migrate_disable();
 	clocksource_verify_choose_cpus();
 	if (cpumask_empty(&cpus_chosen)) {
-		preempt_enable();
+		migrate_enable();
 		cpus_read_unlock();
 		pr_warn("Not enough CPUs to check clocksource '%s'.\n", cs->name);
 		return;
@@ -359,6 +359,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
 	testcpu = smp_processor_id();
 	pr_info("Checking clocksource %s synchronization from CPU %d to CPUs %*pbl.\n",
 		cs->name, testcpu, cpumask_pr_args(&cpus_chosen));
+	preempt_disable();
 	for_each_cpu(cpu, &cpus_chosen) {
 		if (cpu == testcpu)
 			continue;
@@ -378,6 +379,7 @@ void clocksource_verify_percpu(struct clocksource *cs)
 			cs_nsec_min = cs_nsec;
 	}
 	preempt_enable();
+	migrate_enable();
 	cpus_read_unlock();
 	if (!cpumask_empty(&cpus_ahead))
 		pr_warn("        CPUs %*pbl ahead of CPU %d for clocksource %s.\n",