author     Steven Rostedt <rostedt@goodmis.org>          2024-09-05 11:33:59 -0400
committer  Steven Rostedt (Google) <rostedt@goodmis.org> 2024-09-05 12:01:37 -0400
commit     5bfbcd1ee57b607fd29e4645c7f350dd385dd9ad (patch)
tree       79580f6a2b90272cc6811066b927baae45c2d77d /kernel/trace
parent     e6a53481da292d970d1edf0d8831121d1c5e2f0d (diff)
tracing/timerlat: Add interface_lock around clearing of kthread in stop_kthread()
The timerlat interface will get and put the task that is part of the "kthread" field of the osn_var to keep it around until all references are released. But there's a race in the stop_kthread() code that will call put_task_struct() on the kthread if it is not a kernel thread. This can race with the releasing of the references to that task struct, and put_task_struct() can end up being called twice when it should have been called just once.

Take the interface_lock in stop_kthread() to synchronize this change. But to do so, the function stop_per_cpu_kthreads() needs to change the loop from for_each_online_cpu() to for_each_possible_cpu() and remove the cpus_read_lock(), as the interface_lock can not be taken while the cpu locks are held. The only side effect of this change is that it may do some extra work, as the per_cpu variables of the offline CPUs would not be set anyway, and would simply be skipped in the loop.

Also remove the unneeded "return;" in stop_kthread().

Cc: stable@vger.kernel.org
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Tomas Glozar <tglozar@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
Cc: "Luis Claudio R. Goncalves" <lgoncalv@redhat.com>
Link: https://lore.kernel.org/20240905113359.2b934242@gandalf.local.home
Fixes: e88ed227f639e ("tracing/timerlat: Add user-space interface")
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
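The core of the fix is a claim-then-release pattern: take interface_lock, move the kthread pointer out of the per-CPU slot and clear it so no other path can find it, drop the lock, and only then release the reference. Below is a minimal user-space sketch of that idea using pthreads; struct fake_task, fake_put(), iface_lock, stop_fake_kthread() and main() are made-up stand-ins for illustration only, not the kernel's task_struct / put_task_struct() API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for a refcounted task; not the kernel's task_struct. */
struct fake_task {
	int refcount;
};

static struct fake_task *the_task;	/* models osn_var->kthread  */
static pthread_mutex_t iface_lock =	/* models interface_lock    */
	PTHREAD_MUTEX_INITIALIZER;

static void fake_put(struct fake_task *t)
{
	/* Dropping the last reference frees the object, so calling this
	 * twice for the same task would be a double free. */
	if (--t->refcount == 0)
		free(t);
}

/*
 * Claim the pointer under the lock, then release the reference outside it.
 * Two racing callers can no longer both reach fake_put() for the same task:
 * whoever clears the_task first owns the final put, the other sees NULL.
 */
static void stop_fake_kthread(void)
{
	struct fake_task *t;

	pthread_mutex_lock(&iface_lock);
	t = the_task;
	the_task = NULL;		/* claim ownership while locked */
	pthread_mutex_unlock(&iface_lock);

	if (t)
		fake_put(t);		/* possibly-sleeping teardown, done unlocked */
}

int main(void)
{
	the_task = calloc(1, sizeof(*the_task));
	the_task->refcount = 1;

	stop_fake_kthread();
	stop_fake_kthread();		/* second call finds NULL and does nothing */

	puts("no double put");
	return 0;
}

With the pointer cleared under the lock, a later caller simply sees NULL and never reaches the final put, which is what prevents the double put_task_struct() described above.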
Diffstat (limited to 'kernel/trace')
-rw-r--r--  kernel/trace/trace_osnoise.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/trace_osnoise.c b/kernel/trace/trace_osnoise.c
index 48e5014dd4ab..bbe47781617e 100644
--- a/kernel/trace/trace_osnoise.c
+++ b/kernel/trace/trace_osnoise.c
@@ -1953,8 +1953,12 @@ static void stop_kthread(unsigned int cpu)
 {
 	struct task_struct *kthread;
 
+	mutex_lock(&interface_lock);
 	kthread = per_cpu(per_cpu_osnoise_var, cpu).kthread;
 	if (kthread) {
+		per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
+		mutex_unlock(&interface_lock);
+
 		if (cpumask_test_and_clear_cpu(cpu, &kthread_cpumask) &&
 		    !WARN_ON(!test_bit(OSN_WORKLOAD, &osnoise_options))) {
 			kthread_stop(kthread);
@@ -1967,8 +1971,8 @@ static void stop_kthread(unsigned int cpu)
 			kill_pid(kthread->thread_pid, SIGKILL, 1);
 			put_task_struct(kthread);
 		}
-		per_cpu(per_cpu_osnoise_var, cpu).kthread = NULL;
 	} else {
+		mutex_unlock(&interface_lock);
 		/* if no workload, just return */
 		if (!test_bit(OSN_WORKLOAD, &osnoise_options)) {
 			/*
@@ -1976,7 +1980,6 @@ static void stop_kthread(unsigned int cpu)
 			 */
 			per_cpu(per_cpu_osnoise_var, cpu).sampling = false;
 			barrier();
-			return;
 		}
 	}
 }
@@ -1991,12 +1994,8 @@ static void stop_per_cpu_kthreads(void)
 {
 	int cpu;
 
-	cpus_read_lock();
-
-	for_each_online_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		stop_kthread(cpu);
-
-	cpus_read_unlock();
 }
 
 /*